author    Joan Touzet <wohali@users.noreply.github.com>  2020-10-21 19:33:29 +0000
committer GitHub <noreply@github.com>  2020-10-21 15:33:29 -0400
commit    bd45d9bcd113d35b08ff20ea68abecab5299280d (patch)
tree      58edde483c4dd8c0b0c68f055af8370b973c6d1e /src/couch/test/eunit
parent    bf82a3f3d9e9dda1fc15ad49bd3be98c96cd755c (diff)
download  couchdb-master.tar.gz
Remove master content, point to main (#3224)
Diffstat (limited to 'src/couch/test/eunit')
-rw-r--r--  src/couch/test/eunit/chttpd_endpoints_tests.erl  103
-rw-r--r--  src/couch/test/eunit/couch_auth_cache_tests.erl  349
-rw-r--r--  src/couch/test/eunit/couch_base32_tests.erl  28
-rw-r--r--  src/couch/test/eunit/couch_bt_engine_compactor_tests.erl  129
-rw-r--r--  src/couch/test/eunit/couch_bt_engine_tests.erl  20
-rw-r--r--  src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl  244
-rw-r--r--  src/couch/test/eunit/couch_btree_tests.erl  572
-rw-r--r--  src/couch/test/eunit/couch_changes_tests.erl  962
-rw-r--r--  src/couch/test/eunit/couch_db_doc_tests.erl  121
-rw-r--r--  src/couch/test/eunit/couch_db_mpr_tests.erl  137
-rw-r--r--  src/couch/test/eunit/couch_db_plugin_tests.erl  205
-rw-r--r--  src/couch/test/eunit/couch_db_props_upgrade_tests.erl  83
-rw-r--r--  src/couch/test/eunit/couch_db_split_tests.erl  331
-rw-r--r--  src/couch/test/eunit/couch_db_tests.erl  198
-rw-r--r--  src/couch/test/eunit/couch_doc_json_tests.erl  493
-rw-r--r--  src/couch/test/eunit/couch_doc_tests.erl  145
-rw-r--r--  src/couch/test/eunit/couch_ejson_size_tests.erl  72
-rw-r--r--  src/couch/test/eunit/couch_etag_tests.erl  30
-rw-r--r--  src/couch/test/eunit/couch_file_tests.erl  551
-rw-r--r--  src/couch/test/eunit/couch_flags_config_tests.erl  119
-rw-r--r--  src/couch/test/eunit/couch_flags_tests.erl  150
-rw-r--r--  src/couch/test/eunit/couch_hotp_tests.erl  28
-rw-r--r--  src/couch/test/eunit/couch_index_tests.erl  232
-rw-r--r--  src/couch/test/eunit/couch_js_tests.erl  172
-rw-r--r--  src/couch/test/eunit/couch_key_tree_prop_tests.erl  530
-rw-r--r--  src/couch/test/eunit/couch_key_tree_tests.erl  413
-rw-r--r--  src/couch/test/eunit/couch_passwords_tests.erl  54
-rw-r--r--  src/couch/test/eunit/couch_query_servers_tests.erl  95
-rw-r--r--  src/couch/test/eunit/couch_server_tests.erl  294
-rw-r--r--  src/couch/test/eunit/couch_stream_tests.erl  124
-rw-r--r--  src/couch/test/eunit/couch_task_status_tests.erl  233
-rw-r--r--  src/couch/test/eunit/couch_totp_tests.erl  55
-rw-r--r--  src/couch/test/eunit/couch_util_tests.erl  177
-rw-r--r--  src/couch/test/eunit/couch_uuids_tests.erl  125
-rw-r--r--  src/couch/test/eunit/couch_work_queue_tests.erl  402
-rw-r--r--  src/couch/test/eunit/couchdb_attachments_tests.erl  765
-rw-r--r--  src/couch/test/eunit/couchdb_auth_tests.erl  115
-rwxr-xr-x  src/couch/test/eunit/couchdb_cookie_domain_tests.erl  80
-rw-r--r--  src/couch/test/eunit/couchdb_cors_tests.erl  344
-rw-r--r--  src/couch/test/eunit/couchdb_db_tests.erl  91
-rw-r--r--  src/couch/test/eunit/couchdb_design_doc_tests.erl  87
-rw-r--r--  src/couch/test/eunit/couchdb_file_compression_tests.erl  250
-rw-r--r--  src/couch/test/eunit/couchdb_location_header_tests.erl  78
-rw-r--r--  src/couch/test/eunit/couchdb_mrview_cors_tests.erl  140
-rw-r--r--  src/couch/test/eunit/couchdb_mrview_tests.erl  261
-rw-r--r--  src/couch/test/eunit/couchdb_os_proc_pool.erl  306
-rw-r--r--  src/couch/test/eunit/couchdb_update_conflicts_tests.erl  280
-rw-r--r--  src/couch/test/eunit/couchdb_vhosts_tests.erl  271
-rw-r--r--  src/couch/test/eunit/couchdb_views_tests.erl  668
-rw-r--r--  src/couch/test/eunit/fixtures/6cf2c2f766f87b618edf6630b00f8736.view  bin 8310 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg  19
-rw-r--r--  src/couch/test/eunit/fixtures/couch_stats_aggregates.ini  20
-rw-r--r--  src/couch/test/eunit/fixtures/db_non_partitioned.couch  bin 12479 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch  bin 12470 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch  bin 16557 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch  bin 16566 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch  bin 61644 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch  bin 16617 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch  bin 20705 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch  bin 20713 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch  bin 65781 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/logo.png  bin 3010 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/multipart.http  13
-rw-r--r--  src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh  17
-rwxr-xr-x  src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh  15
-rwxr-xr-x  src/couch/test/eunit/fixtures/os_daemon_configer.escript  97
-rwxr-xr-x  src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh  15
-rwxr-xr-x  src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh  15
-rwxr-xr-x  src/couch/test/eunit/fixtures/os_daemon_looper.escript  26
-rw-r--r--  src/couch/test/eunit/fixtures/test.couch  bin 28878 -> 0 bytes
-rw-r--r--  src/couch/test/eunit/global_changes_tests.erl  159
-rw-r--r--  src/couch/test/eunit/json_stream_parse_tests.erl  151
-rw-r--r--  src/couch/test/eunit/test_web.erl  114
73 files changed, 0 insertions, 12373 deletions
diff --git a/src/couch/test/eunit/chttpd_endpoints_tests.erl b/src/couch/test/eunit/chttpd_endpoints_tests.erl
deleted file mode 100644
index 3c8586a14..000000000
--- a/src/couch/test/eunit/chttpd_endpoints_tests.erl
+++ /dev/null
@@ -1,103 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_endpoints_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-endpoints_test_() ->
- {
- "Checking dynamic endpoints",
- {
- setup,
- fun() ->
- test_util:start_couch([chttpd])
- end,
- fun test_util:stop/1,
- [
- fun url_handlers/0,
- fun db_handlers/0,
- fun design_handlers/0
- ]
- }
- }.
-
-
-url_handlers() ->
- Handlers = [
- {<<"">>, chttpd_misc, handle_welcome_req},
- {<<"favicon.ico">>, chttpd_misc, handle_favicon_req},
- {<<"_utils">>, chttpd_misc, handle_utils_dir_req},
- {<<"_all_dbs">>, chttpd_misc, handle_all_dbs_req},
- {<<"_dbs_info">>, chttpd_misc, handle_dbs_info_req},
- {<<"_active_tasks">>, chttpd_misc, handle_task_status_req},
- {<<"_node">>, chttpd_node, handle_node_req},
- {<<"_reload_query_servers">>, chttpd_misc, handle_reload_query_servers_req},
- {<<"_replicate">>, chttpd_misc, handle_replicate_req},
- {<<"_uuids">>, chttpd_misc, handle_uuids_req},
- {<<"_session">>, chttpd_auth, handle_session_req},
- {<<"_up">>, chttpd_misc, handle_up_req},
- {<<"_membership">>, mem3_httpd, handle_membership_req},
- {<<"_db_updates">>, global_changes_httpd, handle_global_changes_req},
- {<<"_cluster_setup">>, setup_httpd, handle_setup_req}
- ],
-
- lists:foreach(fun({Path, Mod, Fun}) ->
- Handler = chttpd_handlers:url_handler(Path, undefined),
- Expect = fun Mod:Fun/1,
- ?assertEqual(Expect, Handler)
- end, Handlers),
-
- ?assertEqual(undefined, chttpd_handlers:url_handler("foo", undefined)).
-
-
-db_handlers() ->
- Handlers = [
- {<<"_view_cleanup">>, chttpd_db, handle_view_cleanup_req},
- {<<"_compact">>, chttpd_db, handle_compact_req},
- {<<"_design">>, chttpd_db, handle_design_req},
- {<<"_temp_view">>, chttpd_view, handle_temp_view_req},
- {<<"_changes">>, chttpd_db, handle_changes_req},
- {<<"_shards">>, mem3_httpd, handle_shards_req},
- {<<"_index">>, mango_httpd, handle_req},
- {<<"_explain">>, mango_httpd, handle_req},
- {<<"_find">>, mango_httpd, handle_req}
- ],
-
- lists:foreach(fun({Path, Mod, Fun}) ->
- Handler = chttpd_handlers:db_handler(Path, undefined),
- Expect = fun Mod:Fun/2,
- ?assertEqual(Expect, Handler)
- end, Handlers),
-
- ?assertEqual(undefined, chttpd_handlers:db_handler("bam", undefined)).
-
-
-design_handlers() ->
- Handlers = [
- {<<"_view">>, chttpd_view, handle_view_req},
- {<<"_show">>, chttpd_show, handle_doc_show_req},
- {<<"_list">>, chttpd_show, handle_view_list_req},
- {<<"_update">>, chttpd_show, handle_doc_update_req},
- {<<"_info">>, chttpd_db, handle_design_info_req},
- {<<"_rewrite">>, chttpd_rewrite, handle_rewrite_req}
- ],
-
- lists:foreach(fun({Path, Mod, Fun}) ->
- Handler = chttpd_handlers:design_handler(Path, undefined),
- Expect = fun Mod:Fun/3,
- ?assertEqual(Expect, Handler)
- end, Handlers),
-
- ?assertEqual(undefined, chttpd_handlers:design_handler("baz", undefined)).
diff --git a/src/couch/test/eunit/couch_auth_cache_tests.erl b/src/couch/test/eunit/couch_auth_cache_tests.erl
deleted file mode 100644
index 71faf77d6..000000000
--- a/src/couch/test/eunit/couch_auth_cache_tests.erl
+++ /dev/null
@@ -1,349 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_auth_cache_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(SALT, <<"SALT">>).
--define(DB_TIMEOUT, 15000).
-
-start() ->
- test_util:start_couch([ioq]).
-
-
-setup() ->
- DbName = ?tempdb(),
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(DbName), false),
- DbName.
-
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-
-couch_auth_cache_test_() ->
- {
- "CouchDB auth cache tests",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_get_nil_on_missed_cache/1,
- fun should_get_right_password_hash/1,
- fun should_ensure_doc_hash_equals_cached_one/1,
- fun should_update_password/1,
- fun should_cleanup_cache_after_userdoc_deletion/1,
- fun should_restore_cache_after_userdoc_recreation/1,
- fun should_drop_cache_on_auth_db_change/1,
- fun should_restore_cache_on_auth_db_change/1,
- fun should_recover_cache_after_shutdown/1,
- fun should_get_admin_from_config/1
- ]
- }
- }
- }.
-
-auth_vdu_test_() ->
- Cases = [
- %% Old , New , Result
- %% [Roles, Type] , [Roles, Type] ,
-
- %% Updating valid user doc with valid one
- {[custom, user], [custom, user], "ok"},
-
- %% Updating invalid doc (missing type or roles field) with valid one
- {[missing, missing], [custom, user], "ok"},
- {[missing, user], [custom, user], "ok"},
- {[custom, missing], [custom, user], "ok"},
-
- %% Updating invalid doc (wrong type) with valid one
- {[missing, other], [custom, user], "ok"},
- {[custom, other], [custom, user], "ok"},
-
- %% Updating valid document with invalid one
- {[custom, user], [missing, missing], "doc.type must be user"},
- {[custom, user], [missing, user], "doc.roles must exist"},
- {[custom, user], [custom, missing], "doc.type must be user"},
- {[custom, user], [missing, other], "doc.type must be user"},
- {[custom, user], [custom, other], "doc.type must be user"},
-
- %% Updating invalid doc with invalid one
- {[missing, missing], [missing, missing], "doc.type must be user"},
- {[missing, missing], [missing, user], "doc.roles must exist"},
- {[missing, missing], [custom, missing], "doc.type must be user"},
- {[missing, missing], [missing, other], "doc.type must be user"},
- {[missing, missing], [custom, other], "doc.type must be user"},
-
- {[missing, user], [missing, missing], "doc.type must be user"},
- {[missing, user], [missing, user], "doc.roles must exist"},
- {[missing, user], [custom, missing], "doc.type must be user"},
- {[missing, user], [missing, other], "doc.type must be user"},
- {[missing, user], [custom, other], "doc.type must be user"},
-
- {[missing, other], [missing, missing], "doc.type must be user"},
- {[missing, other], [missing, user], "doc.roles must exist"},
- {[missing, other], [custom, missing], "doc.type must be user"},
- {[missing, other], [missing, other], "doc.type must be user"},
- {[missing, other], [custom, other], "doc.type must be user"},
-
- {[custom, missing], [missing, missing], "doc.type must be user"},
- {[custom, missing], [missing, user], "doc.roles must exist"},
- {[custom, missing], [custom, missing], "doc.type must be user"},
- {[custom, missing], [missing, other], "doc.type must be user"},
- {[custom, missing], [custom, other], "doc.type must be user"},
-
- {[custom, other], [missing, missing], "doc.type must be user"},
- {[custom, other], [missing, user], "doc.roles must exist"},
- {[custom, other], [custom, missing], "doc.type must be user"},
- {[custom, other], [missing, other], "doc.type must be user"},
- {[custom, other], [custom, other], "doc.type must be user"}
- ],
-
- %% Make sure we covered all combinations
- AllPossibleDocs = couch_tests_combinatorics:product([
- [missing, custom],
- [missing, user, other]
- ]),
- AllPossibleCases = couch_tests_combinatorics:product(
- [AllPossibleDocs, AllPossibleDocs]),
- ?assertEqual([], AllPossibleCases -- [[A, B] || {A, B, _} <- Cases]),
-
- {
- "Check User doc validation",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- [
- make_validate_test(Case) || Case <- Cases
- ]
- }
- }.
-
-should_get_nil_on_missed_cache(_) ->
- ?_assertEqual(nil, couch_auth_cache:get_user_creds("joe")).
-
-should_get_right_password_hash(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass1"),
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
- end).
-
-should_ensure_doc_hash_equals_cached_one(DbName) ->
- ?_test(begin
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
-
- CachedHash = couch_util:get_value(<<"password_sha">>, Creds),
- StoredHash = get_user_doc_password_sha(DbName, "joe"),
- ?assertEqual(StoredHash, CachedHash)
- end).
-
-should_update_password(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass2"),
- {ok, Rev} = update_user_doc(DbName, "joe", "pass1"),
- {ok, _} = update_user_doc(DbName, "joe", "pass2", Rev),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
- end).
-
-should_cleanup_cache_after_userdoc_deletion(DbName) ->
- ?_test(begin
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- delete_user_doc(DbName, "joe"),
- ?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
- end).
-
-should_restore_cache_after_userdoc_recreation(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass5"),
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- delete_user_doc(DbName, "joe"),
- ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")),
-
- {ok, _} = update_user_doc(DbName, "joe", "pass5"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
-
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
- end).
-
-should_drop_cache_on_auth_db_change(DbName) ->
- ?_test(begin
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(?tempdb()), false),
- ?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
- end).
-
-should_restore_cache_on_auth_db_change(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass1"),
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
-
- DbName1 = ?tempdb(),
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(DbName1), false),
-
- {ok, _} = update_user_doc(DbName1, "joe", "pass5"),
-
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(DbName), false),
-
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
- end).
-
-should_recover_cache_after_shutdown(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass2"),
- {ok, Rev0} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Rev1} = update_user_doc(DbName, "joe", "pass2", Rev0),
- shutdown_db(DbName),
- {ok, Rev1} = get_doc_rev(DbName, "joe"),
- ?assertEqual(PasswordHash, get_user_doc_password_sha(DbName, "joe"))
- end).
-
-
-should_get_admin_from_config(_DbName) ->
- ?_test(begin
- config:set("admins", "testadmin", "password", false),
- Creds = test_util:wait(fun() ->
- case couch_auth_cache:get_user_creds("testadmin") of
- {ok, Creds0, _} -> Creds0;
- nil -> wait
- end
- end),
- Roles = couch_util:get_value(<<"roles">>, Creds),
- ?assertEqual([<<"_admin">>], Roles)
- end).
-
-update_user_doc(DbName, UserName, Password) ->
- update_user_doc(DbName, UserName, Password, nil).
-
-update_user_doc(DbName, UserName, Password, Rev) ->
- ok = couch_auth_cache:ensure_users_db_exists(),
- User = iolist_to_binary(UserName),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"org.couchdb.user:", User/binary>>},
- {<<"name">>, User},
- {<<"type">>, <<"user">>},
- {<<"salt">>, ?SALT},
- {<<"password_sha">>, hash_password(Password)},
- {<<"roles">>, []}
- ] ++ case Rev of
- nil -> [];
- _ -> [{<<"_rev">>, Rev}]
- end
- }),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, NewRev} = couch_db:update_doc(AuthDb, Doc, []),
- ok = couch_db:close(AuthDb),
- {ok, couch_doc:rev_to_str(NewRev)}.
-
-hash_password(Password) ->
- ?l2b(couch_util:to_hex(crypto:hash(sha, iolist_to_binary([Password, ?SALT])))).
-
-shutdown_db(DbName) ->
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(AuthDb),
- couch_util:shutdown_sync(couch_db:get_pid(AuthDb)),
- ok = timer:sleep(1000).
-
-get_doc_rev(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- UpdateRev =
- case couch_db:open_doc(AuthDb, DocId, []) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- couch_util:get_value(<<"_rev">>, Props);
- {not_found, missing} ->
- nil
- end,
- ok = couch_db:close(AuthDb),
- {ok, UpdateRev}.
-
-get_user_doc_password_sha(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
- ok = couch_db:close(AuthDb),
- {Props} = couch_doc:to_json_obj(Doc, []),
- couch_util:get_value(<<"password_sha">>, Props).
-
-delete_user_doc(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
- {Props} = couch_doc:to_json_obj(Doc, []),
- DeletedDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)},
- {<<"_deleted">>, true}
- ]}),
- {ok, _} = couch_db:update_doc(AuthDb, DeletedDoc, []),
- ok = couch_db:close(AuthDb).
-
-
-make_validate_test({Old, New, "ok"} = Case) ->
- {test_id(Case), ?_assertEqual(ok, validate(doc(Old), doc(New)))};
-make_validate_test({Old, New, Reason} = Case) ->
- Failure = ?l2b(Reason),
- {test_id(Case), ?_assertThrow({forbidden, Failure}, validate(doc(Old), doc(New)))}.
-
-test_id({[OldRoles, OldType], [NewRoles, NewType], Result}) ->
- lists:flatten(io_lib:format(
- "(roles: ~w, type: ~w) -> (roles: ~w, type: ~w) ==> \"~s\"",
- [OldRoles, OldType, NewRoles, NewType, Result])).
-
-doc([Roles, Type]) ->
- couch_doc:from_json_obj({[
- {<<"_id">>,<<"org.couchdb.user:foo">>},
- {<<"_rev">>,<<"1-281c81adb1bf10927a6160f246dc0468">>},
- {<<"name">>,<<"foo">>},
- {<<"password_scheme">>,<<"simple">>},
- {<<"salt">>,<<"00000000000000000000000000000000">>},
- {<<"password_sha">>, <<"111111111111111111111111111111111111">>}]
- ++ type(Type) ++ roles(Roles)}).
-
-roles(custom) -> [{<<"roles">>, [<<"custom">>]}];
-roles(missing) -> [].
-
-type(user) -> [{<<"type">>, <<"user">>}];
-type(other) -> [{<<"type">>, <<"other">>}];
-type(missing) -> [].
-
-validate(DiskDoc, NewDoc) ->
- JSONCtx = {[
- {<<"db">>, <<"foo/bar">>},
- {<<"name">>, <<"foo">>},
- {<<"roles">>, [<<"_admin">>]}
- ]},
- validate(DiskDoc, NewDoc, JSONCtx).
-
-validate(DiskDoc, NewDoc, JSONCtx) ->
- {ok, DDoc0} = couch_auth_cache:auth_design_doc(<<"_design/anything">>),
- DDoc = DDoc0#doc{revs = {1, [<<>>]}},
- couch_query_servers:validate_doc_update(DDoc, NewDoc, DiskDoc, JSONCtx, []).
diff --git a/src/couch/test/eunit/couch_base32_tests.erl b/src/couch/test/eunit/couch_base32_tests.erl
deleted file mode 100644
index 7e4d59a09..000000000
--- a/src/couch/test/eunit/couch_base32_tests.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_base32_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-base32_test() ->
- roundtrip(<<"">>, <<"">>),
- roundtrip(<<"f">>, <<"MY======">>),
- roundtrip(<<"fo">>, <<"MZXQ====">>),
- roundtrip(<<"foo">>, <<"MZXW6===">>),
- roundtrip(<<"foob">>, <<"MZXW6YQ=">>),
- roundtrip(<<"fooba">>, <<"MZXW6YTB">>),
- roundtrip(<<"foobar">>, <<"MZXW6YTBOI======">>).
-
-roundtrip(Plain, Encoded) ->
- ?assertEqual(Plain, couch_base32:decode(Encoded)),
- ?assertEqual(Encoded, couch_base32:encode(Plain)).
diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
deleted file mode 100644
index 4c4c43958..000000000
--- a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
+++ /dev/null
@@ -1,129 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_compactor_tests).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(DELAY, 100).
--define(WAIT_DELAY_COUNT, 50).
-
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_docs(DbName),
- DbName.
-
-
-teardown(DbName) when is_binary(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-
-compaction_resume_test_() ->
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun compaction_resume/1
- ]
- }
- }.
-
-
-compaction_resume(DbName) ->
- ?_test(begin
- check_db_validity(DbName),
- compact_db(DbName),
- check_db_validity(DbName),
-
- % Force an error when copying document ids
- with_mecked_emsort(fun() ->
- compact_db(DbName)
- end),
-
- check_db_validity(DbName),
- compact_db(DbName),
- check_db_validity(DbName)
- end).
-
-
-check_db_validity(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:count_changes_since(Db, 0))
- end).
-
-
-with_mecked_emsort(Fun) ->
- meck:new(couch_emsort, [passthrough]),
- meck:expect(couch_emsort, iter, fun(_) -> erlang:error(kaboom) end),
- try
- Fun()
- after
- meck:unload()
- end.
-
-
-create_docs(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
-
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
-
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3])
- end).
-
-
-compact_db(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).
-
-
-wait_db_compact_done(_DbName, 0) ->
- Failure = [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "DB compaction failed to finish"}
- ],
- erlang:error({assertion_failed, Failure});
-wait_db_compact_done(DbName, N) ->
- IsDone = couch_util:with_db(DbName, fun(Db) ->
- not is_pid(couch_db:get_compactor_pid(Db))
- end),
- if IsDone -> ok; true ->
- timer:sleep(?DELAY),
- wait_db_compact_done(DbName, N - 1)
- end.
diff --git a/src/couch/test/eunit/couch_bt_engine_tests.erl b/src/couch/test/eunit/couch_bt_engine_tests.erl
deleted file mode 100644
index 3e3ecbf25..000000000
--- a/src/couch/test/eunit/couch_bt_engine_tests.erl
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_tests).
-
-
--include_lib("eunit/include/eunit.hrl").
-
-
-couch_bt_engine_test_() ->
- cpse_util:create_tests(couch, couch_bt_engine, "couch").
diff --git a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
deleted file mode 100644
index a2a972caf..000000000
--- a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
+++ /dev/null
@@ -1,244 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_upgrade_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 60). % seconds
-
-setup(_) ->
- Ctx = test_util:start_couch(),
- DbDir = config:get("couchdb", "database_dir"),
- DbFileNames = [
- "db_v6_without_purge_req.couch",
- "db_v6_with_1_purge_req.couch",
- "db_v6_with_2_purge_req.couch",
- "db_v6_with_1_purge_req_for_2_docs.couch",
- "db_v7_without_purge_req.couch",
- "db_v7_with_1_purge_req.couch",
- "db_v7_with_2_purge_req.couch",
- "db_v7_with_1_purge_req_for_2_docs.couch"
- ],
- NewPaths = lists:map(fun(DbFileName) ->
- OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
- NewDbFilePath = filename:join([DbDir, DbFileName]),
- ok = filelib:ensure_dir(NewDbFilePath),
- file:delete(NewDbFilePath),
- {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
- NewDbFilePath
- end, DbFileNames),
- {Ctx, NewPaths}.
-
-
-teardown(_, {Ctx, Paths}) ->
- test_util:stop_couch(Ctx),
- lists:foreach(fun(Path) ->
- file:delete(Path)
- end, Paths).
-
-
-upgrade_test_() ->
- From = [6, 7],
- {
- "Couch Bt Engine Upgrade tests",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{F, fun t_upgrade_without_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_1_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_N_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_1_purge_req_for_2_docs/2} || F <- From]
- }
- }.
-
-
-t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There are three documents in the fixture
- % db with zero purge entries
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_without_purge_req"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(0, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
- end),
- ?assertEqual([], UpgradedPurged),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- {ok, Rev} = save_doc(
- DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
- ),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 5}, couch_db:get_doc_count(Db)),
- ?assertEqual(0, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(1, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-
-t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There are two documents in the fixture database
- % with a single purge entry
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_with_1_purge_req"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(1, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{1, <<"doc1">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(
- DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
- ),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(1, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(2, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-
-t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There is one document in the fixture database
- % with two docs that have been purged
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_with_2_purge_req"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(2, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{2, <<"doc2">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(2, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 2}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-
-t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There are two documents (Doc4 and Doc5) in the fixture database
- % with three docs (Doc1, Doc2 and Doc3) that have been purged, and
- % with one purge req for Doc1 and another purge req for Doc2 and Doc3
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_with_1_purge_req_for_2_docs"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(3, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{3,<<"doc2">>},{2,<<"doc3">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc7">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc6">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(4, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-
-save_doc(DbName, Json) ->
- Doc = couch_doc:from_json_obj(Json),
- couch_util:with_db(DbName, fun(Db) ->
- couch_db:update_doc(Db, Doc, [])
- end).
-
-
-fold_fun({PSeq, _UUID, Id, _Revs}, Acc) ->
- {ok, [{PSeq, Id} | Acc]}.
-
-
-get_disk_version_from_header(DbFileName) ->
- DbDir = config:get("couchdb", "database_dir"),
- DbFilePath = filename:join([DbDir, ?l2b(?b2l(DbFileName) ++ ".couch")]),
- {ok, Fd} = couch_file:open(DbFilePath, []),
- {ok, Header} = couch_file:read_header(Fd),
- DiskVersion = couch_bt_engine_header:disk_version(Header),
- couch_file:close(Fd),
- DiskVersion.
diff --git a/src/couch/test/eunit/couch_btree_tests.erl b/src/couch/test/eunit/couch_btree_tests.erl
deleted file mode 100644
index c9b791d2c..000000000
--- a/src/couch/test/eunit/couch_btree_tests.erl
+++ /dev/null
@@ -1,572 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_btree_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(ROWS, 1000).
--define(TIMEOUT, 60). % seconds
-
-
-setup() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none},
- {reduce, fun reduce_fun/2}]),
- {Fd, Btree}.
-
-setup_kvs(_) ->
- setup().
-
-setup_red() ->
- {_, EvenOddKVs} = lists:foldl(
- fun(Idx, {Key, Acc}) ->
- case Key of
- "even" -> {"odd", [{{Key, Idx}, 1} | Acc]};
- _ -> {"even", [{{Key, Idx}, 1} | Acc]}
- end
- end, {"odd", []}, lists:seq(1, ?ROWS)),
- {Fd, Btree} = setup(),
- {ok, Btree1} = couch_btree:add_remove(Btree, EvenOddKVs, []),
- {Fd, Btree1}.
-setup_red(_) ->
- setup_red().
-
-teardown(Fd) when is_pid(Fd) ->
- ok = couch_file:close(Fd);
-teardown({Fd, _}) ->
- teardown(Fd).
-teardown(_, {Fd, _}) ->
- teardown(Fd).
-
-
-kvs_test_funs() ->
- [
- fun should_set_fd_correctly/2,
- fun should_set_root_correctly/2,
- fun should_create_zero_sized_btree/2,
- fun should_set_reduce_option/2,
- fun should_fold_over_empty_btree/2,
- fun should_add_all_keys/2,
- fun should_continuously_add_new_kv/2,
- fun should_continuously_remove_keys/2,
- fun should_insert_keys_in_reversed_order/2,
- fun should_add_every_odd_key_remove_every_even/2,
- fun should_add_every_even_key_remove_every_odd/2
- ].
-
-red_test_funs() ->
- [
- fun should_reduce_whole_range/2,
- fun should_reduce_first_half/2,
- fun should_reduce_second_half/2
- ].
-
-
-btree_open_test_() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}]),
- {
- "Ensure that created btree is really a btree record",
- ?_assert(is_record(Btree, btree))
- }.
-
-sorted_kvs_test_() ->
- Funs = kvs_test_funs(),
- Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
- {
- "BTree with sorted keys",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- {
- foreachx,
- fun setup_kvs/1, fun teardown/2,
- [{Sorted, Fun} || Fun <- Funs]
- }
- }
- }.
-
-rsorted_kvs_test_() ->
- Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
- Funs = kvs_test_funs(),
- Reversed = lists:reverse(Sorted),
- {
- "BTree with backward sorted keys",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- {
- foreachx,
- fun setup_kvs/1, fun teardown/2,
- [{Reversed, Fun} || Fun <- Funs]
- }
- }
- }.
-
-shuffled_kvs_test_() ->
- Funs = kvs_test_funs(),
- Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
- Shuffled = shuffle(Sorted),
- {
- "BTree with shuffled keys",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- {
- foreachx,
- fun setup_kvs/1, fun teardown/2,
- [{Shuffled, Fun} || Fun <- Funs]
- }
- }
- }.
-
-reductions_test_() ->
- {
- "BTree reductions",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- [
- {
- "Common tests",
- {
- foreach,
- fun setup_red/0, fun teardown/1,
- [
- fun should_reduce_without_specified_direction/1,
- fun should_reduce_forward/1,
- fun should_reduce_backward/1
- ]
- }
- },
- {
- "Range requests",
- [
- {
- "Forward direction",
- {
- foreachx,
- fun setup_red/1, fun teardown/2,
- [{fwd, F} || F <- red_test_funs()]
- }
- },
- {
- "Backward direction",
- {
- foreachx,
- fun setup_red/1, fun teardown/2,
- [{rev, F} || F <- red_test_funs()]
- }
- }
- ]
- }
- ]
- }
- }.
-
-
-should_set_fd_correctly(_, {Fd, Btree}) ->
- ?_assertMatch(Fd, Btree#btree.fd).
-
-should_set_root_correctly(_, {_, Btree}) ->
- ?_assertMatch(nil, Btree#btree.root).
-
-should_create_zero_sized_btree(_, {_, Btree}) ->
- ?_assertMatch(0, couch_btree:size(Btree)).
-
-should_set_reduce_option(_, {_, Btree}) ->
- ReduceFun = fun reduce_fun/2,
- Btree1 = couch_btree:set_options(Btree, [{reduce, ReduceFun}]),
- ?_assertMatch(ReduceFun, Btree1#btree.reduce).
-
-should_fold_over_empty_btree(_, {_, Btree}) ->
- {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X+1} end, 0),
- ?_assertEqual(EmptyRes, 0).
-
-should_add_all_keys(KeyValues, {Fd, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- [
- should_return_complete_btree_on_adding_all_keys(KeyValues, Btree1),
- should_have_non_zero_size(Btree1),
- should_have_lesser_size_than_file(Fd, Btree1),
- should_keep_root_pointer_to_kp_node(Fd, Btree1),
- should_remove_all_keys(KeyValues, Btree1)
- ].
-
-should_return_complete_btree_on_adding_all_keys(KeyValues, Btree) ->
- ?_assert(test_btree(Btree, KeyValues)).
-
-should_have_non_zero_size(Btree) ->
- ?_assert(couch_btree:size(Btree) > 0).
-
-should_have_lesser_size_than_file(Fd, Btree) ->
- ?_assert((couch_btree:size(Btree) =< couch_file:bytes(Fd))).
-
-should_keep_root_pointer_to_kp_node(Fd, Btree) ->
- ?_assertMatch({ok, {kp_node, _}},
- couch_file:pread_term(Fd, element(1, Btree#btree.root))).
-
-should_remove_all_keys(KeyValues, Btree) ->
- Keys = keys(KeyValues),
- {ok, Btree1} = couch_btree:add_remove(Btree, [], Keys),
- {
- "Should remove all the keys",
- [
- should_produce_valid_btree(Btree1, []),
- should_be_empty(Btree1)
- ]
- }.
-
-should_continuously_add_new_kv(KeyValues, {_, Btree}) ->
- {Btree1, _} = lists:foldl(
- fun(KV, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- ?assert(couch_btree:size(BtAcc2) > PrevSize),
- {BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree, couch_btree:size(Btree)}, KeyValues),
- {
- "Should continuously add key-values to btree",
- [
- should_produce_valid_btree(Btree1, KeyValues),
- should_not_be_empty(Btree1)
- ]
- }.
-
-should_continuously_remove_keys(KeyValues, {_, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {Btree2, _} = lists:foldl(
- fun({K, _}, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
- ?assert(couch_btree:size(BtAcc2) < PrevSize),
- {BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree1, couch_btree:size(Btree1)}, KeyValues),
- {
- "Should continuously remove keys from btree",
- [
- should_produce_valid_btree(Btree2, []),
- should_be_empty(Btree2)
- ]
- }.
-
-should_insert_keys_in_reversed_order(KeyValues, {_, Btree}) ->
- KeyValuesRev = lists:reverse(KeyValues),
- {Btree1, _} = lists:foldl(
- fun(KV, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- ?assert(couch_btree:size(BtAcc2) > PrevSize),
- {BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree, couch_btree:size(Btree)}, KeyValuesRev),
- should_produce_valid_btree(Btree1, KeyValues).
-
-should_add_every_odd_key_remove_every_even(KeyValues, {_, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
- case Count rem 2 == 0 of
- true -> {Count + 1, [X | Left], Right};
- false -> {Count + 1, Left, [X | Right]}
- end
- end, {0, [], []}, KeyValues),
- {timeout, ?TIMEOUT,
- ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1))
- }.
-
-should_add_every_even_key_remove_every_odd(KeyValues, {_, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
- case Count rem 2 == 0 of
- true -> {Count + 1, [X | Left], Right};
- false -> {Count + 1, Left, [X | Right]}
- end
- end, {0, [], []}, KeyValues),
- {timeout, ?TIMEOUT,
- ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0))
- }.
-
-
-should_reduce_without_specified_direction({_, Btree}) ->
- ?_assertMatch(
- {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [])).
-
-should_reduce_forward({_, Btree}) ->
- ?_assertMatch(
- {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd}])).
-
-should_reduce_backward({_, Btree}) ->
- ?_assertMatch(
- {ok, [{{"even", _}, ?ROWS div 2}, {{"odd", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev}])).
-
-should_reduce_whole_range(fwd, {_, Btree}) ->
- {SK, EK} = {{"even", 0}, {"odd", ?ROWS - 1}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, ?ROWS div 2},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, (?ROWS div 2) - 1},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ];
-should_reduce_whole_range(rev, {_, Btree}) ->
- {SK, EK} = {{"odd", ?ROWS - 1}, {"even", 2}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, ?ROWS div 2},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ].
-
-should_reduce_first_half(fwd, {_, Btree}) ->
- {SK, EK} = {{"even", 0}, {"odd", (?ROWS div 2) - 1}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, ?ROWS div 4},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK}, {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, (?ROWS div 4) - 1},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ];
-should_reduce_first_half(rev, {_, Btree}) ->
- {SK, EK} = {{"odd", ?ROWS - 1}, {"even", ?ROWS div 2}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, (?ROWS div 4) + 1},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, ?ROWS div 4},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ].
-
-should_reduce_second_half(fwd, {_, Btree}) ->
- {SK, EK} = {{"even", ?ROWS div 2}, {"odd", ?ROWS - 1}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, ?ROWS div 2},
- {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, (?ROWS div 2) - 1},
- {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ];
-should_reduce_second_half(rev, {_, Btree}) ->
- {SK, EK} = {{"odd", (?ROWS div 2) + 1}, {"even", 2}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, ?ROWS div 2},
- {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1},
- {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ].
-
-should_produce_valid_btree(Btree, KeyValues) ->
- ?_assert(test_btree(Btree, KeyValues)).
-
-should_be_empty(Btree) ->
- ?_assertEqual(couch_btree:size(Btree), 0).
-
-should_not_be_empty(Btree) ->
- ?_assert(couch_btree:size(Btree) > 0).
-
-fold_reduce(Btree, Opts) ->
- GroupFun = fun({K1, _}, {K2, _}) ->
- K1 == K2
- end,
- FoldFun = fun(GroupedKey, Unreduced, Acc) ->
- {ok, [{GroupedKey, couch_btree:final_reduce(Btree, Unreduced)} | Acc]}
- end,
- couch_btree:fold_reduce(Btree, FoldFun, [],
- [{key_group_fun, GroupFun}] ++ Opts).
-
-
-keys(KVs) ->
- [K || {K, _} <- KVs].
-
-reduce_fun(reduce, KVs) ->
- length(KVs);
-reduce_fun(rereduce, Reds) ->
- lists:sum(Reds).
-
-
-shuffle(List) ->
- randomize(round(math:log(length(List)) + 0.5), List).
-
-randomize(1, List) ->
- randomize(List);
-randomize(T, List) ->
- lists:foldl(
- fun(_E, Acc) ->
- randomize(Acc)
- end, randomize(List), lists:seq(1, (T - 1))).
-
-randomize(List) ->
- D = lists:map(fun(A) -> {couch_rand:uniform(), A} end, List),
- {_, D1} = lists:unzip(lists:keysort(1, D)),
- D1.
-
-test_btree(Btree, KeyValues) ->
- ok = test_key_access(Btree, KeyValues),
- ok = test_lookup_access(Btree, KeyValues),
- ok = test_final_reductions(Btree, KeyValues),
- ok = test_traversal_callbacks(Btree, KeyValues),
- true.
-
-test_add_remove(Btree, OutKeyValues, RemainingKeyValues) ->
- Btree2 = lists:foldl(
- fun({K, _}, BtAcc) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
- BtAcc2
- end, Btree, OutKeyValues),
- true = test_btree(Btree2, RemainingKeyValues),
-
- Btree3 = lists:foldl(
- fun(KV, BtAcc) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- BtAcc2
- end, Btree2, OutKeyValues),
- true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues).
-
-test_key_access(Btree, List) ->
- FoldFun = fun(Element, {[HAcc|TAcc], Count}) ->
- case Element == HAcc of
- true -> {ok, {TAcc, Count + 1}};
- _ -> {ok, {TAcc, Count + 1}}
- end
- end,
- Length = length(List),
- Sorted = lists:sort(List),
- {ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}),
- {ok, _, {[], Length}} = couch_btree:fold(Btree, FoldFun,
- {Sorted, 0}, [{dir, rev}]),
- ok.
-
-test_lookup_access(Btree, KeyValues) ->
- FoldFun = fun({Key, Value}, {Key, Value}) -> {stop, true} end,
- lists:foreach(
- fun({Key, Value}) ->
- [{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]),
- {ok, _, true} = couch_btree:foldl(Btree, FoldFun,
- {Key, Value}, [{start_key, Key}])
- end, KeyValues).
-
-test_final_reductions(Btree, KeyValues) ->
- KVLen = length(KeyValues),
- FoldLFun = fun(_X, LeadingReds, Acc) ->
- CountToStart = KVLen div 3 + Acc,
- CountToStart = couch_btree:final_reduce(Btree, LeadingReds),
- {ok, Acc + 1}
- end,
- FoldRFun = fun(_X, LeadingReds, Acc) ->
- CountToEnd = KVLen - KVLen div 3 + Acc,
- CountToEnd = couch_btree:final_reduce(Btree, LeadingReds),
- {ok, Acc + 1}
- end,
- {LStartKey, _} = case KVLen of
- 0 -> {nil, nil};
- _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues))
- end,
- {RStartKey, _} = case KVLen of
- 0 -> {nil, nil};
- _ -> lists:nth(KVLen div 3, lists:sort(KeyValues))
- end,
- {ok, _, FoldLRed} = couch_btree:foldl(Btree, FoldLFun, 0,
- [{start_key, LStartKey}]),
- {ok, _, FoldRRed} = couch_btree:fold(Btree, FoldRFun, 0,
- [{dir, rev}, {start_key, RStartKey}]),
- KVLen = FoldLRed + FoldRRed,
- ok.
-
-test_traversal_callbacks(Btree, _KeyValues) ->
- FoldFun = fun
- (visit, _GroupedKey, _Unreduced, Acc) ->
- {ok, Acc andalso false};
- (traverse, _LK, _Red, Acc) ->
- {skip, Acc andalso true}
- end,
- % With 250 items the root is a kp. Always skipping should reduce to true.
- {ok, _, true} = couch_btree:fold(Btree, FoldFun, true, [{dir, fwd}]),
- ok.
diff --git a/src/couch/test/eunit/couch_changes_tests.erl b/src/couch/test/eunit/couch_changes_tests.erl
deleted file mode 100644
index 848b471f9..000000000
--- a/src/couch/test/eunit/couch_changes_tests.erl
+++ /dev/null
@@ -1,962 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_changes_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 6000).
--define(TEST_TIMEOUT, 10000).
-
--record(row, {
- id,
- seq,
- deleted = false,
- doc = nil
-}).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = create_db(DbName),
- Revs = [R || {ok, R} <- [
- save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc5">>}]})
- ]],
- Rev = lists:nth(3, Revs),
- {ok, Db1} = couch_db:reopen(Db),
-
- {ok, Rev1} = save_doc(Db1, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev}]}),
- Revs1 = Revs ++ [Rev1],
- Revs2 = Revs1 ++ [R || {ok, R} <- [
- save_doc(Db1, {[{<<"_id">>, <<"doc6">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"_design/foo">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]})
- ]],
- config:set("native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist=false),
- {DbName, list_to_tuple(Revs2)}.
-
-teardown({DbName, _}) ->
- config:delete("native_query_servers", "erlang", _Persist=false),
- delete_db(DbName),
- ok.
-
-
-changes_test_() ->
- {
- "Changes feed",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- [
- filter_by_selector(),
- filter_by_doc_id(),
- filter_by_design(),
- continuous_feed(),
- %%filter_by_custom_function()
- filter_by_filter_function(),
- filter_by_view()
- ]
- }
- }.
-
-filter_by_doc_id() ->
- {
- "Filter _doc_id",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_filter_by_specific_doc_ids/1,
- fun should_filter_by_specific_doc_ids_descending/1,
- fun should_filter_by_specific_doc_ids_with_since/1,
- fun should_filter_by_specific_doc_ids_no_result/1,
- fun should_handle_deleted_docs/1
- ]
- }
- }.
-
-filter_by_selector() ->
- {
- "Filter _selector",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_select_basic/1,
- fun should_select_with_since/1,
- fun should_select_when_no_result/1,
- fun should_select_with_deleted_docs/1,
- fun should_select_with_continuous/1,
- fun should_stop_selector_when_db_deleted/1,
- fun should_select_with_empty_fields/1,
- fun should_select_with_fields/1
- ]
- }
- }.
-
-
-filter_by_design() ->
- {
- "Filter _design",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_emit_only_design_documents/1
- ]
- }
- }.
-
-%% filter_by_custom_function() ->
-%% {
-%% "Filter function",
-%% {
-%% foreach,
-%% fun setup/0, fun teardown/1,
-%% [
-%% fun should_receive_heartbeats/1
-%% ]
-%% }
-%% }.
-
-filter_by_filter_function() ->
- {
- "Filter by filters",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_filter_by_doc_attribute/1,
- fun should_filter_by_user_ctx/1
- ]
- }
- }.
-
-filter_by_view() ->
- {
- "Filter _view",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_filter_by_view/1,
- fun should_filter_by_erlang_view/1
- ]
- }
- }.
-
-continuous_feed() ->
- {
- "Continuous Feed",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_filter_continuous_feed_by_specific_doc_ids/1,
- fun should_end_changes_when_db_deleted/1
- ]
- }
- }.
-
-
-should_filter_by_specific_doc_ids({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(2, length(Rows)),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- ?assertEqual(<<"doc4">>, Id1),
- ?assertEqual(4, Seq1),
- ?assertEqual(<<"doc3">>, Id2),
- ?assertEqual(6, Seq2),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_filter_by_specific_doc_ids_descending({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids",
- dir = rev
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(2, length(Rows)),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- ?assertEqual(<<"doc3">>, Id1),
- ?assertEqual(6, Seq1),
- ?assertEqual(<<"doc4">>, Id2),
- ?assertEqual(4, Seq2),
- ?assertEqual(4, LastSeq)
- end).
-
-should_filter_by_specific_doc_ids_with_since({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids",
- since = 5
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq1, id = Id1}] = Rows,
- ?assertEqual(<<"doc3">>, Id1),
- ?assertEqual(6, Seq1),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_filter_by_specific_doc_ids_no_result({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids",
- since = 6
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(0, length(Rows)),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_handle_deleted_docs({DbName, Revs}) ->
- ?_test(
- begin
- Rev3_2 = element(6, Revs),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = save_doc(
- Db,
- {[{<<"_id">>, <<"doc3">>},
- {<<"_deleted">>, true},
- {<<"_rev">>, Rev3_2}]}),
-
- ChArgs = #changes_args{
- filter = "_doc_ids",
- since = 9
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(1, length(Rows)),
- ?assertMatch(
- [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}],
- Rows
- ),
- ?assertEqual(11, LastSeq)
- end).
-
-should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) ->
- ?_test(
- begin
- {ok, Db} = couch_db:open_int(DbName, []),
- ChangesArgs = #changes_args{
- filter = "_doc_ids",
- feed = "continuous"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- reset_row_notifications(),
- Consumer = spawn_consumer(DbName, ChangesArgs, Req),
- ?assertEqual(ok, wait_row_notifications(2)),
- ok = pause(Consumer),
-
- Rows = get_rows(Consumer),
- ?assertEqual(2, length(Rows)),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- ?assertEqual(<<"doc4">>, Id1),
- ?assertEqual(4, Seq1),
- ?assertEqual(<<"doc3">>, Id2),
- ?assertEqual(6, Seq2),
-
- clear_rows(Consumer),
- {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
- {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
- ok = unpause(Consumer),
- timer:sleep(100),
- ok = pause(Consumer),
- ?assertEqual([], get_rows(Consumer)),
-
- Rev4 = element(4, Revs),
- Rev3_2 = element(6, Revs),
- {ok, Rev4_2} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4}]}),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4_2}]}),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
- {ok, Rev3_3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>},
- {<<"_rev">>, Rev3_2}]}),
- reset_row_notifications(),
- ok = unpause(Consumer),
- ?assertEqual(ok, wait_row_notifications(2)),
- ok = pause(Consumer),
-
- NewRows = get_rows(Consumer),
- ?assertEqual(2, length(NewRows)),
-            [Row15, Row17] = NewRows,
-            ?assertEqual(<<"doc4">>, Row15#row.id),
-            ?assertEqual(15, Row15#row.seq),
-            ?assertEqual(<<"doc3">>, Row17#row.id),
-            ?assertEqual(17, Row17#row.seq),
-
- clear_rows(Consumer),
- {ok, _Rev3_4} = save_doc(Db, {[{<<"_id">>, <<"doc3">>},
- {<<"_rev">>, Rev3_3}]}),
- reset_row_notifications(),
- ok = unpause(Consumer),
- ?assertEqual(ok, wait_row_notifications(1)),
- ok = pause(Consumer),
-
- FinalRows = get_rows(Consumer),
-
- ok = unpause(Consumer),
- stop_consumer(Consumer),
-
- ?assertMatch([#row{seq = 18, id = <<"doc3">>}], FinalRows)
- end).
-
-
-should_end_changes_when_db_deleted({DbName, _Revs}) ->
- ?_test(begin
- {ok, _Db} = couch_db:open_int(DbName, []),
- ChangesArgs = #changes_args{
- filter = "_doc_ids",
- feed = "continuous"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- Consumer = spawn_consumer(DbName, ChangesArgs, Req),
- ok = pause(Consumer),
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok = unpause(Consumer),
- {_Rows, _LastSeq} = wait_finished(Consumer),
- stop_consumer(Consumer),
- ok
- end).
-
-
-should_select_basic({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector"},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_select_with_since({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector", since = 9},
- GteDoc2 = {[{<<"$gte">>, <<"doc1">>}]},
- Selector = {[{<<"_id">>, GteDoc2}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc8">>, Id),
- ?assertEqual(10, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_select_when_no_result({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector"},
- Selector = {[{<<"_id">>, <<"nopers">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(0, length(Rows)),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_select_with_deleted_docs({DbName, Revs}) ->
- ?_test(
- begin
- Rev3_2 = element(6, Revs),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = save_doc(
- Db,
- {[{<<"_id">>, <<"doc3">>},
- {<<"_deleted">>, true},
- {<<"_rev">>, Rev3_2}]}),
- ChArgs = #changes_args{filter = "_selector"},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req),
- ?assertMatch(
- [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}],
- Rows
- ),
- ?assertEqual(11, LastSeq)
- end).
-
-should_select_with_continuous({DbName, Revs}) ->
- ?_test(
- begin
- {ok, Db} = couch_db:open_int(DbName, []),
- ChArgs = #changes_args{filter = "_selector", feed = "continuous"},
- GteDoc8 = {[{<<"$gte">>, <<"doc8">>}]},
- Selector = {[{<<"_id">>, GteDoc8}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- reset_row_notifications(),
- Consumer = spawn_consumer(DbName, ChArgs, Req),
- ?assertEqual(ok, wait_row_notifications(1)),
- ok = pause(Consumer),
- Rows = get_rows(Consumer),
- ?assertMatch(
- [#row{seq = 10, id = <<"doc8">>, deleted = false}],
- Rows
- ),
- clear_rows(Consumer),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc01">>}]}),
- ok = unpause(Consumer),
- timer:sleep(100),
- ok = pause(Consumer),
- ?assertEqual([], get_rows(Consumer)),
- Rev4 = element(4, Revs),
- Rev8 = element(10, Revs),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc8">>},
- {<<"_rev">>, Rev8}]}),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4}]}),
- reset_row_notifications(),
- ok = unpause(Consumer),
- ?assertEqual(ok, wait_row_notifications(1)),
- ok = pause(Consumer),
- NewRows = get_rows(Consumer),
- ?assertMatch(
- [#row{seq = _, id = <<"doc8">>, deleted = false}],
- NewRows
- )
- end).
-
-should_stop_selector_when_db_deleted({DbName, _Revs}) ->
- ?_test(
- begin
- {ok, _Db} = couch_db:open_int(DbName, []),
- ChArgs = #changes_args{filter = "_selector", feed = "continuous"},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- Consumer = spawn_consumer(DbName, ChArgs, Req),
- ok = pause(Consumer),
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok = unpause(Consumer),
- {_Rows, _LastSeq} = wait_finished(Consumer),
- stop_consumer(Consumer),
- ok
- end).
-
-
-should_select_with_empty_fields({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector", include_docs=true},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector},
- {<<"fields">>, []}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id, doc = Doc}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq),
- ?assertMatch({[{_K1, _V1}, {_K2, _V2}]}, Doc)
- end).
-
-should_select_with_fields({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector", include_docs=true},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector},
- {<<"fields">>, [<<"_id">>, <<"nope">>]}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id, doc = Doc}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq),
-        ?assertEqual({[{<<"_id">>, <<"doc3">>}]}, Doc)
- end).
-
-
-should_emit_only_design_documents({DbName, Revs}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_design"
- },
- Req = {json_req, null},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(1, length(Rows)),
- ?assertEqual(UpSeq, LastSeq),
- ?assertEqual([#row{seq = 8, id = <<"_design/foo">>}], Rows),
-
-
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"_design/foo">>},
- {<<"_rev">>, element(8, Revs)},
- {<<"_deleted">>, true}]}),
-
- couch_db:close(Db),
- {Rows2, LastSeq2, _} = run_changes_query(DbName, ChArgs, Req),
-
- UpSeq2 = UpSeq + 1,
-
- ?assertEqual(1, length(Rows2)),
- ?assertEqual(UpSeq2, LastSeq2),
- ?assertEqual([#row{seq = 11,
- id = <<"_design/foo">>,
- deleted = true}],
- Rows2)
- end).
-
-%% should_receive_heartbeats(_) ->
-%% {timeout, ?TEST_TIMEOUT div 1000,
-%% ?_test(
-%% begin
-%% DbName = ?tempdb(),
-%% Timeout = 100,
-%% {ok, Db} = create_db(DbName),
-
-%% {ok, _} = save_doc(Db, {[
-%% {<<"_id">>, <<"_design/filtered">>},
-%% {<<"language">>, <<"javascript">>},
-%% {<<"filters">>, {[
-%% {<<"foo">>, <<"function(doc) {
-%% return ['doc10', 'doc11', 'doc12'].indexOf(doc._id) != -1;}">>
-%% }]}}
-%% ]}),
-
-%% ChangesArgs = #changes_args{
-%% filter = "filtered/foo",
-%% feed = "continuous",
-%% timeout = 10000,
-%% heartbeat = 1000
-%% },
-%% Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
-
-%% {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
-
-%% Heartbeats = get_heartbeats(Consumer),
-%% ?assert(Heartbeats > 0),
-
-%% {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
-
-%% Heartbeats2 = get_heartbeats(Consumer),
-%% ?assert(Heartbeats2 > Heartbeats),
-
-%% Rows = get_rows(Consumer),
-%% ?assertEqual(3, length(Rows)),
-
-%% {ok, _Rev13} = save_doc(Db, {[{<<"_id">>, <<"doc13">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev14} = save_doc(Db, {[{<<"_id">>, <<"doc14">>}]}),
-%% timer:sleep(Timeout),
-
-%% Heartbeats3 = get_heartbeats(Consumer),
-%% ?assert(Heartbeats3 > Heartbeats2)
-%% end)}.
-
-should_filter_by_doc_attribute({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"filters">>, {[
- {<<"valid">>, <<"function(doc, req) {"
- " if (doc._id == 'doc3') {"
- " return true; "
- "} }">>}
- ]}}
- ]}),
- ChArgs = #changes_args{filter = "app/valid"},
- Req = {json_req, null},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_filter_by_user_ctx({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"filters">>, {[
- {<<"valid">>, <<"function(doc, req) {"
- " if (req.userCtx.name == doc._id) {"
- " return true; "
- "} }">>}
- ]}}
- ]}),
- ChArgs = #changes_args{filter = "app/valid"},
- UserCtx = #user_ctx{name = <<"doc3">>, roles = []},
- {ok, DbRec} = couch_db:clustered_db(DbName, UserCtx),
- Req = {json_req, {[{
- <<"userCtx">>, couch_util:json_user_ctx(DbRec)
- }]}},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_filter_by_view({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"valid">>, {[
- {<<"map">>, <<"function(doc) {"
- " if (doc._id == 'doc3') {"
- " emit(doc); "
- "} }">>}
- ]}}
- ]}}
- ]}),
- ChArgs = #changes_args{filter = "_view"},
- Req = {json_req, {[{
- <<"query">>, {[
- {<<"view">>, <<"app/valid">>}
- ]}
- }]}},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_filter_by_erlang_view({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"erlang">>},
- {<<"views">>, {[
- {<<"valid">>, {[
- {<<"map">>, <<"fun({Doc}) ->"
- " case lists:keyfind(<<\"_id\">>, 1, Doc) of"
- " {<<\"_id\">>, <<\"doc3\">>} -> Emit(Doc, null); "
- " false -> ok"
- " end "
- "end.">>}
- ]}}
- ]}}
- ]}),
- ChArgs = #changes_args{filter = "_view"},
- Req = {json_req, {[{
- <<"query">>, {[
- {<<"view">>, <<"app/valid">>}
- ]}
- }]}},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-update_ddoc(DbName, DDoc) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db).
-
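-%% Runs a normal (non-continuous) changes feed to completion and returns
-%% the collected rows, the feed's reported last_seq, and the database's
-%% update_seq so tests can compare the two.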
-run_changes_query(DbName, ChangesArgs, Opts) ->
- Consumer = spawn_consumer(DbName, ChangesArgs, Opts),
- {Rows, LastSeq} = wait_finished(Consumer),
- {ok, Db} = couch_db:open_int(DbName, []),
- UpSeq = couch_db:get_update_seq(Db),
- couch_db:close(Db),
- stop_consumer(Consumer),
- {Rows, LastSeq, UpSeq}.
-
-save_doc(Db, Json) ->
- Doc = couch_doc:from_json_obj(Json),
- {ok, Rev} = couch_db:update_doc(Db, Doc, []),
- {ok, couch_doc:rev_to_str(Rev)}.
-
-get_rows({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {get_rows, Ref},
- Resp = receive
- {rows, Ref, Rows} ->
- Rows
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-%% get_heartbeats({Consumer, _}) ->
-%% Ref = make_ref(),
-%% Consumer ! {get_heartbeats, Ref},
-%% Resp = receive
-%%         {heartbeats, Ref, HeartBeats} ->
-%% HeartBeats
-%% after ?TIMEOUT ->
-%% timeout
-%% end,
-%% ?assertNotEqual(timeout, Resp),
-%% Resp.
-
-clear_rows({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {reset, Ref},
- Resp = receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-stop_consumer({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {stop, Ref},
- Resp = receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-pause({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {pause, Ref},
- Resp = receive
- {paused, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-unpause({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {continue, Ref},
- Resp = receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
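-%% Blocks until the consumer reports {consumer_finished, Rows, LastSeq} or
-%% its monitor fires; abnormal exits and timeouts are turned into test
-%% errors.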
-wait_finished({_, ConsumerRef}) ->
- receive
- {consumer_finished, Rows, LastSeq} ->
- {Rows, LastSeq};
- {'DOWN', ConsumerRef, _, _, Msg} when Msg == normal; Msg == ok ->
- ok;
- {'DOWN', ConsumerRef, _, _, Msg} ->
- erlang:error({consumer_died, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, Msg}
- ]})
- after ?TIMEOUT ->
- erlang:error({consumer_died, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, timeout}
- ]})
- end.
-
-
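-%% The parent's mailbox doubles as the row-notification channel: the
-%% consumer sends a bare `row` message per change, reset_row_notifications/0
-%% drains stale ones, and wait_row_notifications/1 waits for N more.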
-reset_row_notifications() ->
- receive
- row ->
- reset_row_notifications()
- after 0 ->
- ok
- end.
-
-
-wait_row_notifications(N) ->
- receive
- row when N == 1 ->
- ok;
- row when N > 1 ->
- wait_row_notifications(N - 1)
- after ?TIMEOUT ->
- timeout
- end.
-
-
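-%% Spawns a monitored helper process that drives the changes feed. Each
-%% change notifies the parent with a `row` message and is accumulated in
-%% Acc; the parent steers the consumer via get_rows / reset / pause /
-%% continue / stop messages, each acknowledged with the caller's ref.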
-spawn_consumer(DbName, ChangesArgs0, Req) ->
- Parent = self(),
- spawn_monitor(fun() ->
- put(heartbeat_count, 0),
- Callback = fun
- ({change, {Change}, _}, _, Acc) ->
- Id = couch_util:get_value(<<"id">>, Change),
- Seq = couch_util:get_value(<<"seq">>, Change),
- Del = couch_util:get_value(<<"deleted">>, Change, false),
- Doc = couch_util:get_value(doc, Change, nil),
- Parent ! row,
- [#row{id = Id, seq = Seq, deleted = Del, doc = Doc} | Acc];
- ({stop, LastSeq}, _, Acc) ->
- Parent ! {consumer_finished, lists:reverse(Acc), LastSeq},
- stop_loop(Parent, Acc);
- (timeout, _, Acc) ->
- put(heartbeat_count, get(heartbeat_count) + 1),
- maybe_pause(Parent, Acc);
- (_, _, Acc) ->
- maybe_pause(Parent, Acc)
- end,
- {ok, Db} = couch_db:open_int(DbName, []),
- ChangesArgs = case (ChangesArgs0#changes_args.timeout =:= undefined)
- andalso (ChangesArgs0#changes_args.heartbeat =:= undefined) of
- true ->
- ChangesArgs0#changes_args{timeout = 1000, heartbeat = 100};
- false ->
- ChangesArgs0
- end,
- FeedFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db),
- try
- FeedFun({Callback, []})
- catch
- throw:{stop, _} -> ok;
- _:Error -> exit(Error)
- after
- couch_db:close(Db)
- end
- end).
-
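-%% Called between feed callbacks: drains pending control messages without
-%% blocking (note the `after 0`) and hands off to pause_loop/2 on request,
-%% so the feed only advances while the consumer is unpaused.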
-maybe_pause(Parent, Acc) ->
- receive
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- maybe_pause(Parent, Acc);
- {get_heartbeats, Ref} ->
-            Parent ! {heartbeats, Ref, get(heartbeat_count)},
- maybe_pause(Parent, Acc);
- {reset, Ref} ->
- Parent ! {ok, Ref},
- maybe_pause(Parent, []);
- {pause, Ref} ->
- Parent ! {paused, Ref},
- pause_loop(Parent, Acc);
- {stop, Ref} ->
- Parent ! {ok, Ref},
- throw({stop, Acc});
- V when V /= updated ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {value, V},
- {reason, "Received unexpected message"}]})
- after 0 ->
- Acc
- end.
-
-pause_loop(Parent, Acc) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref},
- throw({stop, Acc});
- {reset, Ref} ->
- Parent ! {ok, Ref},
- pause_loop(Parent, []);
- {continue, Ref} ->
- Parent ! {ok, Ref},
- Acc;
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- pause_loop(Parent, Acc)
- end.
-
-stop_loop(Parent, Acc) ->
- receive
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- stop_loop(Parent, Acc);
- {stop, Ref} ->
- Parent ! {ok, Ref},
- Acc
- end.
-
-create_db(DbName) ->
- couch_db:create(DbName, [?ADMIN_CTX, overwrite]).
-
-delete_db(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]).
diff --git a/src/couch/test/eunit/couch_db_doc_tests.erl b/src/couch/test/eunit/couch_db_doc_tests.erl
deleted file mode 100644
index 916b63207..000000000
--- a/src/couch/test/eunit/couch_db_doc_tests.erl
+++ /dev/null
@@ -1,121 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_doc_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-start() ->
- test_util:start_couch([ioq]).
-
-
-setup() ->
- DbName = ?tempdb(),
- config:set("couchdb", "stem_interactive_updates", "false", false),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
- DbName.
-
-
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-
-couch_db_doc_test_() ->
- {
- "CouchDB doc tests",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_truncate_number_of_revisions/1,
- fun should_raise_bad_request_on_invalid_rev/1,
- fun should_allow_access_in_doc_keys_test/1
- ]
- }
- }
- }.
-
-
-should_truncate_number_of_revisions(DbName) ->
- DocId = <<"foo">>,
- Db = open_db(DbName),
- couch_db:set_revs_limit(Db, 5),
- Rev = create_doc(Db, DocId),
- Rev10 = add_revisions(Db, DocId, Rev, 10),
- {ok, [{ok, #doc{revs = {11, Revs}}}]} = open_doc_rev(Db, DocId, Rev10),
- ?_assertEqual(5, length(Revs)).
-
-
-should_raise_bad_request_on_invalid_rev(DbName) ->
- DocId = <<"foo">>,
- InvalidRev1 = <<"foo">>,
- InvalidRev2 = <<"a-foo">>,
- InvalidRev3 = <<"1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>,
- Expect = {bad_request, <<"Invalid rev format">>},
- Db = open_db(DbName),
- create_doc(Db, DocId),
- [
- {InvalidRev1,
- ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev1, 1))},
- {InvalidRev2,
- ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev2, 1))},
- {InvalidRev3,
- ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev3, 1))}
- ].
-
-should_allow_access_in_doc_keys_test(_DbName) ->
- Json = <<"{\"_id\":\"foo\",\"_access\":[\"test\"]}">>,
- EJson = couch_util:json_decode(Json),
- Expected = {[{<<"_id">>,<<"foo">>}, {<<"_access">>, [<<"test">>]}]},
- EJson = Expected,
- Doc = couch_doc:from_json_obj(EJson),
- NewEJson = couch_doc:to_json_obj(Doc, []),
-    ?_assertEqual(Expected, NewEJson).
-
-open_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- Db.
-
-
-create_doc(Db, DocId) ->
- add_revision(Db, DocId, undefined).
-
-
-open_doc_rev(Db0, DocId, Rev) ->
- {ok, Db} = couch_db:reopen(Db0),
- couch_db:open_doc_revs(Db, DocId, [couch_doc:parse_rev(Rev)], []).
-
-
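-%% Normalizes the third argument: `undefined` creates the doc, a binary is
-%% treated as the _rev to extend, and a property list is appended to the
-%% doc body before the update.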
-add_revision(Db, DocId, undefined) ->
- add_revision(Db, DocId, []);
-add_revision(Db, DocId, Rev) when is_binary(Rev) ->
- add_revision(Db, DocId, [{<<"_rev">>, Rev}]);
-add_revision(Db0, DocId, Rev) ->
- {ok, Db} = couch_db:reopen(Db0),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"value">>, DocId}
- ] ++ Rev}),
- {ok, NewRev} = couch_db:update_doc(Db, Doc, []),
- couch_doc:rev_to_str(NewRev).
-
-
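-%% Folds add_revision/3 N times, threading each returned rev string through
-%% so every update extends the same revision path.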
-add_revisions(Db, DocId, Rev, N) ->
- lists:foldl(fun(_, OldRev) ->
- add_revision(Db, DocId, OldRev)
- end, Rev, lists:seq(1, N)).
diff --git a/src/couch/test/eunit/couch_db_mpr_tests.erl b/src/couch/test/eunit/couch_db_mpr_tests.erl
deleted file mode 100644
index bb97c66d7..000000000
--- a/src/couch/test/eunit/couch_db_mpr_tests.erl
+++ /dev/null
@@ -1,137 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_mpr_tests).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 30).
-
--define(USER, "couch_db_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(JSON_BODY, "{\"foo\": \"bar\"}").
--define(CONTENT_MULTI_RELATED,
- {"Content-Type", "multipart/related;boundary=\"bound\""}).
-
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
- TmpDb = ?tempdb(),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- Url.
-
-
-teardown(Url) ->
- catch delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist=false).
-
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-
-create_doc(Url, Id, Body, Type) ->
- test_request:put(Url ++ "/" ++ Id, [Type, ?AUTH], Body).
-
-
-delete_doc(Url, Id, Rev) ->
- test_request:delete(Url ++ "/" ++ Id ++ "?rev=" ++ ?b2l(Rev)).
-
-
-couch_db_mpr_test_() ->
- {
- "multi-part attachment tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun recreate_with_mpr/1
- ]
- }
- }
- }.
-
-
-recreate_with_mpr(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocId1 = "foo",
- DocId2 = "bar",
-
- create_db(Url),
- create_and_delete_doc(Url, DocId1),
- Rev1 = create_with_mpr(Url, DocId1),
- delete_db(Url),
-
- create_db(Url),
- create_and_delete_doc(Url, DocId1),
- % We create a second unrelated doc to change the
- % position on disk where the attachment is written
- % so that we can assert that the position on disk
- % is not included when calculating a revision.
- create_and_delete_doc(Url, DocId2),
- Rev2 = create_with_mpr(Url, DocId1),
- delete_db(Url),
-
- ?assertEqual(Rev1, Rev2)
- end)}.
-
-
-create_and_delete_doc(Url, DocId) ->
- {ok, _, _, Resp} = create_doc(Url, DocId, ?JSON_BODY, ?CONTENT_JSON),
- {Props} = ?JSON_DECODE(Resp),
- Rev = couch_util:get_value(<<"rev">>, Props, undefined),
- ?assert(is_binary(Rev)),
- {ok, _, _, _} = delete_doc(Url, DocId, Rev).
-
-
-create_with_mpr(Url, DocId) ->
- {ok, _, _, Resp} = create_doc(Url, DocId, mpr(), ?CONTENT_MULTI_RELATED),
- {Props} = ?JSON_DECODE(Resp),
- Rev = couch_util:get_value(<<"rev">>, Props, undefined),
- ?assert(is_binary(Rev)),
- Rev.
-
-
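-% Builds the raw multipart/related body: a JSON part whose _attachments
-% entry declares "follows": true, followed by the 21-byte attachment
-% content, with "bound" as the MIME boundary.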
-mpr() ->
- lists:concat([
- "--bound\r\n",
- "Content-Type: application/json\r\n\r\n",
- "{",
- "\"body\":\"stuff\","
- "\"_attachments\":",
- "{\"foo.txt\":{",
- "\"follows\":true,",
- "\"content_type\":\"text/plain\","
- "\"length\":21",
- "}}"
- "}",
- "\r\n--bound\r\n\r\n",
- "this is 21 chars long",
- "\r\n--bound--epilogue"
- ]).
diff --git a/src/couch/test/eunit/couch_db_plugin_tests.erl b/src/couch/test/eunit/couch_db_plugin_tests.erl
deleted file mode 100644
index 93551adbc..000000000
--- a/src/couch/test/eunit/couch_db_plugin_tests.erl
+++ /dev/null
@@ -1,205 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_plugin_tests).
-
--export([
- validate_dbname/2,
- before_doc_update/3,
- after_doc_read/2,
- validate_docid/1,
- check_is_admin/1,
- on_delete/2
-]).
-
--export([ %% couch_epi_plugin behaviour
- app/0,
- providers/0,
- services/0,
- data_providers/0,
- data_subscriptions/0,
- processes/0,
- notify/3
-]).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-%% couch_epi_plugin behaviour
-
-app() -> test_app.
-providers() -> [{couch_db, ?MODULE}].
-services() -> [].
-data_providers() -> [].
-data_subscriptions() -> [].
-processes() -> [].
-notify(_, _, _) -> ok.
-fake_db() -> test_util:fake_db([]).
-
-setup() ->
- couch_tests:setup([
- couch_epi_dispatch:dispatch(chttpd, ?MODULE)
- ]).
-
-teardown(Ctx) ->
- couch_tests:teardown(Ctx).
-
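-%% Each callback below keys its behaviour off a tagged argument: `true` and
-%% `false` force a decision, `fail` throws, and (for validate_dbname only)
-%% `pass` defers to the default implementation.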
-validate_dbname({true, _Db}, _) -> {decided, true};
-validate_dbname({false, _Db}, _) -> {decided, false};
-validate_dbname({fail, _Db}, _) -> throw(validate_dbname);
-validate_dbname({pass, _Db}, _) -> no_decision.
-
-before_doc_update({fail, _Doc}, _Db, interactive_edit) -> throw(before_doc_update);
-before_doc_update({true, Doc}, Db, interactive_edit) -> [{true, [before_doc_update|Doc]}, Db, interactive_edit];
-before_doc_update({false, Doc}, Db, interactive_edit) -> [{false, Doc}, Db, interactive_edit].
-
-after_doc_read({fail, _Doc}, _Db) -> throw(after_doc_read);
-after_doc_read({true, Doc}, Db) -> [{true, [after_doc_read|Doc]}, Db];
-after_doc_read({false, Doc}, Db) -> [{false, Doc}, Db].
-
-validate_docid({true, _Id}) -> true;
-validate_docid({false, _Id}) -> false;
-validate_docid({fail, _Id}) -> throw(validate_docid).
-
-check_is_admin({true, _Db}) -> true;
-check_is_admin({false, _Db}) -> false;
-check_is_admin({fail, _Db}) -> throw(check_is_admin).
-
-on_delete(true, _Opts) -> true;
-on_delete(false, _Opts) -> false;
-on_delete(fail, _Opts) -> throw(on_delete).
-
-callback_test_() ->
- {
- "callback tests",
- {
- setup, fun setup/0, fun teardown/1,
- [
- {"validate_dbname_match", fun validate_dbname_match/0},
- {"validate_dbname_no_match", fun validate_dbname_no_match/0},
- {"validate_dbname_throw", fun validate_dbname_throw/0},
- {"validate_dbname_pass", fun validate_dbname_pass/0},
-
- {"before_doc_update_match", fun before_doc_update_match/0},
- {"before_doc_update_no_match", fun before_doc_update_no_match/0},
- {"before_doc_update_throw", fun before_doc_update_throw/0},
-
- {"after_doc_read_match", fun after_doc_read_match/0},
- {"after_doc_read_no_match", fun after_doc_read_no_match/0},
- {"after_doc_read_throw", fun after_doc_read_throw/0},
-
- {"validate_docid_match", fun validate_docid_match/0},
- {"validate_docid_no_match", fun validate_docid_no_match/0},
- {"validate_docid_throw", fun validate_docid_throw/0},
-
- {"check_is_admin_match", fun check_is_admin_match/0},
- {"check_is_admin_no_match", fun check_is_admin_no_match/0},
- {"check_is_admin_throw", fun check_is_admin_throw/0},
-
- {"on_delete_match", fun on_delete_match/0},
- {"on_delete_no_match", fun on_delete_no_match/0},
- {"on_delete_throw", fun on_delete_throw/0}
- ]
- }
- }.
-
-
-validate_dbname_match() ->
- ?assert(couch_db_plugin:validate_dbname(
- {true, [db]}, db, fun(_, _) -> pass end)).
-
-validate_dbname_no_match() ->
- ?assertNot(couch_db_plugin:validate_dbname(
- {false, [db]}, db, fun(_, _) -> pass end)).
-
-validate_dbname_throw() ->
- ?assertThrow(
- validate_dbname,
- couch_db_plugin:validate_dbname(
- {fail, [db]}, db, fun(_, _) -> pass end)).
-
-validate_dbname_pass() ->
- ?assertEqual(pass, couch_db_plugin:validate_dbname(
- {pass, [db]}, db, fun(_, _) -> pass end)).
-
-before_doc_update_match() ->
- ?assertMatch(
- {true, [before_doc_update, doc]},
- couch_db_plugin:before_doc_update(
- fake_db(), {true, [doc]}, interactive_edit)).
-
-before_doc_update_no_match() ->
- ?assertMatch(
- {false, [doc]},
- couch_db_plugin:before_doc_update(
- fake_db(), {false, [doc]}, interactive_edit)).
-
-before_doc_update_throw() ->
- ?assertThrow(
- before_doc_update,
- couch_db_plugin:before_doc_update(
- fake_db(), {fail, [doc]}, interactive_edit)).
-
-
-after_doc_read_match() ->
- ?assertMatch(
- {true, [after_doc_read, doc]},
- couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})).
-
-after_doc_read_no_match() ->
- ?assertMatch(
- {false, [doc]},
- couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})).
-
-after_doc_read_throw() ->
- ?assertThrow(
- after_doc_read,
- couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})).
-
-
-validate_docid_match() ->
- ?assert(couch_db_plugin:validate_docid({true, [doc]})).
-
-validate_docid_no_match() ->
- ?assertNot(couch_db_plugin:validate_docid({false, [doc]})).
-
-validate_docid_throw() ->
- ?assertThrow(
- validate_docid,
- couch_db_plugin:validate_docid({fail, [doc]})).
-
-
-check_is_admin_match() ->
- ?assert(couch_db_plugin:check_is_admin({true, [db]})).
-
-check_is_admin_no_match() ->
- ?assertNot(couch_db_plugin:check_is_admin({false, [db]})).
-
-check_is_admin_throw() ->
- ?assertThrow(
- check_is_admin,
- couch_db_plugin:check_is_admin({fail, [db]})).
-
-on_delete_match() ->
- ?assertMatch(
- [true],
- couch_db_plugin:on_delete(true, [])).
-
-on_delete_no_match() ->
- ?assertMatch(
- [false],
- couch_db_plugin:on_delete(false, [])).
-
-on_delete_throw() ->
- ?assertThrow(
- on_delete,
- couch_db_plugin:on_delete(fail, [])).
diff --git a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
deleted file mode 100644
index 40ad283cf..000000000
--- a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
+++ /dev/null
@@ -1,83 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_props_upgrade_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-setup() ->
- DbName = <<"test">>,
- DbFileName = "test.couch",
- OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
-
- DbDir = config:get("couchdb", "database_dir"),
- NewDbFilePath = filename:join([DbDir, DbFileName]),
-
- file:delete(NewDbFilePath),
- {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
-
- DbName.
-
-
-teardown(DbName) when is_binary(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-
-old_db_info_test_() ->
- {
- "Old database versions work",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun can_get_props/1,
- fun can_get_db_info/1,
- fun can_compact_db/1
- ]
- }
- }
- }.
-
-
-can_get_props(DbName) ->
- ?_test(begin
- {ok, Db} = couch_db:open_int(DbName, []),
- Props = couch_db_engine:get_props(Db),
- ?assert(is_list(Props))
- end).
-
-
-can_get_db_info(DbName) ->
- ?_test(begin
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- Props = couch_util:get_value(props, Info),
- ?assertEqual({[]}, Props)
- end).
-
-
-can_compact_db(DbName) ->
- ?_test(begin
- couch_util:with_db(DbName, fun(Db) ->
- couch_db:start_compact(Db),
- couch_db:wait_for_compaction(Db)
- end)
- end).
diff --git a/src/couch/test/eunit/couch_db_split_tests.erl b/src/couch/test/eunit/couch_db_split_tests.erl
deleted file mode 100644
index 6e24c36ee..000000000
--- a/src/couch/test/eunit/couch_db_split_tests.erl
+++ /dev/null
@@ -1,331 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_split_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(RINGTOP, 2 bsl 31).
--define(TIMEOUT, 60). % seconds
-
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-
-teardown(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- FilePath = couch_db:get_filepath(Db),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath).
-
-
-split_test_() ->
- Cases = [
- {"Should split an empty shard", 0, 2},
- {"Should split shard in half", 100, 2},
- {"Should split shard in three", 99, 3},
- {"Should split shard in four", 100, 4}
- ],
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop/1,
- [
- {
- foreachx,
- fun(_) -> setup() end, fun(_, St) -> teardown(St) end,
- [{Case, fun should_split_shard/2} || Case <- Cases]
- },
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_fail_on_missing_source/1,
- fun should_fail_on_existing_target/1,
- fun should_fail_on_invalid_target_name/1,
- fun should_crash_on_invalid_tmap/1,
- fun should_fail_on_opened_target/1
- ]
- }
- ]
- }.
-
-
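-%% Splits the source into Q targets and checks, for every target shard,
-%% that its file exists, its update_seq matches the source's, and each doc
-%% it holds lands in that target's range according to the pick function.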
-should_split_shard({Desc, TotalDocs, Q}, DbName) ->
- {ok, ExpectSeq} = create_docs(DbName, TotalDocs),
- Ranges = make_ranges(Q),
- TMap = make_targets(Ranges),
- DocsPerRange = TotalDocs div Q,
- PickFun = make_pickfun(DocsPerRange),
- {Desc, timeout, ?TIMEOUT, ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- maps:map(fun(Range, Name) ->
- {ok, Db} = couch_db:open_int(Name, []),
- FilePath = couch_db:get_filepath(Db),
- %% target actually exists
- ?assertMatch({ok, _}, file:read_file_info(FilePath)),
- %% target's update seq is the same as source's update seq
- USeq = couch_db:get_update_seq(Db),
- ?assertEqual(ExpectSeq, USeq),
-            %% target shard has all the docs expected in its range
- {ok, DocsInShard} = couch_db:fold_docs(Db, fun(FDI, Acc) ->
- DocId = FDI#full_doc_info.id,
- ExpectedRange = PickFun(DocId, Ranges, undefined),
- ?assertEqual(ExpectedRange, Range),
- {ok, Acc + 1}
- end, 0),
- ?assertEqual(DocsPerRange, DocsInShard),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath)
- end, TMap)
- end)}.
-
-
-should_fail_on_missing_source(_DbName) ->
- DbName = ?tempdb(),
- Ranges = make_ranges(2),
- TMap = make_targets(Ranges),
- Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
- ?_assertEqual({error, missing_source}, Response).
-
-
-should_fail_on_existing_target(DbName) ->
- Ranges = make_ranges(2),
- TMap = maps:map(fun(_, TName) ->
- % We create the target but make sure to remove it from the cache so we
-        % hit the eexist error instead of already_opened
- {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]),
- Pid = couch_db:get_pid(Db),
- ok = couch_db:close(Db),
- exit(Pid, kill),
- test_util:wait(fun() ->
- case ets:lookup(couch_dbs, TName) of
- [] -> ok;
- [_ | _] -> wait
- end
- end),
- TName
- end, make_targets(Ranges)),
- Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
- ?_assertMatch({error, {target_create_error, _, eexist}}, Response).
-
-
-should_fail_on_invalid_target_name(DbName) ->
- Ranges = make_ranges(2),
- TMap = maps:map(fun([B, _], _) ->
- iolist_to_binary(["_$", couch_util:to_hex(<<B:32/integer>>)])
- end, make_targets(Ranges)),
- Expect = {error, {target_create_error, <<"_$00000000">>,
- {illegal_database_name, <<"_$00000000">>}}},
- Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
- ?_assertMatch(Expect, Response).
-
-
-should_crash_on_invalid_tmap(DbName) ->
- Ranges = make_ranges(1),
- TMap = make_targets(Ranges),
- ?_assertError(function_clause,
- couch_db_split:split(DbName, TMap, fun fake_pickfun/3)).
-
-
-should_fail_on_opened_target(DbName) ->
- Ranges = make_ranges(2),
- TMap = maps:map(fun(_, TName) ->
- % We create and keep the target open but delete
- % its file on disk so we don't fail with eexist
- {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]),
- FilePath = couch_db:get_filepath(Db),
- ok = file:delete(FilePath),
- TName
- end, make_targets(Ranges)),
- ?_assertMatch({error, {target_create_error, _, already_opened}},
- couch_db_split:split(DbName, TMap, fun fake_pickfun/3)).
-
-
-copy_local_docs_test_() ->
- Cases = [
- {"Should work with no docs", 0, 2},
- {"Should copy local docs after split in two", 100, 2},
- {"Should copy local docs after split in three", 99, 3},
- {"Should copy local docs after split in four", 100, 4}
- ],
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop/1,
- [
- {
- foreachx,
- fun(_) -> setup() end, fun(_, St) -> teardown(St) end,
- [{Case, fun should_copy_local_docs/2} || Case <- Cases]
- },
- {"Should return error on missing source",
- fun should_fail_copy_local_on_missing_source/0}
- ]
- }.
-
-
-should_copy_local_docs({Desc, TotalDocs, Q}, DbName) ->
- {ok, ExpectSeq} = create_docs(DbName, TotalDocs),
- Ranges = make_ranges(Q),
- TMap = make_targets(Ranges),
- DocsPerRange = TotalDocs div Q,
- PickFun = make_pickfun(DocsPerRange),
- {Desc, timeout, ?TIMEOUT, ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
- ?assertEqual(ok, Response),
- maps:map(fun(Range, Name) ->
- {ok, Db} = couch_db:open_int(Name, []),
- FilePath = couch_db:get_filepath(Db),
-            %% target shard has all the docs expected in its range
- {ok, DocsInShard} = couch_db:fold_local_docs(Db, fun(Doc, Acc) ->
- DocId = Doc#doc.id,
- ExpectedRange = PickFun(DocId, Ranges, undefined),
- ?assertEqual(ExpectedRange, Range),
- {ok, Acc + 1}
- end, 0, []),
- ?assertEqual(DocsPerRange, DocsInShard),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath)
- end, TMap)
- end)}.
-
-
-should_fail_copy_local_on_missing_source() ->
- DbName = ?tempdb(),
- Ranges = make_ranges(2),
- TMap = make_targets(Ranges),
- PickFun = fun fake_pickfun/3,
- Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
- ?assertEqual({error, missing_source}, Response).
-
-
-cleanup_target_test_() ->
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop/1,
- [
- {
- setup,
- fun setup/0, fun teardown/1,
- fun should_delete_existing_targets/1
- },
- {"Should return error on missing source",
- fun should_fail_cleanup_target_on_missing_source/0}
- ]
- }.
-
-
-should_delete_existing_targets(SourceName) ->
- {ok, ExpectSeq} = create_docs(SourceName, 100),
- Ranges = make_ranges(2),
- TMap = make_targets(Ranges),
- PickFun = make_pickfun(50),
- ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(SourceName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- maps:map(fun(_Range, TargetName) ->
- FilePath = couch_util:with_db(TargetName, fun(Db) ->
- couch_db:get_filepath(Db)
- end),
- ?assertMatch({ok, _}, file:read_file_info(FilePath)),
- Response = couch_db_split:cleanup_target(SourceName, TargetName),
- ?assertEqual(ok, Response),
- ?assertEqual({error, enoent}, file:read_file_info(FilePath))
- end, TMap)
- end).
-
-
-should_fail_cleanup_target_on_missing_source() ->
- SourceName = ?tempdb(),
- TargetName = ?tempdb(),
- Response = couch_db_split:cleanup_target(SourceName, TargetName),
- ?assertEqual({error, missing_source}, Response).
-
-
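-%% Returns a deterministic stand-in for the shard pick function: it ignores
-%% the hash function and routes docs to ranges purely by their integer ids,
-%% so every range ends up with exactly DocsPerRange docs.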
-make_pickfun(DocsPerRange) ->
- fun(DocId, Ranges, _HashFun) ->
- Id = docid_to_integer(DocId),
- case {Id div DocsPerRange, Id rem DocsPerRange} of
- {N, 0} ->
- lists:nth(N, Ranges);
- {N, _} ->
- lists:nth(N + 1, Ranges)
- end
- end.
-
-
-fake_pickfun(_, Ranges, _) ->
- hd(Ranges).
-
-
-make_targets([]) ->
- maps:new();
-make_targets(Ranges) ->
- Targets = lists:map(fun(Range) ->
- {Range, ?tempdb()}
- end, Ranges),
- maps:from_list(Targets).
-
-
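-%% Partitions the hash ring [0, ?RINGTOP - 1] (?RINGTOP = 2 bsl 31 = 2^32)
-%% into Q contiguous [Begin, End] pairs, mirroring the ranges of a Q-shard
-%% database.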
-make_ranges(Q) when Q > 0 ->
- Incr = (2 bsl 31) div Q,
- lists:map(fun
- (End) when End >= ?RINGTOP - 1 ->
- [End - Incr, ?RINGTOP - 1];
- (End) ->
- [End - Incr, End - 1]
- end, lists:seq(Incr, ?RINGTOP, Incr));
-make_ranges(_) ->
- [].
-
-
-create_docs(DbName, 0) ->
- couch_util:with_db(DbName, fun(Db) ->
- UpdateSeq = couch_db:get_update_seq(Db),
- {ok, UpdateSeq}
- end);
-create_docs(DbName, DocNum) ->
- Docs = lists:foldl(fun(I, Acc) ->
- [create_doc(I), create_local_doc(I) | Acc]
- end, [], lists:seq(DocNum, 1, -1)),
- couch_util:with_db(DbName, fun(Db) ->
- {ok, _Result} = couch_db:update_docs(Db, Docs),
- {ok, Db1} = couch_db:reopen(Db),
- UpdateSeq = couch_db:get_update_seq(Db1),
- {ok, UpdateSeq}
- end).
-
-
-create_doc(I) ->
- create_prefix_id_doc(I, "").
-
-
-create_local_doc(I) ->
- create_prefix_id_doc(I, "_local/").
-
-
-create_prefix_id_doc(I, Prefix) ->
- Id = iolist_to_binary(io_lib:format(Prefix ++ "~3..0B", [I])),
- couch_doc:from_json_obj({[{<<"_id">>, Id}, {<<"value">>, I}]}).
-
-
-docid_to_integer(<<"_local/", DocId/binary>>) ->
- docid_to_integer(DocId);
-docid_to_integer(DocId) ->
- list_to_integer(binary_to_list(DocId)).
diff --git a/src/couch/test/eunit/couch_db_tests.erl b/src/couch/test/eunit/couch_db_tests.erl
deleted file mode 100644
index dd2cb427d..000000000
--- a/src/couch/test/eunit/couch_db_tests.erl
+++ /dev/null
@@ -1,198 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(TIMEOUT, 120).
-
-
-create_delete_db_test_()->
- {
- "Database create/delete tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun() -> ?tempdb() end,
- [
- fun should_create_db/1,
- fun should_delete_db/1
- ]
- }
- }
- }.
-
-create_delete_multiple_dbs_test_()->
- {
- "Multiple database create/delete tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun() -> [?tempdb() || _ <- lists:seq(1, 6)] end,
- [
- fun should_create_multiple_dbs/1,
- fun should_delete_multiple_dbs/1
- ]
- }
- }
- }.
-
-create_delete_database_continuously_test_() ->
- {
- "Continious database create/delete tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreachx,
- fun(_) -> ?tempdb() end,
- [
- {10, fun should_create_delete_database_continuously/2},
- {100, fun should_create_delete_database_continuously/2}
- ]
- }
- }
- }.
-
-open_db_test_()->
- {
- "Database open tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun() -> ?tempdb() end,
- [
- fun should_create_db_if_missing/1,
- fun should_open_db_if_exists/1,
- fun locking_should_work/1
- ]
- }
- }
- }.
-
-
-should_create_db(DbName) ->
- ?_test(begin
- {ok, Before} = couch_server:all_databases(),
- ?assertNot(lists:member(DbName, Before)),
- ?assert(create_db(DbName)),
- {ok, After} = couch_server:all_databases(),
- ?assert(lists:member(DbName, After))
- end).
-
-should_delete_db(DbName) ->
- ?_test(begin
- ?assert(create_db(DbName)),
- {ok, Before} = couch_server:all_databases(),
- ?assert(lists:member(DbName, Before)),
- couch_server:delete(DbName, []),
- {ok, After} = couch_server:all_databases(),
- ?assertNot(lists:member(DbName, After))
- end).
-
-should_create_multiple_dbs(DbNames) ->
- ?_test(begin
- gen_server:call(couch_server, {set_max_dbs_open, 3}),
- {ok, Before} = couch_server:all_databases(),
- [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames],
- [?assert(create_db(DbName)) || DbName <- DbNames],
- {ok, After} = couch_server:all_databases(),
- [?assert(lists:member(DbName, After)) || DbName <- DbNames]
- end).
-
-should_delete_multiple_dbs(DbNames) ->
- ?_test(begin
- [?assert(create_db(DbName)) || DbName <- DbNames],
- {ok, Before} = couch_server:all_databases(),
- [?assert(lists:member(DbName, Before)) || DbName <- DbNames],
- [?assert(delete_db(DbName)) || DbName <- DbNames],
- {ok, After} = couch_server:all_databases(),
- [?assertNot(lists:member(DbName, After)) || DbName <- DbNames]
- end).
-
-should_create_delete_database_continuously(Times, DbName) ->
- {lists:flatten(io_lib:format("~b times", [Times])),
- {timeout, ?TIMEOUT, ?_test(begin
- ?assert(create_db(DbName)),
- lists:foreach(fun(_) ->
- ?assert(delete_db(DbName)),
- ?assert(create_db(DbName))
- end, lists:seq(1, Times))
- end)}}.
-
-should_create_db_if_missing(DbName) ->
- ?_test(begin
- {ok, Before} = couch_server:all_databases(),
- ?assertNot(lists:member(DbName, Before)),
- {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
- ok = couch_db:close(Db),
- {ok, After} = couch_server:all_databases(),
- ?assert(lists:member(DbName, After))
- end).
-
-should_open_db_if_exists(DbName) ->
- ?_test(begin
- ?assert(create_db(DbName)),
- {ok, Before} = couch_server:all_databases(),
- ?assert(lists:member(DbName, Before)),
- {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
- ok = couch_db:close(Db),
- {ok, After} = couch_server:all_databases(),
- ?assert(lists:member(DbName, After))
- end).
-
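-%% Exercises couch_server's db-name locks: a locked name can be neither
-%% created nor opened until unlocked, and a name that is already open
-%% cannot be locked.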
-locking_should_work(DbName) ->
- ?_test(begin
- ?assertEqual(ok, couch_server:lock(DbName, <<"x">>)),
- ?assertEqual({error, {locked, <<"x">>}}, couch_db:create(DbName, [])),
- ?assertEqual(ok, couch_server:unlock(DbName)),
- {ok, Db} = couch_db:create(DbName, []),
- ?assertEqual({error, already_opened},
- couch_server:lock(DbName, <<>>)),
-
- ok = couch_db:close(Db),
- catch exit(couch_db:get_pid(Db), kill),
- test_util:wait(fun() ->
- case ets:lookup(couch_dbs, DbName) of
- [] -> ok;
- [_ | _] -> wait
- end
- end),
-
- ?assertEqual(ok, couch_server:lock(DbName, <<"y">>)),
- ?assertEqual({error, {locked, <<"y">>}},
- couch_db:open(DbName, [])),
-
- couch_server:unlock(DbName),
- {ok, Db1} = couch_db:open(DbName, [{create_if_missing, true}]),
- ok = couch_db:close(Db1)
- end).
-
-create_db(DbName) ->
- create_db(DbName, []).
-
-create_db(DbName, Opts) ->
- {ok, Db} = couch_db:create(DbName, Opts),
- ok = couch_db:close(Db),
- true.
-
-delete_db(DbName) ->
- ok = couch_server:delete(DbName, []),
- true.
diff --git a/src/couch/test/eunit/couch_doc_json_tests.erl b/src/couch/test/eunit/couch_doc_json_tests.erl
deleted file mode 100644
index 51f228900..000000000
--- a/src/couch/test/eunit/couch_doc_json_tests.erl
+++ /dev/null
@@ -1,493 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_doc_json_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-setup() ->
- mock(couch_log),
- mock(config),
- mock(couch_db_plugin),
- ok.
-
-teardown(_) ->
- meck:unload(couch_log),
- meck:unload(config),
- meck:unload(couch_db_plugin),
- ok.
-
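-%% The mocks below isolate couch_doc from its runtime dependencies:
-%% couch_db_plugin:validate_docid/1 declines so the default docid rules
-%% apply, couch_log is silenced, and config caps max_document_size at 1024
-%% bytes so the "Document too large" case can trip it.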
-mock(couch_db_plugin) ->
- ok = meck:new(couch_db_plugin, [passthrough]),
- ok = meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end),
- ok;
-mock(couch_log) ->
- ok = meck:new(couch_log, [passthrough]),
- ok = meck:expect(couch_log, debug, fun(_, _) -> ok end),
- ok;
-mock(config) ->
- meck:new(config, [passthrough]),
- meck:expect(config, get_integer,
- fun("couchdb", "max_document_size", 4294967296) -> 1024 end),
- meck:expect(config, get, fun(_, _) -> undefined end),
- meck:expect(config, get, fun(_, _, Default) -> Default end),
- ok.
-
-
-json_doc_test_() ->
- {
- setup,
- fun setup/0, fun teardown/1,
- fun(_) ->
- [{"Document from JSON", [
- from_json_with_dbname_error_cases(),
- from_json_with_db_name_success_cases(),
- from_json_success_cases(),
- from_json_error_cases()
- ]},
- {"Document to JSON", [
- to_json_success_cases()
- ]}]
- end
- }.
-
-from_json_success_cases() ->
- Cases = [
- {
- {[]},
- #doc{},
- "Return an empty document for an empty JSON object."
- },
- {
- {[{<<"_id">>, <<"zing!">>}]},
- #doc{id = <<"zing!">>},
- "Parses document ids."
- },
- {
- {[{<<"_id">>, <<"_design/foo">>}]},
- #doc{id = <<"_design/foo">>},
- "_design/document ids."
- },
- {
- {[{<<"_id">>, <<"_local/bam">>}]},
- #doc{id = <<"_local/bam">>},
- "_local/document ids."
- },
- {
- {[{<<"_rev">>, <<"4-230234">>}]},
- #doc{revs = {4, [<<"230234">>]}},
- "_rev stored in revs."
- },
- {
- {[{<<"soap">>, 35}]},
- #doc{body = {[{<<"soap">>, 35}]}},
- "Non underscore prefixed fields stored in body."
- },
- {
- {[{<<"_attachments">>, {[
- {<<"my_attachment.fu">>, {[
- {<<"stub">>, true},
- {<<"content_type">>, <<"application/awesome">>},
- {<<"length">>, 45}
- ]}},
- {<<"noahs_private_key.gpg">>, {[
- {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>},
- {<<"content_type">>, <<"application/pgp-signature">>}
- ]}}
- ]}}]},
- #doc{atts = [
- couch_att:new([
- {name, <<"my_attachment.fu">>},
- {data, stub},
- {type, <<"application/awesome">>},
- {att_len, 45},
- {disk_len, 45},
- {revpos, undefined}
- ]),
- couch_att:new([
- {name, <<"noahs_private_key.gpg">>},
- {data, <<"I have a pet fish!">>},
- {type, <<"application/pgp-signature">>},
- {att_len, 18},
- {disk_len, 18},
- {revpos, 0}
- ])
- ]},
- "Attachments are parsed correctly."
- },
- {
- {[{<<"_deleted">>, true}]},
- #doc{deleted = true},
- "_deleted controls the deleted field."
- },
- {
- {[{<<"_deleted">>, false}]},
- #doc{},
- "{\"_deleted\": false} is ok."
- },
- {
- {[
- {<<"_revisions">>,
- {[{<<"start">>, 4},
- {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}]}},
- {<<"_rev">>, <<"6-something">>}
- ]},
- #doc{revs = {4, [<<"foo1">>, <<"phi3">>, <<"omega">>]}},
- "_revisions attribute are preferred to _rev."
- },
- {
- {[{<<"_revs_info">>, dropping}]},
- #doc{},
- "Drops _revs_info."
- },
- {
- {[{<<"_local_seq">>, dropping}]},
- #doc{},
- "Drops _local_seq."
- },
- {
- {[{<<"_conflicts">>, dropping}]},
- #doc{},
- "Drops _conflicts."
- },
- {
- {[{<<"_deleted_conflicts">>, dropping}]},
- #doc{},
- "Drops _deleted_conflicts."
- }
- ],
- lists:map(
- fun({EJson, Expect, Msg}) ->
- {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson))}
- end,
- Cases).
-
-from_json_with_db_name_success_cases() ->
- Cases = [
- {
- {[]},
- <<"_dbs">>,
- #doc{},
- "DbName _dbs is acceptable with no docid"
- },
- {
- {[{<<"_id">>, <<"zing!">>}]},
- <<"_dbs">>,
- #doc{id = <<"zing!">>},
- "DbName _dbs is acceptable with a normal docid"
- },
- {
- {[{<<"_id">>, <<"_users">>}]},
- <<"_dbs">>,
- #doc{id = <<"_users">>},
- "_dbs/_users is acceptable"
- },
- {
- {[{<<"_id">>, <<"_replicator">>}]},
- <<"_dbs">>,
- #doc{id = <<"_replicator">>},
- "_dbs/_replicator is acceptable"
- },
- {
- {[{<<"_id">>, <<"_global_changes">>}]},
- <<"_dbs">>,
- #doc{id = <<"_global_changes">>},
- "_dbs/_global_changes is acceptable"
- }
- ],
- lists:map(
- fun({EJson, DbName, Expect, Msg}) ->
- {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson, DbName))}
- end,
- Cases).
-
-from_json_error_cases() ->
- Cases = [
- {
- [],
- {bad_request, "Document must be a JSON object"},
- "arrays are invalid"
- },
- {
- 4,
- {bad_request, "Document must be a JSON object"},
- "integers are invalid"
- },
- {
- true,
- {bad_request, "Document must be a JSON object"},
- "literals are invalid"
- },
- {
- {[{<<"_id">>, {[{<<"foo">>, 5}]}}]},
- {illegal_docid, <<"Document id must be a string">>},
- "Document id must be a string."
- },
- {
- {[{<<"_id">>, <<"_random">>}]},
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>},
- "Disallow arbitrary underscore prefixed docids."
- },
- {
- {[{<<"_rev">>, 5}]},
- {bad_request, <<"Invalid rev format">>},
- "_rev must be a string"
- },
- {
- {[{<<"_rev">>, "foobar"}]},
- {bad_request, <<"Invalid rev format">>},
- "_rev must be %d-%s"
- },
- {
- {[{<<"_rev">>, "foo-bar"}]},
- "Error if _rev's integer expection is broken."
- },
- {
- {[{<<"_revisions">>, {[{<<"start">>, true}]}}]},
- {doc_validation, "_revisions.start isn't an integer."},
- "_revisions.start must be an integer."
- },
- {
- {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, 5}]}}]},
- {doc_validation, "_revisions.ids isn't a array."},
- "_revions.ids must be a list."
- },
- {
- {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, [5]}]}}]},
- {doc_validation, "RevId isn't a string"},
- "Revision ids must be strings."
- },
- {
- {[{<<"_revisions">>, {[{<<"start">>, 0},
- {<<"ids">>, [<<"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>]}]}}]},
- {doc_validation, "RevId isn't a valid hexadecimal"},
- "Revision ids must be a valid hex."
- },
- {
- {[{<<"_something">>, 5}]},
- {doc_validation, <<"Bad special document member: _something">>},
- "Underscore prefix fields are reserved."
- },
- {
- fun() ->
- {[
- {<<"_id">>, <<"large_doc">>},
- {<<"x">> , << <<"x">> || _ <- lists:seq(1,1025) >>}
- ]}
- end,
- {request_entity_too_large, <<"large_doc">>},
- "Document too large."
- }
- ],
-
- lists:map(fun
- ({Fun, Expect, Msg}) when is_function(Fun, 0) ->
- {Msg,
- ?_assertThrow(Expect, couch_doc:from_json_obj_validate(Fun()))};
- ({EJson, Expect, Msg}) ->
- {Msg,
- ?_assertThrow(Expect, couch_doc:from_json_obj_validate(EJson))};
- ({EJson, Msg}) ->
- {Msg,
- ?_assertThrow(_, couch_doc:from_json_obj_validate(EJson))}
- end, Cases).
-
-from_json_with_dbname_error_cases() ->
- Cases = [
- {
- {[{<<"_id">>, <<"_random">>}]},
- <<"_dbs">>,
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>},
- "Disallow non-system-DB underscore prefixed docids in _dbs database."
- },
- {
- {[{<<"_id">>, <<"_random">>}]},
- <<"foobar">>,
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>},
- "Disallow arbitrary underscore prefixed docids in regular database."
- },
- {
- {[{<<"_id">>, <<"_users">>}]},
- <<"foobar">>,
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>},
- "Disallow system-DB docid _users in regular database."
- }
- ],
-
- lists:map(
- fun({EJson, DbName, Expect, Msg}) ->
- Error = (catch couch_doc:from_json_obj_validate(EJson, DbName)),
- {Msg, ?_assertMatch(Expect, Error)}
- end,
- Cases).
-
-to_json_success_cases() ->
- Cases = [
- {
- #doc{},
- {[{<<"_id">>, <<"">>}]},
- "Empty docs are {\"_id\": \"\"}"
- },
- {
- #doc{id = <<"foo">>},
- {[{<<"_id">>, <<"foo">>}]},
- "_id is added."
- },
- {
- #doc{revs = {5, ["foo"]}},
- {[{<<"_id">>, <<>>}, {<<"_rev">>, <<"5-foo">>}]},
- "_rev is added."
- },
- {
- [revs],
- #doc{revs = {5, [<<"first">>, <<"second">>]}},
- {[
- {<<"_id">>, <<>>},
- {<<"_rev">>, <<"5-first">>},
- {<<"_revisions">>, {[
- {<<"start">>, 5},
- {<<"ids">>, [<<"first">>, <<"second">>]}
- ]}}
- ]},
- "_revisions include with revs option"
- },
- {
- #doc{body = {[{<<"foo">>, <<"bar">>}]}},
- {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}]},
- "Arbitrary fields are added."
- },
- {
- #doc{deleted = true, body = {[{<<"foo">>, <<"bar">>}]}},
- {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}, {<<"_deleted">>, true}]},
- "Deleted docs no longer drop body members."
- },
- {
- #doc{meta = [
- {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]}
- ]},
- {[
- {<<"_id">>, <<>>},
- {<<"_revs_info">>, [
- {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]},
- {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]}
- ]}
- ]},
- "_revs_info field is added correctly."
- },
- {
- #doc{meta = [{local_seq, 5}]},
- {[{<<"_id">>, <<>>}, {<<"_local_seq">>, 5}]},
- "_local_seq is added as an integer."
- },
- {
- #doc{meta = [{conflicts, [{3, <<"yep">>}, {1, <<"snow">>}]}]},
- {[
- {<<"_id">>, <<>>},
- {<<"_conflicts">>, [<<"3-yep">>, <<"1-snow">>]}
- ]},
- "_conflicts is added as an array of strings."
- },
- {
- #doc{meta = [{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]},
- {[
- {<<"_id">>, <<>>},
- {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]}
- ]},
- "_deleted_conflicsts is added as an array of strings."
- },
- {
- #doc{atts = [
- couch_att:new([
- {name, <<"big.xml">>},
- {type, <<"xml/sucks">>},
- {data, fun() -> ok end},
- {revpos, 1},
- {att_len, 400},
- {disk_len, 400}
- ]),
- couch_att:new([
- {name, <<"fast.json">>},
- {type, <<"json/ftw">>},
- {data, <<"{\"so\": \"there!\"}">>},
- {revpos, 1},
- {att_len, 16},
- {disk_len, 16}
- ])
- ]},
- {[
- {<<"_id">>, <<>>},
- {<<"_attachments">>, {[
- {<<"big.xml">>, {[
- {<<"content_type">>, <<"xml/sucks">>},
- {<<"revpos">>, 1},
- {<<"length">>, 400},
- {<<"stub">>, true}
- ]}},
- {<<"fast.json">>, {[
- {<<"content_type">>, <<"json/ftw">>},
- {<<"revpos">>, 1},
- {<<"length">>, 16},
- {<<"stub">>, true}
- ]}}
- ]}}
- ]},
- "Attachments attached as stubs only include a length."
- },
- {
- [attachments],
- #doc{atts = [
- couch_att:new([
- {name, <<"stuff.txt">>},
- {type, <<"text/plain">>},
- {data, fun() -> <<"diet pepsi">> end},
- {revpos, 1},
- {att_len, 10},
- {disk_len, 10}
- ]),
- couch_att:new([
- {name, <<"food.now">>},
- {type, <<"application/food">>},
- {revpos, 1},
- {data, <<"sammich">>}
- ])
- ]},
- {[
- {<<"_id">>, <<>>},
- {<<"_attachments">>, {[
- {<<"stuff.txt">>, {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"revpos">>, 1},
- {<<"data">>, <<"ZGlldCBwZXBzaQ==">>}
- ]}},
- {<<"food.now">>, {[
- {<<"content_type">>, <<"application/food">>},
- {<<"revpos">>, 1},
- {<<"data">>, <<"c2FtbWljaA==">>}
- ]}}
- ]}}
- ]},
- "Attachments included inline with attachments option."
- }
- ],
-
- lists:map(fun
- ({Doc, EJson, Msg}) ->
- {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, []))};
- ({Options, Doc, EJson, Msg}) ->
- {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, Options))}
- end, Cases).
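
The deleted tests above all share one table-driven EUnit shape: a list of case
tuples mapped into labeled test objects. A minimal, self-contained sketch of
that pattern (module, cases, and names here are hypothetical):

    -module(table_pattern_sketch).
    -include_lib("eunit/include/eunit.hrl").

    square_test_() ->
        Cases = [
            {0, 0, "zero squared is zero"},
            {3, 9, "three squared is nine"}
        ],
        lists:map(
            fun({In, Expect, Msg}) ->
                {Msg, ?_assertEqual(Expect, In * In)}
            end,
            Cases).
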
diff --git a/src/couch/test/eunit/couch_doc_tests.erl b/src/couch/test/eunit/couch_doc_tests.erl
deleted file mode 100644
index cf41df61d..000000000
--- a/src/couch/test/eunit/couch_doc_tests.erl
+++ /dev/null
@@ -1,145 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_doc_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(REQUEST_FIXTURE,
- filename:join([?FIXTURESDIR, "multipart.http"])).
-
-parse_rev_test() ->
- ?assertEqual({1, <<"123">>}, couch_doc:parse_rev("1-123")),
- ?assertEqual({1, <<"123">>}, couch_doc:parse_rev(<<"1-123">>)),
- ?assertException(throw, {bad_request, _}, couch_doc:parse_rev("1f-123")),
- ?assertException(throw, {bad_request, _}, couch_doc:parse_rev("bar")).
-
-doc_from_multi_part_stream_test() ->
- ContentType = "multipart/related;boundary=multipart_related_boundary~~~~~~~~~~~~~~~~~~~~",
- DataFun = fun() -> request(start) end,
-
- mock_config(),
- {ok, #doc{id = <<"doc0">>, atts = [_]}, _Fun, _Parser} =
- couch_doc:doc_from_multi_part_stream(ContentType, DataFun),
- meck:unload(config),
- ok.
-
-doc_to_multi_part_stream_test() ->
- Boundary = <<"multipart_related_boundary~~~~~~~~~~~~~~~~~~~~">>,
- JsonBytes = <<"{\n \"_id\": \"our document goes here\"\n}\n\n">>,
- AttData = <<"Hello my important document">>,
- AttLength = size(AttData),
- Atts = [couch_att:new([
- {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>},
- {att_len, AttLength}, {disk_len, AttLength}])],
- couch_doc:doc_to_multi_part_stream(Boundary, JsonBytes, Atts, fun send/1, true),
- AttLengthStr = integer_to_binary(AttLength),
- BoundaryLen = size(Boundary),
- [
- <<"--", Boundary/binary>>,
- <<"Content-Type: application/json">>,
- <<>>,
- JsonBytes,
- <<"--", Boundary/binary>>,
- <<"Content-Disposition: attachment; filename=\"test\"">>,
- <<"Content-Type: text/plain">>,
- <<"Content-Length: ", AttLengthStr/binary>>,
- <<>>,
- AttData,
- <<"--", Boundary:BoundaryLen/binary, "--">>
- ] = collected(),
- ok.
-
-len_doc_to_multi_part_stream_test() ->
- Boundary = <<"simple_boundary">>,
- JsonBytes = <<"{\n \"_id\": \"our document goes here\"\n}\n\n">>,
- ContentType = <<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
- AttData = <<"Hello my important document">>,
- AttLength = size(AttData),
- Atts = [couch_att:new([
- {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>},
- {att_len, AttLength}, {disk_len, AttLength}])],
- {ContentType, 258} = %% 258 is expected size of the document
- couch_doc:len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, true),
- ok.
-
-validate_docid_test_() ->
- {setup,
- fun() ->
- mock_config(),
- ok = meck:new(couch_db_plugin, [passthrough]),
- meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end)
- end,
- fun(_) ->
- meck:unload(config),
- meck:unload(couch_db_plugin)
- end,
- [
- ?_assertEqual(ok, couch_doc:validate_docid(<<"idx">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_design/idx">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_local/idx">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(large_id(1024))),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_users">>, <<"_dbs">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_replicator">>, <<"_dbs">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_global_changes">>, <<"_dbs">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<>>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<16#80>>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_idx">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_design/">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_local/">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(large_id(1025))),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_users">>, <<"foo">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>))
- ]
- }.
-
-large_id(N) ->
- << <<"x">> || _ <- lists:seq(1, N) >>.
-
-request(start) ->
- {ok, Doc} = file:read_file(?REQUEST_FIXTURE),
- {Doc, fun() -> request(stop) end};
-request(stop) ->
- {"", fun() -> request(stop) end}.
-
-send(Data) ->
- send(Data, get(data)).
-send(Data, undefined) ->
- send(Data, []);
-send(Data, Acc) ->
- put(data, [Acc|Data]).
-
-collected() ->
- B = binary:replace(iolist_to_binary(get(data)), <<"\r\n">>, <<0>>, [global]),
- binary:split(B, [<<0>>], [global]).
-
-mock_config() ->
- ok = meck:new(config, [passthrough]),
- meck:expect(config, get,
- fun("couchdb", "max_document_id_length", "infinity") -> "1024";
- ("couchdb", "max_attachment_size", "infinity") -> "infinity";
- ("mem3", "shards_db", "_dbs") -> "_dbs";
- (Key, Val, Default) -> meck:passthrough([Key, Val, Default])
- end
- ).
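
mock_config/0 above is the standard meck recipe for pinning a few config keys
while letting every other lookup fall through. A reduced sketch of that
recipe, assuming only the config:get/3 interface the tests already rely on:

    setup_config_stub() ->
        ok = meck:new(config, [passthrough]),
        meck:expect(config, get, fun
            ("couchdb", "max_document_id_length", _Default) -> "1024";
            (Section, Key, Default) -> meck:passthrough([Section, Key, Default])
        end).

    teardown_config_stub() ->
        meck:unload(config).
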
diff --git a/src/couch/test/eunit/couch_ejson_size_tests.erl b/src/couch/test/eunit/couch_ejson_size_tests.erl
deleted file mode 100644
index df9168ed1..000000000
--- a/src/couch/test/eunit/couch_ejson_size_tests.erl
+++ /dev/null
@@ -1,72 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_ejson_size_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(HWAIR, $\x{10348}). % 4 byte utf8 encoding
--define(EURO, $\x{20ac}). % 3 byte utf8 encoding
--define(CENT, $\x{a2}). % 2 byte utf8 encoding
-
-
-ejson_size_test_() ->
- [?_assertEqual(R, couch_ejson_size:encoded_size(Input)) || {R, Input} <- [
- {1, 1}, {1, 1}, {2, -1}, {1, 9}, {2, 10}, {3, -10},
- {2, 11}, {2, 99}, {3, 100}, {3, 999}, {4, 1000}, {4, 9999},
- {5, 10000},
-
- {3, 0.0}, {3, 0.1}, {3, 1.0}, {4, -1.0}, {3, 1.0e9},
- {4, 1.0e10}, {5, 1.0e-10}, {5, 1.0e-99}, {6, 1.0e-100}, {3, 1.0e-323},
-
- {2, arr_nested(0)}, {22, arr_nested(10)}, {2002, arr_nested(1000)},
- {9, obj_nested(0)}, {69, obj_nested(10)}, {6009, obj_nested(1000)},
-
- {4, null}, {4, true}, {5, false},
-
- {3, str(1, $x)}, {4, str(1, ?CENT)}, {5, str(1, ?EURO)},
- {6, str(1, ?HWAIR)}, {3, str(1, $\x{1})}, {12, str(10, $x)},
- {22, str(10, ?CENT)}, {32, str(10, ?EURO)}, {42, str(10, ?HWAIR)},
- {12, str(10, $\x{1})}
- ]].
-
-
-%% Helper functions
-
-arr_nested(MaxDepth) ->
- arr_nested(MaxDepth, 0).
-
-
-obj_nested(MaxDepth) ->
- obj_nested(MaxDepth, 0).
-
-
-obj(N, K, V) ->
- {[{K, V} || _ <- lists:seq(1, N)]}.
-
-
-str(N, C) ->
- unicode:characters_to_binary([C || _ <- lists:seq(1, N)]).
-
-
-arr_nested(MaxDepth, MaxDepth) ->
- [];
-
-arr_nested(MaxDepth, Depth) ->
- [arr_nested(MaxDepth, Depth + 1)].
-
-
-obj_nested(MaxDepth, MaxDepth) ->
- obj(1, <<"k">>, <<"v">>);
-
-obj_nested(MaxDepth, Depth) ->
- {[{<<"k">>, obj_nested(MaxDepth, Depth + 1)}]}.
diff --git a/src/couch/test/eunit/couch_etag_tests.erl b/src/couch/test/eunit/couch_etag_tests.erl
deleted file mode 100644
index 9d15e483f..000000000
--- a/src/couch/test/eunit/couch_etag_tests.erl
+++ /dev/null
@@ -1,30 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_etag_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-local_with_empty_body_test() ->
- Etag = couch_httpd:doc_etag(<<"_local/local-and-empty">>, {[]}, {0, <<"1">>}),
- ?assertEqual(Etag, <<"\"5ZVXQYO7VLEOU0TL9VXDNP5PV\"">>).
-
-
-local_with_body_test() ->
- DocBody = {[{<<"hello">>,<<"world">>},{<<"relax">>,true}]},
- Etag = couch_httpd:doc_etag(<<"_local/local-with-body">>, DocBody, {0, <<"1">>}),
- ?assertEqual(Etag, <<"\"CEFXP6WH8OKYIWO1GLGBHKCCA\"">>).
-
-normal_doc_uses_rev_test() ->
- DocBody = {[{<<"hello">>,<<"world">>},{<<"relax">>,true}]},
- Etag = couch_httpd:doc_etag(<<"nomal-doc">>, DocBody, {1, <<"efda11e34e88ebe31a2f83e84a0435b6">>}),
- ?assertEqual(Etag, <<"\"1-efda11e34e88ebe31a2f83e84a0435b6\"">>).
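
These etags exist to short-circuit HTTP responses. An illustrative sketch, not
CouchDB's actual request path, of how a doc_etag/3 result pairs with a
client's If-None-Match header (maybe_not_modified/4 is a hypothetical helper):

    maybe_not_modified(ClientEtag, DocId, Body, Rev) ->
        case couch_httpd:doc_etag(DocId, Body, Rev) of
            ClientEtag -> not_modified;   % 304: client copy is current
            Etag -> {ok, Etag}            % 200: send body with fresh ETag
        end.
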
diff --git a/src/couch/test/eunit/couch_file_tests.erl b/src/couch/test/eunit/couch_file_tests.erl
deleted file mode 100644
index 606f4bbf4..000000000
--- a/src/couch/test/eunit/couch_file_tests.erl
+++ /dev/null
@@ -1,551 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_file_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(BLOCK_SIZE, 4096).
--define(setup(F), {setup, fun setup/0, fun teardown/1, F}).
--define(foreach(Fs), {foreach, fun setup/0, fun teardown/1, Fs}).
-
-
-setup() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- Fd.
-
-teardown(Fd) ->
- case is_process_alive(Fd) of
- true -> ok = couch_file:close(Fd);
- false -> ok
- end.
-
-open_close_test_() ->
- {
- "Test for proper file open and close",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- [
- should_return_enoent_if_missed(),
- should_ignore_invalid_flags_with_open(),
- ?setup(fun should_return_pid_on_file_open/1),
- should_close_file_properly(),
- ?setup(fun should_create_empty_new_files/1)
- ]
- }
- }.
-
-should_return_enoent_if_missed() ->
- ?_assertEqual({error, enoent}, couch_file:open("not a real file")).
-
-should_ignore_invalid_flags_with_open() ->
- ?_assertMatch({ok, _},
- couch_file:open(?tempfile(), [create, invalid_option])).
-
-should_return_pid_on_file_open(Fd) ->
- ?_assert(is_pid(Fd)).
-
-should_close_file_properly() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- ok = couch_file:close(Fd),
- ?_assert(true).
-
-should_create_empty_new_files(Fd) ->
- ?_assertMatch({ok, 0}, couch_file:bytes(Fd)).
-
-
-read_write_test_() ->
- {
- "Common file read/write tests",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- ?foreach([
- fun should_increase_file_size_on_write/1,
- fun should_return_current_file_size_on_write/1,
- fun should_write_and_read_term/1,
- fun should_write_and_read_binary/1,
- fun should_write_and_read_large_binary/1,
- fun should_return_term_as_binary_for_reading_binary/1,
- fun should_read_term_written_as_binary/1,
- fun should_read_iolist/1,
- fun should_fsync/1,
- fun should_not_read_beyond_eof/1,
- fun should_truncate/1
- ])
- }
- }.
-
-
-should_increase_file_size_on_write(Fd) ->
- {ok, 0, _} = couch_file:append_term(Fd, foo),
- {ok, Size} = couch_file:bytes(Fd),
- ?_assert(Size > 0).
-
-should_return_current_file_size_on_write(Fd) ->
- {ok, 0, _} = couch_file:append_term(Fd, foo),
- {ok, Size} = couch_file:bytes(Fd),
- ?_assertMatch({ok, Size, _}, couch_file:append_term(Fd, bar)).
-
-should_write_and_read_term(Fd) ->
- {ok, Pos, _} = couch_file:append_term(Fd, foo),
- ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
-
-should_write_and_read_binary(Fd) ->
- {ok, Pos, _} = couch_file:append_binary(Fd, <<"fancy!">>),
- ?_assertMatch({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Pos)).
-
-should_return_term_as_binary_for_reading_binary(Fd) ->
- {ok, Pos, _} = couch_file:append_term(Fd, foo),
- Foo = couch_compress:compress(foo, snappy),
- ?_assertMatch({ok, Foo}, couch_file:pread_binary(Fd, Pos)).
-
-should_read_term_written_as_binary(Fd) ->
- {ok, Pos, _} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>),
- ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
-
-should_write_and_read_large_binary(Fd) ->
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- {ok, Pos, _} = couch_file:append_binary(Fd, BigBin),
- ?_assertMatch({ok, BigBin}, couch_file:pread_binary(Fd, Pos)).
-
-should_read_iolist(Fd) ->
- %% append_binary/2 also accepts an iolist; if this round trip fails,
- %% suspect pread_iolist/2 or the iolist handling in append_binary/2.
- {ok, Pos, _} = couch_file:append_binary(Fd, ["foo", $m, <<"bam">>]),
- {ok, IoList} = couch_file:pread_iolist(Fd, Pos),
- ?_assertMatch(<<"foombam">>, iolist_to_binary(IoList)).
-
-should_fsync(Fd) ->
- {"How does on test fsync?", ?_assertMatch(ok, couch_file:sync(Fd))}.
-
-should_not_read_beyond_eof(Fd) ->
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- DoubleBin = round(byte_size(BigBin) * 2),
- {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin),
- {_, Filepath} = couch_file:process_info(Fd),
- %% corrupt db file
- {ok, Io} = file:open(Filepath, [read, write, binary]),
- ok = file:pwrite(Io, Pos, <<0:1/integer, DoubleBin:31/integer>>),
- file:close(Io),
- unlink(Fd),
- ExpectedError = {badmatch, {'EXIT', {bad_return_value,
- {read_beyond_eof, Filepath}}}},
- ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)).
-
-should_truncate(Fd) ->
- {ok, 0, _} = couch_file:append_term(Fd, foo),
- {ok, Size} = couch_file:bytes(Fd),
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- {ok, _, _} = couch_file:append_binary(Fd, BigBin),
- ok = couch_file:truncate(Fd, Size),
- ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, 0)).
-
-pread_limit_test_() ->
- {
- "Read limit tests",
- {
- setup,
- fun() ->
- Ctx = test_util:start(?MODULE),
- config:set("couchdb", "max_pread_size", "50000"),
- Ctx
- end,
- fun(Ctx) ->
- config:delete("couchdb", "max_pread_size"),
- test_util:stop(Ctx)
- end,
- ?foreach([
- fun should_increase_file_size_on_write/1,
- fun should_return_current_file_size_on_write/1,
- fun should_write_and_read_term/1,
- fun should_write_and_read_binary/1,
- fun should_not_read_more_than_pread_limit/1
- ])
- }
- }.
-
-should_not_read_more_than_pread_limit(Fd) ->
- {_, Filepath} = couch_file:process_info(Fd),
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin),
- unlink(Fd),
- ExpectedError = {badmatch, {'EXIT', {bad_return_value,
- {exceed_pread_limit, Filepath, 50000}}}},
- ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)).
-
-
-header_test_() ->
- {
- "File header read/write tests",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- [
- ?foreach([
- fun should_write_and_read_atom_header/1,
- fun should_write_and_read_tuple_header/1,
- fun should_write_and_read_second_header/1,
- fun should_truncate_second_header/1,
- fun should_produce_same_file_size_on_rewrite/1,
- fun should_save_headers_larger_than_block_size/1
- ]),
- should_recover_header_marker_corruption(),
- should_recover_header_size_corruption(),
- should_recover_header_md5sig_corruption(),
- should_recover_header_data_corruption()
- ]
- }
- }.
-
-
-should_write_and_read_atom_header(Fd) ->
- ok = couch_file:write_header(Fd, hello),
- ?_assertMatch({ok, hello}, couch_file:read_header(Fd)).
-
-should_write_and_read_tuple_header(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
-
-should_write_and_read_second_header(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- ?_assertMatch({ok, [foo, <<"more">>]}, couch_file:read_header(Fd)).
-
-should_truncate_second_header(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- {ok, Size} = couch_file:bytes(Fd),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- ok = couch_file:truncate(Fd, Size),
- ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
-
-should_produce_same_file_size_on_rewrite(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- {ok, Size1} = couch_file:bytes(Fd),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- {ok, Size2} = couch_file:bytes(Fd),
- ok = couch_file:truncate(Fd, Size1),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- ?_assertMatch({ok, Size2}, couch_file:bytes(Fd)).
-
-should_save_headers_larger_than_block_size(Fd) ->
- Header = erlang:make_tuple(5000, <<"CouchDB">>),
- couch_file:write_header(Fd, Header),
- {"COUCHDB-1319", ?_assertMatch({ok, Header}, couch_file:read_header(Fd))}.
-
-
-should_recover_header_marker_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- file:pwrite(RawFd, HeaderPos, <<0>>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
- ).
-
-should_recover_header_size_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- % +1 for 0x1 byte marker
- file:pwrite(RawFd, HeaderPos + 1, <<10/integer>>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
- ).
-
-should_recover_header_md5sig_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- % +5 = +1 for 0x1 byte and +4 for term size.
- file:pwrite(RawFd, HeaderPos + 5, <<"F01034F88D320B22">>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
- ).
-
-should_recover_header_data_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig
- file:pwrite(RawFd, HeaderPos + 21, <<"some data goes here!">>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
- ).
-
-
-check_header_recovery(CheckFun) ->
- Path = ?tempfile(),
- {ok, Fd} = couch_file:open(Path, [create, overwrite]),
- {ok, RawFd} = file:open(Path, [read, write, raw, binary]),
-
- {ok, _} = write_random_data(Fd),
- ExpectHeader = {some_atom, <<"a binary">>, 756},
- ok = couch_file:write_header(Fd, ExpectHeader),
-
- {ok, HeaderPos} = write_random_data(Fd),
- ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}),
-
- CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos),
-
- ok = file:close(RawFd),
- ok = couch_file:close(Fd),
- ok.
-
-write_random_data(Fd) ->
- write_random_data(Fd, 100 + couch_rand:uniform(1000)).
-
-write_random_data(Fd, 0) ->
- {ok, Bytes} = couch_file:bytes(Fd),
- {ok, (1 + Bytes div ?BLOCK_SIZE) * ?BLOCK_SIZE};
-write_random_data(Fd, N) ->
- Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]],
- Term = lists:nth(couch_rand:uniform(length(Choices)), Choices),
- {ok, _, _} = couch_file:append_term(Fd, Term),
- write_random_data(Fd, N - 1).
-
-
-delete_test_() ->
- {
- "File delete tests",
- {
- setup,
- fun() ->
- meck:new(config, [passthrough])
- end,
- fun(_) ->
- meck:unload()
- end,
- {
- foreach,
- fun() ->
- meck:reset([config]),
- File = ?tempfile() ++ ".couch",
- RootDir = filename:dirname(File),
- ok = couch_file:init_delete_dir(RootDir),
- ok = file:write_file(File, <<>>),
- {RootDir, File}
- end,
- fun({_, File}) ->
- file:delete(File)
- end,
- [
- fun(Cfg) ->
- {"enable_database_recovery = false, context = delete",
- make_enable_recovery_test_case(Cfg, false, delete)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = true, context = delete",
- make_enable_recovery_test_case(Cfg, true, delete)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = false, context = compaction",
- make_enable_recovery_test_case(Cfg, false, compaction)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = true, context = compaction",
- make_enable_recovery_test_case(Cfg, true, compaction)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = true",
- make_delete_after_rename_test_case(Cfg, true)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = false",
- make_delete_after_rename_test_case(Cfg, false)}
- end
- ]
- }
- }
- }.
-
-
-make_enable_recovery_test_case({RootDir, File}, EnableRecovery, Context) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> EnableRecovery;
- ("couchdb", "delete_after_rename", _) -> false
- end),
- FileExistsBefore = filelib:is_regular(File),
- couch_file:delete(RootDir, File, [{context, Context}]),
- FileExistsAfter = filelib:is_regular(File),
- RenamedFiles = filelib:wildcard(filename:rootname(File) ++ "*.deleted.*"),
- DeletedFiles = filelib:wildcard(RootDir ++ "/.delete/*"),
- {ExpectRenamedCount, ExpectDeletedCount} = if
- EnableRecovery andalso Context =:= delete -> {1, 0};
- true -> {0, 1}
- end,
- [
- ?_assert(FileExistsBefore),
- ?_assertNot(FileExistsAfter),
- ?_assertEqual(ExpectRenamedCount, length(RenamedFiles)),
- ?_assertEqual(ExpectDeletedCount, length(DeletedFiles))
- ].
-
-make_delete_after_rename_test_case({RootDir, File}, DeleteAfterRename) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> false;
- ("couchdb", "delete_after_rename", _) -> DeleteAfterRename
- end),
- FileExistsBefore = filelib:is_regular(File),
- couch_file:delete(RootDir, File),
- FileExistsAfter = filelib:is_regular(File),
- RenamedFiles = filelib:wildcard(filename:join([RootDir, ".delete", "*"])),
- ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end,
- [
- ?_assert(FileExistsBefore),
- ?_assertNot(FileExistsAfter),
- ?_assertEqual(ExpectRenamedCount, length(RenamedFiles))
- ].
-
-
-nuke_dir_test_() ->
- {
- "Nuke directory tests",
- {
- setup,
- fun() ->
- meck:new(config, [passthrough])
- end,
- fun(_) ->
- meck:unload()
- end,
- {
- foreach,
- fun() ->
- meck:reset([config]),
- File0 = ?tempfile() ++ ".couch",
- RootDir = filename:dirname(File0),
- BaseName = filename:basename(File0),
- Seed = couch_rand:uniform(8999999999) + 999999999,
- DDocDir = io_lib:format("db.~b_design", [Seed]),
- ViewDir = filename:join([RootDir, DDocDir]),
- file:make_dir(ViewDir),
- File = filename:join([ViewDir, BaseName]),
- file:rename(File0, File),
- ok = couch_file:init_delete_dir(RootDir),
- ok = file:write_file(File, <<>>),
- {RootDir, ViewDir}
- end,
- fun({RootDir, ViewDir}) ->
- remove_dir(ViewDir),
- Ext = filename:extension(ViewDir),
- case filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext) of
- [DelDir] -> remove_dir(DelDir);
- _ -> ok
- end
- end,
- [
- fun(Cfg) ->
- {"enable_database_recovery = false",
- make_rename_dir_test_case(Cfg, false)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = true",
- make_rename_dir_test_case(Cfg, true)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = true",
- make_delete_dir_test_case(Cfg, true)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = false",
- make_delete_dir_test_case(Cfg, false)}
- end
- ]
- }
- }
- }.
-
-
-make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> EnableRecovery;
- ("couchdb", "delete_after_rename", _) -> true;
- (_, _, Default) -> Default
- end),
- DirExistsBefore = filelib:is_dir(ViewDir),
- couch_file:nuke_dir(RootDir, ViewDir),
- DirExistsAfter = filelib:is_dir(ViewDir),
- Ext = filename:extension(ViewDir),
- RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext),
- ExpectRenamedCount = if EnableRecovery -> 1; true -> 0 end,
- [
- ?_assert(DirExistsBefore),
- ?_assertNot(DirExistsAfter),
- ?_assertEqual(ExpectRenamedCount, length(RenamedDirs))
- ].
-
-make_delete_dir_test_case({RootDir, ViewDir}, DeleteAfterRename) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> false;
- ("couchdb", "delete_after_rename", _) -> DeleteAfterRename;
- (_, _, Default) -> Default
- end),
- DirExistsBefore = filelib:is_dir(ViewDir),
- couch_file:nuke_dir(RootDir, ViewDir),
- DirExistsAfter = filelib:is_dir(ViewDir),
- Ext = filename:extension(ViewDir),
- RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext),
- RenamedFiles = filelib:wildcard(RootDir ++ "/.delete/*"),
- ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end,
- [
- ?_assert(DirExistsBefore),
- ?_assertNot(DirExistsAfter),
- ?_assertEqual(0, length(RenamedDirs)),
- ?_assertEqual(ExpectRenamedCount, length(RenamedFiles))
- ].
-
-remove_dir(Dir) ->
- [file:delete(File) || File <- filelib:wildcard(filename:join([Dir, "*"]))],
- file:del_dir(Dir).
-
-
-fsync_error_test_() ->
- {
- "Test fsync raises errors",
- {
- setup,
- fun() ->
- test_util:start(?MODULE, [ioq])
- end,
- fun(Ctx) ->
- test_util:stop(Ctx)
- end,
- [
- fun fsync_raises_errors/0
- ]
- }
- }.
-
-
-fsync_raises_errors() ->
- Fd = spawn(fun() -> fake_fsync_fd() end),
- ?assertError({fsync_error, eio}, couch_file:sync(Fd)).
-
-
-fake_fsync_fd() ->
- % Mocking gen_server did not go very
- % well so faking the couch_file pid
- % will have to do.
- receive
- {'$gen_call', From, sync} ->
- gen:reply(From, {error, eio})
- end.
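
Taken together, the tests above pin down the core couch_file contract:
append_term/2 and append_binary/2 return the position that pread_term/2 and
pread_binary/2 later read from. A minimal usage sketch under that contract:

    roundtrip_example(Path) ->
        {ok, Fd} = couch_file:open(Path, [create, overwrite]),
        {ok, Pos, _Bytes} = couch_file:append_term(Fd, {hello, <<"world">>}),
        {ok, {hello, <<"world">>}} = couch_file:pread_term(Fd, Pos),
        ok = couch_file:close(Fd).
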
diff --git a/src/couch/test/eunit/couch_flags_config_tests.erl b/src/couch/test/eunit/couch_flags_config_tests.erl
deleted file mode 100644
index ed7df1123..000000000
--- a/src/couch/test/eunit/couch_flags_config_tests.erl
+++ /dev/null
@@ -1,119 +0,0 @@
--module(couch_flags_config_tests).
--include_lib("eunit/include/eunit.hrl").
-
-%% value copied from couch_flags_config
--define(MAX_FLAG_NAME_LENGTH, 256).
-
-setup() ->
- meck:new(couch_log),
- meck:expect(couch_log, error, ['_', '_'], meck:val(ok)),
- ok.
-
-teardown(_) ->
- meck:unload().
-
-couch_flags_config_test_() ->
- {
- "test couch_flags_config",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [fun all_combinations_return_same_result/0]
- ++ latest_override_wins()
- ++ [
- {"rules_are_sorted", fun rules_are_sorted/0}
- ]
- }
- }.
-
-all_combinations_return_same_result() ->
- Config = [
- {"foo, bar||*", "true"},
- {"baz, qux||*", "false"},
- {"baz||shards/test*", "true"},
- {"baz||shards/blacklist*", "false"},
- {"bar||shards/test*", "false"},
- {"bar||shards/test/blacklist*", "true"}
- ],
- Expected = [
- {{<<"shards/test/blacklist*">>},{<<"shards/test/blacklist*">>,22,[bar, baz, foo]}},
- {{<<"shards/test*">>},{<<"shards/test*">>, 12, [baz, foo]}},
- {{<<"shards/blacklist*">>},{<<"shards/blacklist*">>, 17, [bar, foo]}},
- {{<<"*">>},{<<"*">>, 1, [bar, foo]}}
- ],
- Combinations = couch_tests_combinatorics:permutations(Config),
- lists:foreach(fun(Items) ->
- ?assertEqual(Expected, couch_flags_config:data(Items))
- end, Combinations).
-
-rules_are_sorted() ->
- Expected = [
- {{<<"shards/test/exact">>},{<<"shards/test/exact">>, 17, [baz,flag_bar,flag_foo]}},
- {{<<"shards/test/blacklist*">>},{<<"shards/test/blacklist*">>,22,[flag_foo]}},
- {{<<"shards/test*">>},{<<"shards/test*">>, 12, [baz,flag_bar,flag_foo]}},
- {{<<"shards/exact">>},{<<"shards/exact">>, 12, [flag_bar,flag_foo]}},
- {{<<"shards/blacklist*">>},{<<"shards/blacklist*">>, 17, []}},
- {{<<"*">>},{<<"*">>, 1, [flag_foo]}}
- ],
- ?assertEqual(Expected, couch_flags_config:data(test_config())).
-
-latest_override_wins() ->
- Cases = [
- {[
- {"flag||*", "false"}, {"flag||a*", "true"},
- {"flag||ab*", "true"}, {"flag||abc*", "true"}
- ], true},
- {[
- {"flag||*", "true"}, {"flag||a*", "false"},
- {"flag||ab*", "true"}, {"flag||abc*", "false"}
- ], false}
- ],
- [{test_id(Rules, Expected),
- ?_assertEqual(Expected, lists:member(flag,
- flags(hd(couch_flags_config:data(Rules)))))}
- || {Rules, Expected} <- Cases].
-
-flags({{_Pattern}, {_Pattern, _Size, Flags}}) ->
- Flags.
-
-test_id(Items, ExpectedResult) ->
- lists:flatten(io_lib:format("~p -> ~p", [[P || {P, _} <- Items], ExpectedResult])).
-
-
-test_id(Items) ->
- lists:flatten(io_lib:format("~p", [[P || {P, _} <- Items]])).
-
-test_config() ->
- [
- {"flag_foo||*", "true"},
- {"flag_bar||*", "false"},
- {"flag_bar||shards/test*", "true"},
- {"flag_foo||shards/blacklist*", "false"},
- {"baz||shards/test*", "true"},
- {"baz||shards/test/blacklist*", "false"},
- {"flag_bar||shards/exact", "true"},
- {"flag_bar||shards/test/exact", "true"}
- ].
-
-parse_flags_term_test_() ->
- LongBinary = binary:copy(<<"a">>, ?MAX_FLAG_NAME_LENGTH + 1),
- ExpectedError = {error, {"Cannot parse list of tags: ~n~p",
- [{too_long, LongBinary}]}},
- ExpectedUnknownError = {error,{"Cannot parse list of tags: ~n~p",
- [{invalid_flag,<<"dddddddd">>}]}},
- [
- {"empty binary", ?_assertEqual(
- [], couch_flags_config:parse_flags_term(<<>>))},
- {"single flag", ?_assertEqual(
- [fff], couch_flags_config:parse_flags_term(<<"fff">>))},
- {"sorted", ?_assertEqual(
- [aaa,bbb,fff], couch_flags_config:parse_flags_term(<<"fff,aaa,bbb">>))},
- {"whitespace", ?_assertEqual(
- [aaa,bbb,fff], couch_flags_config:parse_flags_term(<<"fff , aaa, bbb ">>))},
- {"error", ?_assertEqual(
- ExpectedError, couch_flags_config:parse_flags_term(LongBinary))},
- {"unknown_flag", ?_assertEqual(
- ExpectedUnknownError, couch_flags_config:parse_flags_term(<<"dddddddd">>))}
- ].
-
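
Every rule string above has the same shape: "flag_name||pattern" mapped to
"true" or "false", which couch_flags_config:data/1 compiles into per-pattern
flag sets where the most specific (longest) matching pattern wins. A
hypothetical two-rule example:

    example_rules() ->
        [
            {"foo||*", "true"},                % foo enabled everywhere...
            {"foo||shards/secret*", "false"}   % ...except under shards/secret
        ].
    %% couch_flags_config:data(example_rules()) yields one entry per pattern;
    %% the longer "shards/secret*" pattern overrides the "*" default there.
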
diff --git a/src/couch/test/eunit/couch_flags_tests.erl b/src/couch/test/eunit/couch_flags_tests.erl
deleted file mode 100644
index 32ec57b77..000000000
--- a/src/couch/test/eunit/couch_flags_tests.erl
+++ /dev/null
@@ -1,150 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_flags_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-%% couch_epi_plugin behaviour callbacks
--export([
- app/0,
- providers/0,
- services/0,
- data_providers/0,
- data_subscriptions/0,
- processes/0,
- notify/3
-]).
-
--export([
- rules/0
-]).
-
-app() ->
- test_app.
-
-providers() ->
- [{feature_flags, ?MODULE}].
-
-services() ->
- [].
-
-data_providers() ->
- [].
-
-data_subscriptions() ->
- [].
-
-processes() ->
- [].
-
-notify(_, _, _) ->
- ok.
-
-rules() ->
- test_config().
-
-setup() ->
- %% FIXME after we upgrade couch_epi
- application:stop(couch_epi), % in case it's already running from other tests...
- application:unload(couch_epi),
-
- application:load(couch_epi),
- application:set_env(couch_epi, plugins, [couch_db_epi, ?MODULE]),
- meck:expect(config, get, 1, []),
-
- Ctx = test_util:start_couch([couch_epi]),
- Ctx.
-
-
-teardown(Ctx) ->
- test_util:stop_couch(Ctx),
- ok = application:unload(couch_epi),
- meck:unload(),
- ok.
-
-couch_flags_test_() ->
- {
- "test couch_flags",
- {
- setup, fun setup/0, fun teardown/1,
- enabled_flags_tests()
- ++ is_enabled()
-%% ++ match_performance()
- }
- }.
-
-enabled_flags_tests() ->
-
- [{"enabled_flags_tests", [
- {"flags_default_rule",
- ?_assertEqual(
- [foo], couch_flags:enabled("something"))},
- {"flags_wildcard_rule",
- ?_assertEqual(
- [bar, baz, foo],
- couch_flags:enabled("shards/test/something"))},
- {"flags_exact_rule",
- ?_assertEqual(
- [bar, baz, foo],
- couch_flags:enabled("shards/test/exact"))},
- {"flags_blacklist_rule",
- ?_assertEqual(
- [],
- couch_flags:enabled("shards/blacklist/4"))}
- ]}].
-
-is_enabled() ->
- [{"is_enabled_tests", [
- {"flags_default_rule [enabled]",
- ?_assert(couch_flags:is_enabled(foo, "something"))},
- {"flags_default_rule [disabled]",
- ?_assertNot(couch_flags:is_enabled(baz, "something"))},
- {"flags_default_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "something"))},
-
- {"flags_wildcard_rule [enabled]",
- ?_assert(couch_flags:is_enabled(bar, "shards/test/something"))},
- {"flags_wildcard_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/something"))},
-
- {"flags_exact_rule [overide_disbled]",
- ?_assert(couch_flags:is_enabled(bar, "shards/test/exact"))},
- {"flags_exact_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/exact"))},
-
- {"flags_blacklist_rule [overide_enabled]",
- ?_assertNot(couch_flags:is_enabled(foo, "shards/blacklist/4"))},
- {"flags_blacklist_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "shards/blacklist/4"))}
- ]}].
-
-%% match_performance() ->
-%% [{"match_performance", [
-%% ?_test(begin
-%% ?debugTime("1 million of operations took", lists:foreach(fun(_) ->
-%% couch_flags:is_enabled(bar, "shards/test/exact")
-%% end, lists:seq(1, 1000000)))
-%% end)
-%% ]}].
-
-
-test_config() ->
- [
- {"foo||/*", "true"},
- {"bar||/*", "false"},
- {"bar||/shards/test*", "true"},
- {"foo||/shards/blacklist*", "false"},
- {"baz||/shards/test*", "true"},
- {"bar||/shards/exact", "true"},
- {"bar||/shards/test/exact", "true"}
- ].
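
Once rules like test_config/0 are registered through a couch_epi
feature_flags provider, callers only see the two query functions exercised
above: couch_flags:enabled/1 lists the flags for a name and
couch_flags:is_enabled/2 checks a single flag. A small usage sketch (the
branch labels are hypothetical):

    flag_gate(DbName) ->
        case couch_flags:is_enabled(foo, DbName) of
            true  -> new_code_path;
            false -> old_code_path
        end.
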
diff --git a/src/couch/test/eunit/couch_hotp_tests.erl b/src/couch/test/eunit/couch_hotp_tests.erl
deleted file mode 100644
index fee10ff5e..000000000
--- a/src/couch/test/eunit/couch_hotp_tests.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_hotp_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-hotp_test() ->
- Key = <<"12345678901234567890">>,
- ?assertEqual(755224, couch_hotp:generate(sha, Key, 0, 6)),
- ?assertEqual(287082, couch_hotp:generate(sha, Key, 1, 6)),
- ?assertEqual(359152, couch_hotp:generate(sha, Key, 2, 6)),
- ?assertEqual(969429, couch_hotp:generate(sha, Key, 3, 6)),
- ?assertEqual(338314, couch_hotp:generate(sha, Key, 4, 6)),
- ?assertEqual(254676, couch_hotp:generate(sha, Key, 5, 6)),
- ?assertEqual(287922, couch_hotp:generate(sha, Key, 6, 6)),
- ?assertEqual(162583, couch_hotp:generate(sha, Key, 7, 6)),
- ?assertEqual(399871, couch_hotp:generate(sha, Key, 8, 6)),
- ?assertEqual(520489, couch_hotp:generate(sha, Key, 9, 6)).
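
The expected values are the RFC 4226 Appendix D test vectors for the ASCII
key "12345678901234567890". For comparison, a compact reference sketch of the
same algorithm, HMAC-SHA-1 with dynamic truncation (assumes OTP 22.1+ for
crypto:mac/4; older releases used crypto:hmac/3):

    hotp_ref(Key, Counter, Digits) ->
        Mac = crypto:mac(hmac, sha, Key, <<Counter:64>>),  % 20-byte HMAC-SHA-1
        Offset = binary:last(Mac) band 16#0F,              % low nibble of last byte
        <<_:Offset/binary, _:1, Code:31, _/binary>> = Mac, % 31-bit dynamic code
        Code rem trunc(math:pow(10, Digits)).              % keep Digits digits
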
diff --git a/src/couch/test/eunit/couch_index_tests.erl b/src/couch/test/eunit/couch_index_tests.erl
deleted file mode 100644
index 23c857d6c..000000000
--- a/src/couch/test/eunit/couch_index_tests.erl
+++ /dev/null
@@ -1,232 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("stdlib/include/ms_transform.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- tracer_new(),
- DbName.
-
-teardown(DbName) ->
- tracer_delete(),
- couch_server:delete(DbName, [?ADMIN_CTX]).
-
-couch_index_ioq_priority_test_() ->
- {
- "Test ioq_priority for views",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun check_io_priority_for_updater/1,
- fun check_io_priority_for_compactor/1
- ]
- }
- }
- }.
-
-
-check_io_priority_for_updater(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
- CouchIndexUpdaterPid = updater_pid(IndexerPid),
- tracer_record(CouchIndexUpdaterPid),
-
- create_docs(DbName),
-
- CommittedSeq = couch_util:with_db(DbName, fun(Db) -> couch_db:get_update_seq(Db) end),
- couch_index:get_state(IndexerPid, CommittedSeq),
- [UpdaterPid] = wait_spawn_event_for_pid(CouchIndexUpdaterPid),
-
- [UpdaterMapProcess] = wait_spawn_by_anonymous_fun(
- UpdaterPid, '-start_update/4-fun-0-'),
-
- ?assert(wait_set_io_priority(
- UpdaterMapProcess, {view_update, DbName, <<"_design/foo">>})),
-
- [UpdaterWriterProcess] = wait_spawn_by_anonymous_fun(
- UpdaterPid, '-start_update/4-fun-1-'),
- ?assert(wait_set_io_priority(
- UpdaterWriterProcess, {view_update, DbName, <<"_design/foo">>})),
-
- ok
- end).
-
-check_io_priority_for_compactor(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
- {ok, CompactorPid} = couch_index:get_compactor_pid(IndexerPid),
- tracer_record(CompactorPid),
-
- create_docs(DbName),
-
- couch_index:compact(IndexerPid),
- wait_spawn_event_for_pid(CompactorPid),
-
- [CompactorProcess] = wait_spawn_by_anonymous_fun(
- CompactorPid, '-handle_call/3-fun-0-'),
- ?assert(wait_set_io_priority(
- CompactorProcess, {view_compact, DbName, <<"_design/foo">>})),
- ok
- end).
-
-create_docs(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
-
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
-
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
- couch_db:close(Db).
-
-create_design_doc(DbName, DDName, ViewName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {ViewName, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
- ]}}
- ]}}
- ]}),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db),
- Rev.
-
-wait_set_io_priority(Pid, IOPriority) ->
- test_util:wait_value(fun() ->
- does_process_set_io_priority(Pid, IOPriority)
- end, true).
-
-does_process_set_io_priority(Pid, IOPriority) ->
- PutCallsArgs = find_calls_to_fun(Pid, {erlang, put, 2}),
- lists:any(fun([_, Priority]) -> Priority =:= IOPriority end, PutCallsArgs).
-
-wait_events(MatchSpec) ->
- test_util:wait_other_value(fun() -> select(MatchSpec) end, []).
-
-find_spawned_by_anonymous_fun(ParentPid, Name) ->
- AnonymousFuns = select(ets:fun2ms(fun
- ({spawned, Pid, _TS, _Name, _Dict, [PPid, {erlang, apply, [Fun, _]}]})
- when is_function(Fun) andalso PPid =:= ParentPid -> {Pid, Fun}
- end)),
- lists:filtermap(fun({Pid, Fun}) ->
- case erlang:fun_info(Fun, name) of
- {name, Name} -> {true, Pid};
- _ -> false
- end
- end, AnonymousFuns).
-
-find_calls_to_fun(Pid, {Module, Function, Arity}) ->
- select(ets:fun2ms(fun
- ({call, P, _TS, _Name, _Dict, [{M, F, Args}]})
- when length(Args) =:= Arity
- andalso M =:= Module
- andalso F =:= Function
- andalso P =:= Pid
- -> Args
- end)).
-
-wait_spawn_event_for_pid(ParentPid) ->
- wait_events(ets:fun2ms(fun
- ({spawned, Pid, _TS, _Name, _Dict, [P, _]}) when P =:= ParentPid -> Pid
- end)).
-
-wait_spawn_by_anonymous_fun(ParentPid, Name) ->
- test_util:wait_other_value(fun() ->
- find_spawned_by_anonymous_fun(ParentPid, Name)
- end, []).
-
-updater_pid(IndexerPid) ->
- {links, Links} = process_info(IndexerPid, links),
- [Pid] = select_process_by_name_prefix(Links, "couch_index_updater:init/1"),
- Pid.
-
-select_process_by_name_prefix(Pids, Name) ->
- lists:filter(fun(Pid) ->
- Key = couch_debug:process_name(Pid),
- string:str(Key, Name) =:= 1
- end, Pids).
-
-select(MatchSpec) ->
- lists:filtermap(fun(Event) ->
- case ets:test_ms(Event, MatchSpec) of
- {ok, false} -> false;
- {ok, Result} -> {true, Result};
- _ -> false
- end
- end, tracer_events()).
-
-
-%% ========================
-%% Tracer related functions
-%% ------------------------
-tracer_new() ->
- ets:new(?MODULE, [public, named_table]),
- {ok, _Tracer} = dbg:tracer(process, {fun tracer_collector/2, 0}),
- ok.
-
-tracer_delete() ->
- dbg:stop_clear(),
- (catch ets:delete(?MODULE)),
- ok.
-
-tracer_record(Pid) ->
- {ok, _} = dbg:tp(erlang, put, x),
- {ok, _} = dbg:p(Pid, [c, p, sos]),
- ok.
-
-tracer_events() ->
- Events = [{Idx, E} || [Idx, E] <- ets:match(?MODULE, {{trace, '$1'}, '$2'})],
- {_, Sorted} = lists:unzip(lists:keysort(1, Events)),
- Sorted.
-
-tracer_collector(Msg, Seq) ->
- ets:insert(?MODULE, {{trace, Seq}, normalize_trace_msg(Msg)}),
- Seq + 1.
-
-normalize_trace_msg(TraceMsg) ->
- case tuple_to_list(TraceMsg) of
- [trace_ts, Pid, Type | Info] ->
- {TraceInfo, [Timestamp]} = lists:split(length(Info)-1, Info),
- {Type, Pid, Timestamp, couch_debug:process_name(Pid), process_info(Pid), TraceInfo};
- [trace, Pid, Type | TraceInfo] ->
- {Type, Pid, os:timestamp(), couch_debug:process_name(Pid), process_info(Pid), TraceInfo}
- end.
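
The tracer helpers above lean on OTP's dbg module: a fun-based tracer
collects events, dbg:tp/3 installs a global pattern on erlang:put/2, and
dbg:p/2 turns on call and set-on-spawn tracing for a pid. A reduced sketch of
the same wiring (the accumulator list stands in for the ets table used above):

    trace_put_calls(Pid) ->
        {ok, _} = dbg:tracer(process, {fun(Msg, Acc) -> [Msg | Acc] end, []}),
        {ok, _} = dbg:tp(erlang, put, x),  % trace erlang:put/2 calls
        {ok, _} = dbg:p(Pid, [c, sos]),    % calls, inherited on spawn
        ok.
    %% dbg:stop_clear() tears it all down, as tracer_delete/0 does above.
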
diff --git a/src/couch/test/eunit/couch_js_tests.erl b/src/couch/test/eunit/couch_js_tests.erl
deleted file mode 100644
index 693cd9772..000000000
--- a/src/couch/test/eunit/couch_js_tests.erl
+++ /dev/null
@@ -1,172 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_js_tests).
--include_lib("eunit/include/eunit.hrl").
-
-
-couch_js_test_() ->
- {
- "Test couchjs",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- [
- fun should_create_sandbox/0,
- fun should_roundtrip_utf8/0,
- fun should_roundtrip_modified_utf8/0,
- fun should_replace_broken_utf16/0,
- fun should_allow_js_string_mutations/0,
- {timeout, 60000, fun should_exit_on_oom/0}
- ]
- }
- }.
-
-
-should_create_sandbox() ->
- % Try to detect whether we can see out of the
- % sandbox or not.
- Src = <<
- "function(doc) {\n"
- " try {\n"
- " emit(false, typeof(Couch.compile_function));\n"
- " } catch (e) {\n"
- " emit(true, e.message);\n"
- " }\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, <<"{}">>]),
- ?assertEqual([[[true, <<"Couch is not defined">>]]], Result).
-
-
-should_roundtrip_utf8() ->
- % Try round tripping UTF-8 both directions through
- % couchjs. These tests use hex encoded values of
- % Ä (C384) and Ü (C39C) so as to avoid odd editor/Erlang encoding
- % strangeness.
- Src = <<
- "function(doc) {\n"
- " emit(doc.value, \"", 16#C3, 16#9C, "\");\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Doc = {[
- {<<"value">>, <<16#C3, 16#84>>}
- ]},
- Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
- ?assertEqual([[[<<16#C3, 16#84>>, <<16#C3, 16#9C>>]]], Result).
-
-
-should_roundtrip_modified_utf8() ->
- % Mimicking the test case from the mailing list
- Src = <<
- "function(doc) {\n"
- " emit(doc.value.toLowerCase(), \"", 16#C3, 16#9C, "\");\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Doc = {[
- {<<"value">>, <<16#C3, 16#84>>}
- ]},
- Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
- ?assertEqual([[[<<16#C3, 16#A4>>, <<16#C3, 16#9C>>]]], Result).
-
-
-should_replace_broken_utf16() ->
- % This test reverses the surrogate pair of
- % the Boom emoji U+1F4A5
- Src = <<
- "function(doc) {\n"
- " emit(doc.value.split(\"\").reverse().join(\"\"), 1);\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Doc = {[
- {<<"value">>, list_to_binary(xmerl_ucs:to_utf8([16#1F4A5]))}
- ]},
- Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
- % Each broken surrogate half is replaced with the 16#FFFD
- % replacement marker
- Markers = list_to_binary(xmerl_ucs:to_utf8([16#FFFD, 16#FFFD])),
- ?assertEqual([[[Markers, 1]]], Result).
-
-
-should_allow_js_string_mutations() ->
- % This binary corresponds to this string: мама мыла раму
- % Which I'm told translates to: "mom was washing the frame"
- MomWashedTheFrame = <<
- 16#D0, 16#BC, 16#D0, 16#B0, 16#D0, 16#BC, 16#D0, 16#B0, 16#20,
- 16#D0, 16#BC, 16#D1, 16#8B, 16#D0, 16#BB, 16#D0, 16#B0, 16#20,
- 16#D1, 16#80, 16#D0, 16#B0, 16#D0, 16#BC, 16#D1, 16#83
- >>,
- Mom = <<16#D0, 16#BC, 16#D0, 16#B0, 16#D0, 16#BC, 16#D0, 16#B0>>,
- Washed = <<16#D0, 16#BC, 16#D1, 16#8B, 16#D0, 16#BB, 16#D0, 16#B0>>,
- Src1 = <<
- "function(doc) {\n"
- " emit(\"length\", doc.value.length);\n"
- "}\n"
- >>,
- Src2 = <<
- "function(doc) {\n"
- " emit(\"substring\", doc.value.substring(5, 9));\n"
- "}\n"
- >>,
- Src3 = <<
- "function(doc) {\n"
- " emit(\"slice\", doc.value.slice(0, 4));\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src1]),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src2]),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src3]),
- Doc = {[{<<"value">>, MomWashedTheFrame}]},
- Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
- Expect = [
- [[<<"length">>, 14]],
- [[<<"substring">>, Washed]],
- [[<<"slice">>, Mom]]
- ],
- ?assertEqual(Expect, Result).
-
-
-should_exit_on_oom() ->
- Src = <<
- "var state = [];\n"
- "function(doc) {\n"
- " var val = \"0123456789ABCDEF\";\n"
- " for(var i = 0; i < 165535; i++) {\n"
- " state.push([val, val]);\n"
- " }\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- trigger_oom(Proc).
-
-trigger_oom(Proc) ->
- Status = try
- couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, <<"{}">>]),
- continue
- catch throw:{os_process_error, {exit_status, 1}} ->
- done
- end,
- case Status of
- continue -> trigger_oom(Proc);
- done -> ok
- end.
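
Each test above drives the same query-server protocol: check an OS process
out of the pool, register map sources with "add_fun", feed documents through
"map_doc", and read rows back. A sketch that also returns the process when
done (ret_os_process/1 is the pool-return call used elsewhere in couch):

    map_one_doc(Src, Doc) ->
        Proc = couch_query_servers:get_os_process(<<"javascript">>),
        try
            true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
            couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc])
        after
            couch_query_servers:ret_os_process(Proc)
        end.
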
diff --git a/src/couch/test/eunit/couch_key_tree_prop_tests.erl b/src/couch/test/eunit/couch_key_tree_prop_tests.erl
deleted file mode 100644
index 9c09aace5..000000000
--- a/src/couch/test/eunit/couch_key_tree_prop_tests.erl
+++ /dev/null
@@ -1,530 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_key_tree_prop_tests).
-
-
--ifdef(WITH_PROPER).
-
--include_lib("couch/include/couch_eunit_proper.hrl").
-
-
--define(SIZE_REDUCTION, 3). % How much to reduce size with tree depth.
--define(MAX_BRANCHES, 4). % Maximum number of branches.
--define(RAND_SIZE, 1 bsl 64).
-
-
-property_test_() ->
- ?EUNIT_QUICKCHECK(60).
-
-
-%
-% Properties
-%
-
-
-% Merge random paths from a revtree into itself. Check that no revisions have
-% been lost in the process and that result is one of the 3 expected values.
-%
-prop_revtree_merge_with_subset_of_own_nodes() ->
- ?FORALL(Revs, g_revs(),
- ?FORALL({RevTree, Branch}, {g_revtree(Revs), g_revtree(Revs, 1)},
- ?IMPLIES(length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [],
- begin
- {Merged, Result} = couch_key_tree:merge(RevTree, hd(Branch)),
- lists:member(Result, [new_leaf, new_branch, internal_node])
- andalso same_keys(RevTree ++ Branch, Merged)
- andalso valid_revtree(Merged)
- end
- )
- )
- ).
-
-
-% Merge random trees into revtree.
-%
-prop_revtree_merge_random_nodes() ->
- ?FORALL({RevTree, Branch}, {g_revtree(), g_revtree([], 1)},
- ?IMPLIES(length(Branch) > 0,
- begin
- {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)),
- valid_revtree(Merged)
- end
- )
- ).
-
-
-
-% Merge mix or random and existing revtree paths into revtree
-%
-prop_revtree_merge_some_existing_some_new() ->
- ?FORALL(RevTree, g_revtree(),
- ?FORALL(Branch,
- begin
- KeyList = keylist(RevTree),
- Half = lists:sublist(KeyList, length(KeyList) div 2),
- g_revtree(Half, 1)
- end,
- ?IMPLIES(length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [],
- begin
- {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)),
- valid_revtree(Merged)
- end
- )
- )
- ).
-
-
-
-% Stem deeper than the current max level. Expect no changes to the revtree
-%
-prop_no_change_stemming_deeper_than_current_depth() ->
- ?FORALL(RevTree, g_revtree(),
- begin
- StemDepth = depth(RevTree) + 1,
- Stemmed = couch_key_tree:stem(RevTree, StemDepth),
- StemmedKeys = lists:usort(keylist(Stemmed)),
- InputKeys = lists:usort(keylist(RevTree)),
- StemmedKeys == InputKeys
- end
- ).
-
-
-% Stem at a random small depth, make sure that resulting tree has
-% unique revisions and the same number or less revisions than input
-%
-prop_stemming_results_in_same_or_less_total_revs() ->
- ?FORALL({RevTree, StemDepth}, {g_revtree(), choose(1, 20)},
- begin
- Stemmed = couch_key_tree:stem(RevTree, StemDepth),
- OldRealDepth = real_depth(RevTree),
- StemmedKeys = keylist(Stemmed),
- UniqueStemmedKeys = lists:usort(StemmedKeys),
- UniqueInputKeys = lists:usort(keylist(RevTree)),
- NewRealDepth = real_depth(Stemmed),
- length(StemmedKeys) == length(UniqueStemmedKeys)
- andalso length(UniqueStemmedKeys) =< length(UniqueInputKeys)
- andalso OldRealDepth >= NewRealDepth
- end
- ).
-
-
-% Generate a longer path (revtree with no branches) then stem it.
-% Always expect it to shrink to stemmed depth.
-prop_stem_path_expect_size_to_get_smaller() ->
- ?FORALL({RevTree, StemDepth},
- {
- ?SIZED(Size, g_revtree(Size * 10, [], 1)),
- choose(1,3)
- },
- ?IMPLIES(real_depth(RevTree) > 3,
- begin
- Stemmed = couch_key_tree:stem(RevTree, StemDepth),
- StemmedKeys = lists:usort(keylist(Stemmed)),
- InputKeys = lists:usort(keylist(RevTree)),
- length(InputKeys) > length(StemmedKeys)
- andalso real_depth(Stemmed) == StemDepth
- end
- )
- ).
-
-
-% After stemming all leaves are still present
-prop_after_stemming_all_leaves_are_present() ->
- ?FORALL({RevTree, StemDepth},
- {g_revtree(), choose(1,20)},
- begin
- OldRealDepth = real_depth(RevTree),
- OldLeaves = leaves(RevTree),
- Stemmed = couch_key_tree:stem(RevTree, StemDepth),
- NewRealDepth = real_depth(Stemmed),
- NewLeaves = leaves(Stemmed),
- valid_revtree(Stemmed)
- andalso OldRealDepth >= NewRealDepth
- andalso OldLeaves == NewLeaves
-
- end
- ).
-
-
-% After stemming, paths to the root don't get longer
-prop_after_stemming_paths_are_shorter() ->
- ?FORALL({StemDepth, RevTree}, {choose(2,10), g_revtree()},
- begin
- OldPaths = paths(RevTree),
- Stemmed = couch_key_tree:stem(RevTree, StemDepth),
- NewPaths = paths(Stemmed),
- GrowingPaths = orddict:fold(fun(Rev, Path, Acc) ->
- OldPath = orddict:fetch(Rev, OldPaths),
- case length(Path) > length(OldPath) of
- true ->
- [{Rev, Path, OldPath}| Acc];
- false ->
- Acc
- end
- end, [], NewPaths),
- valid_revtree(Stemmed) andalso GrowingPaths == []
- end
- ).
-
-
-% Check leaf count
-prop_leaf_count() ->
- ?FORALL(RevTree, g_revtree(),
- length(leaves(RevTree)) == couch_key_tree:count_leafs(RevTree)
- ).
-
-
-% Check get leafs
-prop_get_leafs() ->
- ?FORALL(RevTree, g_revtree(),
- begin
- LeafsFull = couch_key_tree:get_all_leafs(RevTree),
- lists:usort([Rev || {_V, {_D, [Rev | _]}} <- LeafsFull]) == leaves(RevTree)
- end
- ).
-
-
-%
-% Generators
-%
-
-% Generate a full rev tree. Most of the forms are just there to set up
-% default parameters; g_revtree/3 does all the heavy lifting.
-%
-g_revtree() ->
- ?SIZED(Size, g_revtree(Size)).
-
-
-g_revtree(Size) when is_integer(Size) ->
- g_revtree(Size, [], ?MAX_BRANCHES);
-g_revtree(Revs) when is_list(Revs) ->
- ?SIZED(Size, g_revtree(Size, Revs, ?MAX_BRANCHES)).
-
-
-g_revtree(Size, Revs) when is_integer(Size), is_list(Revs) ->
- g_revtree(Size, Revs, ?MAX_BRANCHES);
-g_revtree(Revs, MaxBranches) when is_list(Revs), is_integer(MaxBranches) ->
- ?SIZED(Size, g_revtree(Size, Revs, MaxBranches)).
-
-
-g_revtree(0, _Revs, _MaxBranches) ->
- [];
-g_revtree(Size, ERevs, MaxBranches) ->
- ?LET({Depth, Revs}, {g_stem_depth(Size), g_revs(Size, ERevs)},
- [{Depth, g_treenode(Size, Revs, MaxBranches)}]
- ).
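-
-% For reference, a revtree generated here has the same shape the real code
-% uses: a list of {StemDepth, Subtree} pairs where every node is
-% {Rev, Value, ChildNodes}. A hypothetical generated value might look like:
-%
-%   [{1, {907135, x, [{929817, x, []}, {982616, x, []}]}}]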
-
-
-% Generate a tree node and then recursively generate its children.
-%
-g_treenode(0, Revs, _) ->
- {elements(Revs), x, []};
-g_treenode(Size, Revs, MaxBranches) ->
- ?LAZY(?LET(N, choose(0, MaxBranches),
- begin
- [Rev | ChildRevs] = Revs,
- {Rev, x, g_nodes(Size div ?SIZE_REDUCTION, N, ChildRevs, MaxBranches)}
- end
- )).
-
-
-% Generate a list of child nodes. Depending on how many children there are,
-% the pre-generated revision list is split into that many sublists.
-%
-g_nodes(0, _N, _Revs, _MaxBranches) ->
- [];
-g_nodes(_Size, 0, _Revs, _MaxBranches) ->
- [];
-g_nodes(Size, ChildCount, Revs, MaxBranches) ->
- ?LETSHRINK(
- ChildNodes,
- begin
- ChildRevList = child_revs(ChildCount, Revs, Size, MaxBranches),
- [g_treenode(Size, ChildRevs, MaxBranches) || ChildRevs <- ChildRevList]
- end,
- ordered_nodes(ChildNodes)
- ).
-
-
-% Generate each subtree's stem depth
-%
-g_stem_depth(Size) ->
- choose(0, expected_height(Size, ?SIZE_REDUCTION) div 2).
-
-
-% Shuffle the input list. The unshuffled list is used as the shrink value.
-%
-g_shuffle([]) -> [];
-g_shuffle(L) when is_list(L) ->
- ?LET(X, elements(L), [X | g_shuffle(lists:delete(X,L))]).
-
-
-% Wrapper to make a list shuffling generator that doesn't shrink
-%
-g_shuffle_noshrink(L) when is_list(L) ->
- proper_types:noshrink(g_shuffle(L)).
-
-
-% Generate shuffled sublists up to N items long from a list.
-%
-g_shuffled_sublists(L, N) ->
- ?LET(Shuffled, g_shuffle_noshrink(L), lists:sublist(Shuffled, N)).
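-
-% As a sketch, g_shuffled_sublists([a, b, c, d], 2) generates values such
-% as [c, a] or [b, d], i.e. a 2-element prefix of a random permutation; the
-% permutation itself is wrapped in noshrink so shrinking won't reorder it.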
-
-
-% Generate revision lists.
-%
-g_revs() ->
- ?SIZED(Size, g_revs(Size)).
-
-
-g_revs(Size) when is_integer(Size) ->
- g_revs(Size, []).
-
-
-g_revs(Size, Existing) when is_integer(Size), is_list(Existing) ->
- Expected = keys_needed(Size, ?SIZE_REDUCTION, ?MAX_BRANCHES),
- Revs = revs(Expected, Existing),
- case length(Revs) > Expected of
- true -> % have extra, try various sublists
- g_shuffled_sublists(Revs, Expected);
- false ->
- proper_types:return(Revs)
- end.
-
-
-%
-% Helper functions
-%
-
-
-valid_revtree(RevTree) ->
- repeating_revs(levels(RevTree)) == [] andalso children_sorted(RevTree).
-
-
-same_keys(RevTree1, RevTree2) ->
- Keys1 = lists:usort(keylist(RevTree1)),
- Keys2 = lists:usort(keylist(RevTree2)),
- Keys1 == Keys2.
-
-
-all(L) ->
- lists:all(fun(E) -> E end, L).
-
-
-% Generate a list of relatively unique large random numbers
-rand_list(N) when N =< 0 ->
- [];
-rand_list(N) ->
- [rand:uniform(?RAND_SIZE) || _ <- lists:seq(1, N)].
-
-
-% Generate a list of revisions to be used as keys in revision trees. Expected
-% must be the maximum expected number of nodes in a revision tree. Existing is
-% an optional list of revisions which must be included in the result. The
-% output list is sorted.
-revs(0, _Existing) ->
- [];
-revs(Expected, Existing) when is_integer(Expected), is_list(Existing) ->
- Need = Expected - length(Existing),
- lists:usort(lists:append(Existing, rand_list(Need))).
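-
-% A worked sketch with made-up numbers: revs(4, [100, 200]) computes
-% Need = 2, appends two fresh random numbers and returns something like
-% [100, 200, 573208, 912446], i.e. a sorted, de-duplicated list that still
-% contains every existing revision.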
-
-
-% Get the list of all the keys in a revision tree. The input can also be an
-% individual tree (tagged with the depth to the virtual root) or a node.
-% This is not tail recursive, but the idea is to keep it simple.
-%
-keylist({_D, Node}) when is_tuple(Node) ->
- keylist(Node);
-keylist({K, _V, Nodes}) ->
- [K | keylist(Nodes)];
-keylist(Nodes) ->
- lists:append([keylist(Node) || Node <- Nodes]).
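-
-% For example (a sketch with atom keys, not part of the suite):
-%
-%   keylist([{1, {r1, x, [{r1a, x, []}, {r1b, x, []}]}}]) =:= [r1, r1a, r1b]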
-
-
-% Get the list of leaves from a revision tree.
-leaves([]) ->
- [];
-leaves({_D, Node}) when is_tuple(Node) ->
- leaves(Node);
-leaves({K, _V, []}) ->
- [K];
-leaves({_K, _V, Nodes}) ->
- leaves(Nodes);
-leaves(Nodes) ->
- lists:usort(lists:append([leaves(N) || N <- Nodes])).
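-
-% On the same sketch tree only the childless nodes survive, usorted:
-%
-%   leaves([{1, {r1, x, [{r1a, x, []}, {r1b, x, []}]}}]) =:= [r1a, r1b]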
-
-
-% Get paths from leaf to root. Result is an orddict of [{LeafRev, [Rev]}]
-%
-paths([]) ->
- orddict:new();
-paths(RevTree) when is_list(RevTree) ->
- paths_merge_dicts([paths(T) || T <- RevTree]);
-paths({_Depth, Node}) when is_tuple(Node) ->
- paths(Node);
-paths({K, _V, []}) ->
- orddict:store(K, [], orddict:new());
-paths({K, _V, Nodes}) ->
- CombinedDict = paths_merge_dicts([paths(N) || N <- Nodes]),
- orddict:map(fun(_LeafKey, Path) -> Path ++ [K] end, CombinedDict).
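-
-% Sketch: a leaf maps to its ancestors, nearest ancestor first, with the
-% leaf itself excluded from the path:
-%
-%   paths([{1, {r1, x, [{r1a, x, []}]}}]) =:= [{r1a, [r1]}]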
-
-
-paths_merge_dicts(Dicts) ->
- lists:foldl(fun(D, AccD) ->
- orddict:merge(fun(K, V1, V2) ->
- throw({found_duplicates, K, V1, V2})
- end, D, AccD)
- end, orddict:new(), Dicts).
-
-
-% Get lists of all the keys at each depth level. Result is an orddict that
-% looks like [{depth, [key]}]. The depth used here is the "virtual" depth as
-% indicated by the stemmed depth tag that goes with every top level subtree.
-%
-levels([]) ->
- orddict:new();
-levels(RevTree) when is_list(RevTree) ->
- lists:foldl(fun(T, Dict) -> levels(T, Dict) end, orddict:new(), RevTree).
-
-
-levels({Depth, Node}, Dict) when is_tuple(Node) ->
- levels(Node, Depth, Dict).
-
-
-levels({K, _V, Nodes}, Depth, Dict) ->
- Dict1 = case orddict:is_key(Depth, Dict) of
- true -> orddict:append(Depth, K, Dict);
- false -> orddict:store(Depth, [K], Dict)
- end,
- levels(Nodes, Depth + 1, Dict1);
-levels(Nodes, Depth, Dict) ->
- lists:foldl(fun(Node, AccDict) ->
- levels(Node, Depth, AccDict)
- end, Dict, Nodes).
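-
-% Sketch: each key is recorded at its virtual depth, so a subtree tagged
-% with stem depth 2 starts counting there:
-%
-%   levels([{2, {r1a, x, [{r1aa, x, []}]}}]) =:= [{2, [r1a]}, {3, [r1aa]}]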
-
-
-% Using the output of levels/1 as input, return any repeating revisions found
-% at a particular level. Levels which have no repeating revisions are not
-% returned.
-%
-repeating_revs(Dict) ->
- orddict:filter(fun(_Depth, Revs) ->
- length(lists:usort(Revs)) =/= length(Revs)
- end, Dict).
-
-
-% Check that children of all nodes are sorted
-children_sorted([]) ->
- true;
-children_sorted(Nodes) when is_list(Nodes) ->
- all([children_sorted(N) || N <- Nodes]);
-children_sorted({_D, Node}) when is_tuple(Node) ->
- children_sorted(Node);
-children_sorted({_K, _V, Nodes}) ->
- children_sorted(Nodes).
-
-
-% Get the maximum depth of a revtree. The depth is "virtual" as it takes into
-% account the distance to the now stemmed root node as indicated by the top
-% level subtrees.
-%
-depth([]) ->
- 0;
-depth(RevTree) when is_list(RevTree) ->
- lists:max([depth(T) || T <- RevTree]);
-depth({Depth, Node}) when is_tuple(Node) ->
- depth(Node, Depth - 1).
-
-
-depth({_K, _V, Nodes}, Depth) ->
- depth(Nodes, Depth + 1);
-depth([], Depth) ->
- Depth;
-depth(Nodes, Depth) ->
- lists:max([depth(Node, Depth) || Node <- Nodes]).
-
-
-% Get the "real" tree depth, not the virtual one. As revtrees gets stemmed they
-% will keep their virtual depth but the actual number of nodes in the tree
-% could be reduced.
-%
-real_depth([]) ->
- 0;
-real_depth(RevTree) when is_list(RevTree) ->
- lists:max([real_depth(T) || T <- RevTree]);
-real_depth({_Depth, Node}) when is_tuple(Node) ->
-    depth(Node, 0). % Note: from here on the depth/2 helper is used
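-
-% Sketch of the difference: a stemmed subtree keeps its virtual distance to
-% the removed root, while real_depth/1 only counts the surviving nodes:
-%
-%   T = [{2, {r1a, x, [{r1aa, x, []}]}}],
-%   3 = depth(T),      % virtual: one stemmed level plus two live nodes
-%   2 = real_depth(T). % just the two live nodes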
-
-
-% Return an ordered list of revtree nodes. When sorting, only immediate keys
-% (revisions) are looked at and the comparison doesn't descend into the tree.
-%
-ordered_nodes(Nodes) ->
- lists:sort(fun({K1, _, _}, {K2, _, _}) -> K1 =< K2 end, Nodes).
-
-
-% Calculate the maximum number of rev tree nodes needed for a tree of a given
-% height and branchiness. Height is derived from Size and LevelReductionFactor,
-% that is, how big the sample should be and how quickly the size parameter
-% shrinks at each level.
-%
-keys_needed(0, _, _) ->
- 0;
-keys_needed(Size, LevelReductionFactor, 1) ->
- expected_height(Size, LevelReductionFactor);
-keys_needed(Size, LevelReductionFactor, Branches) ->
- Height = expected_height(Size, LevelReductionFactor),
- trunc(math:pow(Branches, Height + 1)) + 1.
-
-
-% Calculate expected tree height for a given sample size and branchiness.
-% At each step the size is divided by the reduction factor.
-expected_height(Size, LevelReductionFactor) ->
- trunc(log(LevelReductionFactor, Size)) + 1.
-
-
-log(B, X) ->
- math:log(X) / math:log(B).
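-
-% Sketch with made-up numbers (the real ?SIZE_REDUCTION is defined at the
-% top of this module): for Size = 20 and a reduction factor of 3,
-% expected_height(20, 3) is trunc(log3(20)) + 1 = 3, so keys_needed(20, 3, 4)
-% provisions trunc(math:pow(4, 3 + 1)) + 1 = 257 keys.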
-
-
-% Distribute items in a list into roughly equal chunks of a given size.
-%
-distribute(_ChunkSize, []) ->
- [];
-distribute(ChunkSize, L) when ChunkSize >= length(L) ->
- [L];
-distribute(ChunkSize, L) ->
- {L1, L2} = lists:split(ChunkSize, L),
- [L1 | distribute(ChunkSize, L2)].
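-
-% For example (a sketch): distribute(2, [a, b, c, d, e]) returns
-% [[a, b], [c, d], [e]]; the final chunk absorbs any remainder.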
-
-
-% Split a single (parent) revision list into chunks (sub-lists), one for each
-% child. Also, for safety, double-check that at this point in the process the
-% list of revisions is sufficiently large. If it isn't, something went wrong
-% and a specific exception is thrown ({not_enough_revisions, Got, Needed}).
-%
-child_revs(ChildCount, Revs, Size, MaxBranches) ->
- NeedKeys = keys_needed(Size, ?SIZE_REDUCTION, MaxBranches),
- case length(Revs) >= NeedKeys of
- true ->
- ChunkSize = trunc(length(Revs) / ChildCount) + 1,
- distribute(ChunkSize, Revs);
- false ->
- throw({not_enough_revisions, length(Revs), NeedKeys})
- end.
-
--endif.
diff --git a/src/couch/test/eunit/couch_key_tree_tests.erl b/src/couch/test/eunit/couch_key_tree_tests.erl
deleted file mode 100644
index 5d9cc8372..000000000
--- a/src/couch/test/eunit/couch_key_tree_tests.erl
+++ /dev/null
@@ -1,413 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_key_tree_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(DEPTH, 10).
-
-
-key_tree_merge_test_()->
- {
- "Key tree merge",
- [
- should_merge_with_empty_tree(),
- should_merge_reflexive(),
- should_merge_prefix_of_a_tree_with_tree(),
- should_produce_conflict_on_merge_with_unrelated_branch(),
- should_merge_reflexive_for_child_nodes(),
- should_merge_tree_to_itself(),
- should_merge_tree_of_odd_length(),
- should_merge_tree_with_stem(),
- should_merge_with_stem_at_deeper_level(),
- should_merge_with_stem_at_deeper_level_with_deeper_paths(),
- should_merge_single_tree_with_deeper_stem(),
- should_merge_tree_with_large_stem(),
- should_merge_stems(),
- should_create_conflicts_on_merge(),
- should_create_no_conflicts_on_merge(),
- should_ignore_conflicting_branch()
- ]
- }.
-
-key_tree_missing_leaves_test_()->
- {
- "Missing tree leaves",
- [
- should_not_find_missing_leaves(),
- should_find_missing_leaves()
- ]
- }.
-
-key_tree_remove_leaves_test_()->
- {
- "Remove tree leaves",
- [
- should_have_no_effect_on_removing_no_leaves(),
-            should_have_no_effect_on_removing_non_existent_branch(),
- should_remove_leaf(),
- should_produce_empty_tree_on_removing_all_leaves(),
-            should_have_no_effect_on_removing_non_existent_node(),
- should_produce_empty_tree_on_removing_last_leaf()
- ]
- }.
-
-key_tree_get_leaves_test_()->
- {
- "Leaves retrieving",
- [
- should_extract_subtree(),
- should_extract_subsubtree(),
-            should_gather_non_existent_leaf(),
- should_gather_leaf(),
-            should_gather_multiple_leaves(),
- should_gather_single_leaf_for_multiple_revs(),
- should_gather_multiple_for_multiple_revs(),
- should_retrieve_full_key_path(),
- should_retrieve_full_key_path_for_node(),
- should_retrieve_leaves_with_parent_node(),
- should_retrieve_all_leaves()
- ]
- }.
-
-key_tree_leaf_counting_test_()->
- {
- "Leaf counting",
- [
- should_have_no_leaves_for_empty_tree(),
- should_have_single_leaf_for_tree_with_single_node(),
-            should_have_two_leaves_for_tree_with_child_siblings(),
- should_not_affect_on_leaf_counting_for_stemmed_tree()
- ]
- }.
-
-key_tree_stemming_test_()->
- {
- "Stemming",
- [
- should_have_no_effect_for_stemming_more_levels_than_exists(),
- should_return_one_deepest_node(),
- should_return_two_deepest_nodes()
- ]
- }.
-
-
-should_merge_with_empty_tree()->
- One = {1, {"1","foo",[]}},
- ?_assertEqual({[One], new_leaf},
- merge_and_stem([], One)).
-
-should_merge_reflexive()->
- One = {1, {"1","foo",[]}},
- ?_assertEqual({[One], internal_node},
- merge_and_stem([One], One)).
-
-should_merge_prefix_of_a_tree_with_tree()->
- One = {1, {"1","foo",[]}},
- TwoSibs = [{1, {"1","foo",[]}},
- {1, {"2","foo",[]}}],
- ?_assertEqual({TwoSibs, internal_node},
- merge_and_stem(TwoSibs, One)).
-
-should_produce_conflict_on_merge_with_unrelated_branch()->
- TwoSibs = [{1, {"1","foo",[]}},
- {1, {"2","foo",[]}}],
- Three = {1, {"3","foo",[]}},
- ThreeSibs = [{1, {"1","foo",[]}},
- {1, {"2","foo",[]}},
- {1, {"3","foo",[]}}],
- ?_assertEqual({ThreeSibs, new_branch},
- merge_and_stem(TwoSibs, Three)).
-
-should_merge_reflexive_for_child_nodes()->
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual({[TwoChild], internal_node},
- merge_and_stem([TwoChild], TwoChild)).
-
-should_merge_tree_to_itself()->
- TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
- {"1b", "bar", []}]}},
- Leafs = couch_key_tree:get_all_leafs([TwoChildSibs]),
- Paths = lists:map(fun leaf_to_path/1, Leafs),
- FinalTree = lists:foldl(fun(Path, TreeAcc) ->
- {NewTree, internal_node} = merge_and_stem(TreeAcc, Path),
- NewTree
- end, [TwoChildSibs], Paths),
- ?_assertEqual([TwoChildSibs], FinalTree).
-
-leaf_to_path({Value, {Start, Keys}}) ->
- [Branch] = to_branch(Value, lists:reverse(Keys)),
- {Start - length(Keys) + 1, Branch}.
-
-to_branch(Value, [Key]) ->
- [{Key, Value, []}];
-to_branch(Value, [Key | RestKeys]) ->
- [{Key, [], to_branch(Value, RestKeys)}].
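-
-% Sketch of what these helpers rebuild: a leaf as returned by
-% get_all_leafs/1 becomes a single-branch path suitable for merging back,
-% e.g. (hypothetical values):
-%
-%   leaf_to_path({"bar", {1, ["1a", "1"]}})
-%       =:= {0, {"1", [], [{"1a", "bar", []}]}}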
-
-
-should_merge_tree_of_odd_length()->
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
- {"1b", "bar", []}]}},
- TwoChildPlusSibs = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]},
- {"1b", "bar", []}]}},
- ?_assertEqual({[TwoChildPlusSibs], new_leaf},
- merge_and_stem([TwoChildSibs], TwoChild)).
-
-should_merge_tree_with_stem()->
- Stemmed = {2, {"1a", "bar", []}},
- TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
- {"1b", "bar", []}]}},
-
- ?_assertEqual({[TwoChildSibs], internal_node},
- merge_and_stem([TwoChildSibs], Stemmed)).
-
-should_merge_with_stem_at_deeper_level()->
- Stemmed = {3, {"1bb", "boo", []}},
- TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
- {"1b", "bar", [{"1bb", "boo", []}]}]}},
- ?_assertEqual({[TwoChildSibs], internal_node},
- merge_and_stem([TwoChildSibs], Stemmed)).
-
-should_merge_with_stem_at_deeper_level_with_deeper_paths()->
- Stemmed = {3, {"1bb", "boo", []}},
- StemmedTwoChildSibs = [{2,{"1a", "bar", []}},
- {2,{"1b", "bar", [{"1bb", "boo", []}]}}],
- ?_assertEqual({StemmedTwoChildSibs, internal_node},
- merge_and_stem(StemmedTwoChildSibs, Stemmed)).
-
-should_merge_single_tree_with_deeper_stem()->
- Stemmed = {3, {"1aa", "bar", []}},
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual({[TwoChild], internal_node},
- merge_and_stem([TwoChild], Stemmed)).
-
-should_merge_tree_with_large_stem()->
- Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual({[TwoChild], internal_node},
- merge_and_stem([TwoChild], Stemmed)).
-
-should_merge_stems()->
- StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
- StemmedB = {3, {"1aa", "bar", []}},
- ?_assertEqual({[StemmedA], internal_node},
- merge_and_stem([StemmedA], StemmedB)).
-
-should_create_conflicts_on_merge()->
- OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
- Stemmed = {3, {"1aa", "bar", []}},
- ?_assertEqual({[OneChild, Stemmed], new_branch},
- merge_and_stem([OneChild], Stemmed)).
-
-should_create_no_conflicts_on_merge()->
- OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
- Stemmed = {3, {"1aa", "bar", []}},
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual({[TwoChild], new_leaf},
- merge_and_stem([OneChild, Stemmed], TwoChild)).
-
-should_ignore_conflicting_branch()->
- %% this test is based on couch-902-test-case2.py
- %% foo has conflicts from replication at depth two
- %% foo3 is the current value
- Foo = {1, {"foo",
- "val1",
- [{"foo2","val2",[]},
- {"foo3", "val3", []}
- ]}},
- %% foo now has an attachment added, which leads to foo4 and val4
- %% off foo3
- Bar = {1, {"foo",
- [],
- [{"foo3",
- [],
- [{"foo4","val4",[]}
- ]}]}},
- %% this is what the merge returns
-    %% note that it ignores the conflicting branch as there's no match
- FooBar = {1, {"foo",
- "val1",
- [{"foo2","val2",[]},
- {"foo3", "val3", [{"foo4","val4",[]}]}
- ]}},
- {
- "COUCHDB-902",
- ?_assertEqual({[FooBar], new_leaf},
- merge_and_stem([Foo], Bar))
- }.
-
-should_not_find_missing_leaves()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual([],
- couch_key_tree:find_missing(TwoChildSibs,
- [{0,"1"}, {1,"1a"}])).
-
-should_find_missing_leaves()->
- Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- Stemmed2 = [{2, {"1aa", "bar", []}}],
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- [
- ?_assertEqual(
- [{0, "10"}, {100, "x"}],
- couch_key_tree:find_missing(
- TwoChildSibs,
- [{0,"1"}, {0, "10"}, {1,"1a"}, {100, "x"}])),
- ?_assertEqual(
- [{0, "1"}, {100, "x"}],
- couch_key_tree:find_missing(
- Stemmed1,
- [{0,"1"}, {1,"1a"}, {100, "x"}])),
- ?_assertEqual(
- [{0, "1"}, {1,"1a"}, {100, "x"}],
- couch_key_tree:find_missing(
- Stemmed2,
- [{0,"1"}, {1,"1a"}, {100, "x"}]))
- ].
-
-should_have_no_effect_on_removing_no_leaves()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({TwoChildSibs, []},
- couch_key_tree:remove_leafs(TwoChildSibs,
- [])).
-
-should_have_no_effect_on_removing_non_existent_branch()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({TwoChildSibs, []},
- couch_key_tree:remove_leafs(TwoChildSibs,
- [{0, "1"}])).
-
-should_remove_leaf()->
- OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}],
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({OneChild, [{1, "1b"}]},
- couch_key_tree:remove_leafs(TwoChildSibs,
- [{1, "1b"}])).
-
-should_produce_empty_tree_on_removing_all_leaves()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[], [{1, "1b"}, {1, "1a"}]},
- couch_key_tree:remove_leafs(TwoChildSibs,
- [{1, "1b"}, {1, "1a"}])).
-
-should_have_no_effect_on_removing_non_existent_node()->
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- ?_assertEqual({Stemmed, []},
- couch_key_tree:remove_leafs(Stemmed,
- [{1, "1a"}])).
-
-should_produce_empty_tree_on_removing_last_leaf()->
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- ?_assertEqual({[], [{2, "1aa"}]},
- couch_key_tree:remove_leafs(Stemmed,
- [{2, "1aa"}])).
-
-should_extract_subtree()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{"foo", {0, ["1"]}}],[]},
- couch_key_tree:get(TwoChildSibs, [{0, "1"}])).
-
-should_extract_subsubtree()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]},
- couch_key_tree:get(TwoChildSibs, [{1, "1a"}])).
-
-should_gather_non_existent_leaf()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[],[{0, "x"}]},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}])).
-
-should_gather_leaf()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{"bar", {1, ["1a","1"]}}],[]},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}])).
-
-should_gather_multiple_leaves()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}])).
-
-should_gather_single_leaf_for_multiple_revs() ->
- OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}],
- ToFind = [{0, "1"}, {1, "1a"}],
- ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]},
- couch_key_tree:get_key_leafs(OneChild, ToFind)).
-
-should_gather_multiple_for_multiple_revs() ->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ToFind = [{0, "1"}, {1, "1a"}],
- ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]},
- couch_key_tree:get_key_leafs(TwoChildSibs, ToFind)).
-
-should_retrieve_full_key_path()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{0,[{"1", "foo"}]}],[]},
- couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}])).
-
-should_retrieve_full_key_path_for_node()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{1,[{"1a", "bar"},{"1", "foo"}]}],[]},
- couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}])).
-
-should_retrieve_leaves_with_parent_node()->
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- [
- ?_assertEqual([{2, [{"1aa", "bar"},{"1a", "bar"}]}],
- couch_key_tree:get_all_leafs_full(Stemmed)),
- ?_assertEqual([{1, [{"1a", "bar"},{"1", "foo"}]},
- {1, [{"1b", "bar"},{"1", "foo"}]}],
- couch_key_tree:get_all_leafs_full(TwoChildSibs))
- ].
-
-should_retrieve_all_leaves()->
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- [
- ?_assertEqual([{"bar", {2, ["1aa","1a"]}}],
- couch_key_tree:get_all_leafs(Stemmed)),
- ?_assertEqual([{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}],
- couch_key_tree:get_all_leafs(TwoChildSibs))
- ].
-
-should_have_no_leaves_for_empty_tree()->
- ?_assertEqual(0, couch_key_tree:count_leafs([])).
-
-should_have_single_leaf_for_tree_with_single_node()->
- ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1","foo",[]}}])).
-
-should_have_two_leaves_for_tree_with_child_siblings()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(2, couch_key_tree:count_leafs(TwoChildSibs)).
-
-should_not_affect_on_leaf_counting_for_stemmed_tree()->
- ?_assertEqual(1, couch_key_tree:count_leafs([{2, {"1bb", "boo", []}}])).
-
-should_have_no_effect_for_stemming_more_levels_than_exists()->
- TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
- ?_assertEqual(TwoChild, couch_key_tree:stem(TwoChild, 3)).
-
-should_return_one_deepest_node()->
- TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
- Stemmed = [{2, {"1aa", "bar", []}}],
- ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 1)).
-
-should_return_two_deepest_nodes()->
- TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)).
-
-
-merge_and_stem(RevTree, Tree) ->
- {Merged, Result} = couch_key_tree:merge(RevTree, Tree),
- {couch_key_tree:stem(Merged, ?DEPTH), Result}.
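-
-% merge_and_stem/2 roughly mirrors how the database applies an update:
-% merge the incoming path first, then stem to the revs limit (?DEPTH here).
-% A sketch, matching the first merge test above:
-%
-%   {[{1, {"1", "foo", []}}], new_leaf} =
-%       merge_and_stem([], {1, {"1", "foo", []}}).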
diff --git a/src/couch/test/eunit/couch_passwords_tests.erl b/src/couch/test/eunit/couch_passwords_tests.erl
deleted file mode 100644
index 88de8530f..000000000
--- a/src/couch/test/eunit/couch_passwords_tests.erl
+++ /dev/null
@@ -1,54 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_passwords_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-
-pbkdf2_test_()->
- {"PBKDF2",
- [
- {"Iterations: 1, length: 20",
- ?_assertEqual(
- {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20))},
-
- {"Iterations: 2, length: 20",
- ?_assertEqual(
- {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20))},
-
- {"Iterations: 4096, length: 20",
- ?_assertEqual(
- {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20))},
-
- {"Iterations: 4096, length: 25",
- ?_assertEqual(
- {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
- couch_passwords:pbkdf2(<<"passwordPASSWORDpassword">>,
- <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>,
- 4096, 25))},
- {"Null byte",
- ?_assertEqual(
- {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
- couch_passwords:pbkdf2(<<"pass\0word">>,
- <<"sa\0lt">>,
- 4096, 16))},
-
-        {timeout, 600, %% this may run too long on slow hosts
- {"Iterations: 16777216 - this may take some time",
- ?_assertEqual(
- {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
- )}}]}.
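-
-% For reference: the vectors above are the PBKDF2-HMAC-SHA1 test vectors
-% from RFC 6070, so the expected digests can be cross-checked there.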
diff --git a/src/couch/test/eunit/couch_query_servers_tests.erl b/src/couch/test/eunit/couch_query_servers_tests.erl
deleted file mode 100644
index f8df896c4..000000000
--- a/src/couch/test/eunit/couch_query_servers_tests.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_query_servers_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-
-setup() ->
- meck:new([config, couch_log]).
-
-
-teardown(_) ->
- meck:unload().
-
-
-sum_overflow_test_() ->
- {
- "Test overflow detection in the _sum reduce function",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- fun should_return_error_on_overflow/0,
- fun should_return_object_on_log/0,
- fun should_return_object_on_false/0
- ]
- }
- }.
-
-
-should_return_error_on_overflow() ->
- meck:reset([config, couch_log]),
- meck:expect(
- config, get, ["query_server_config", "reduce_limit", "true"],
- "true"
- ),
- meck:expect(couch_log, error, ['_', '_'], ok),
- KVs = gen_sum_kvs(),
- {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
- ?assertMatch({[{<<"error">>, <<"builtin_reduce_error">>} | _]}, Result),
- ?assert(meck:called(config, get, '_')),
- ?assert(meck:called(couch_log, error, '_')).
-
-
-should_return_object_on_log() ->
- meck:reset([config, couch_log]),
- meck:expect(
- config, get, ["query_server_config", "reduce_limit", "true"],
- "log"
- ),
- meck:expect(couch_log, error, ['_', '_'], ok),
- KVs = gen_sum_kvs(),
- {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
- ?assertMatch({[_ | _]}, Result),
- Keys = [K || {K, _} <- element(1, Result)],
- ?assert(not lists:member(<<"error">>, Keys)),
- ?assert(meck:called(config, get, '_')),
- ?assert(meck:called(couch_log, error, '_')).
-
-
-should_return_object_on_false() ->
- meck:reset([config, couch_log]),
- meck:expect(
- config, get, ["query_server_config", "reduce_limit", "true"],
- "false"
- ),
- meck:expect(couch_log, error, ['_', '_'], ok),
- KVs = gen_sum_kvs(),
- {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
- ?assertMatch({[_ | _]}, Result),
- Keys = [K || {K, _} <- element(1, Result)],
- ?assert(not lists:member(<<"error">>, Keys)),
- ?assert(meck:called(config, get, '_')),
- ?assertNot(meck:called(couch_log, error, '_')).
-
-
-gen_sum_kvs() ->
- lists:map(fun(I) ->
- Props = lists:map(fun(_) ->
- K = couch_util:encodeBase64Url(crypto:strong_rand_bytes(16)),
- {K, 1}
- end, lists:seq(1, 20)),
- [I, {Props}]
- end, lists:seq(1, 10)).
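-
-% Shape sketch: each generated row is [I, {Props}] where Props holds 20
-% {RandomBase64Key, 1} pairs. Folding ten such rows through _sum yields an
-% object with up to 200 distinct keys, which is what trips (or merely logs)
-% the reduce_limit overflow handling exercised above.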
diff --git a/src/couch/test/eunit/couch_server_tests.erl b/src/couch/test/eunit/couch_server_tests.erl
deleted file mode 100644
index 7d50700d2..000000000
--- a/src/couch/test/eunit/couch_server_tests.erl
+++ /dev/null
@@ -1,294 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_server_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--include("../src/couch_db_int.hrl").
--include("../src/couch_server_int.hrl").
-
-start() ->
- Ctx = test_util:start_couch(),
- config:set("log", "include_sasl", "false", false),
- Ctx.
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, []),
- Db.
-
-setup(rename) ->
- config:set("couchdb", "enable_database_recovery", "true", false),
- setup();
-setup(_) ->
- setup().
-
-teardown(Db) ->
- FilePath = couch_db:get_filepath(Db),
- (catch couch_db:close(Db)),
- (catch file:delete(FilePath)).
-
-teardown(rename, Db) ->
- config:set("couchdb", "enable_database_recovery", "false", false),
- teardown(Db);
-teardown(_, Db) ->
- teardown(Db).
-
-
-delete_db_test_() ->
- {
- "Test for proper deletion of db file",
- {
- setup,
- fun start/0, fun test_util:stop/1,
- [
- make_test_case(rename, [fun should_rename_on_delete/2]),
- make_test_case(delete, [fun should_delete/2])
- ]
- }
- }.
-
-make_test_case(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s", [Mod])),
- {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
- }.
-
-should_rename_on_delete(_, Db) ->
- DbName = couch_db:name(Db),
- Origin = couch_db:get_filepath(Db),
- ?_test(begin
- ?assert(filelib:is_regular(Origin)),
- ?assertMatch(ok, couch_server:delete(DbName, [])),
- ?assertNot(filelib:is_regular(Origin)),
- DeletedFiles = deleted_files(Origin),
- ?assertMatch([_], DeletedFiles),
- [Renamed] = DeletedFiles,
- ?assertEqual(
- filename:extension(Origin), filename:extension(Renamed)),
- ?assert(filelib:is_regular(Renamed))
- end).
-
-should_delete(_, Db) ->
- DbName = couch_db:name(Db),
- Origin = couch_db:get_filepath(Db),
- ?_test(begin
- ?assert(filelib:is_regular(Origin)),
- ?assertMatch(ok, couch_server:delete(DbName, [])),
- ?assertNot(filelib:is_regular(Origin)),
- ?assertMatch([], deleted_files(Origin))
- end).
-
-deleted_files(ViewFile) ->
- filelib:wildcard(filename:rootname(ViewFile) ++ "*.deleted.*").
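-
-% The wildcard reflects the recovery rename scheme: with
-% enable_database_recovery set, the .couch file is renamed rather than
-% removed, to a name matching <rootname>*.deleted.<extension>, e.g.
-% (hypothetical) "db.20201021.193329.deleted.couch".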
-
-
-bad_engine_option_test_() ->
- {
- setup,
- fun start/0,
- fun test_util:stop/1,
- [
- fun t_bad_engine_option/0
- ]
- }.
-
-
-t_bad_engine_option() ->
- Resp = couch_server:create(?tempdb(), [{engine, <<"cowabunga!">>}]),
- ?assertEqual(Resp, {error, {invalid_engine_extension, <<"cowabunga!">>}}).
-
-
-get_engine_path_test_() ->
- {
- setup,
- fun start/0, fun test_util:stop/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_return_engine_path/1,
- fun should_return_invalid_engine_error/1
- ]
- }
- }.
-
-
-should_return_engine_path(Db) ->
- DbName = couch_db:name(Db),
- Engine = couch_db_engine:get_engine(Db),
- Resp = couch_server:get_engine_path(DbName, Engine),
- FilePath = couch_db:get_filepath(Db),
- ?_assertMatch({ok, FilePath}, Resp).
-
-
-should_return_invalid_engine_error(Db) ->
- DbName = couch_db:name(Db),
- Engine = fake_engine,
- Resp = couch_server:get_engine_path(DbName, Engine),
- ?_assertMatch({error, {invalid_engine, Engine}}, Resp).
-
-
-interleaved_requests_test_() ->
- {
- setup,
- fun start_interleaved/0,
- fun stop_interleaved/1,
- fun make_interleaved_requests/1
- }.
-
-
-start_interleaved() ->
- TestDbName = ?tempdb(),
- meck:new(couch_db, [passthrough]),
- meck:expect(couch_db, start_link, fun(Engine, DbName, Filename, Options) ->
- case DbName of
- TestDbName ->
- receive
- go -> ok
- end,
- Res = meck:passthrough([Engine, DbName, Filename, Options]),
- % We're unlinking and sending a delayed
- % EXIT signal so that we can mimic a specific
- % message order in couch_server. On a test machine
- % this is a big race condition which affects the
- % ability to induce the bug.
- case Res of
- {ok, Db} ->
- DbPid = couch_db:get_pid(Db),
- unlink(DbPid),
- Msg = {'EXIT', DbPid, killed},
- erlang:send_after(2000, whereis(couch_server), Msg);
- _ ->
- ok
- end,
- Res;
- _ ->
- meck:passthrough([Engine, DbName, Filename, Options])
- end
- end),
- {test_util:start_couch(), TestDbName}.
-
-
-stop_interleaved({Ctx, TestDbName}) ->
- couch_server:delete(TestDbName, [?ADMIN_CTX]),
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-
-make_interleaved_requests({_, TestDbName}) ->
- [
- fun() -> t_interleaved_create_delete_open(TestDbName) end
- ].
-
-
-t_interleaved_create_delete_open(DbName) ->
- {CrtRef, OpenRef} = {make_ref(), make_ref()},
- CrtMsg = {'$gen_call', {self(), CrtRef}, {create, DbName, [?ADMIN_CTX]}},
- FakePid = spawn(fun() -> ok end),
- OpenResult = {open_result, DbName, {ok, #db{main_pid = FakePid}}},
- OpenResultMsg = {'$gen_call', {self(), OpenRef}, OpenResult},
-
- % Get the current couch_server pid so we're sure
- % to not end up messaging two different pids
- CouchServer = whereis(couch_server),
-
- % Start our first instance that will succeed in
- % an invalid state. Notice that the opener pid
- % spawned by couch_server:open_async/5 will halt
- % in our meck expect function waiting for a message.
- %
- % We're using raw message passing here so that we don't
- % have to coordinate multiple processes for this test.
- CouchServer ! CrtMsg,
- {ok, Opener} = get_opener_pid(DbName),
-
- % We have to suspend couch_server so that we can enqueue
- % our next requests and let the opener finish processing.
- erlang:suspend_process(CouchServer),
-
- % We queue a confused open_result message in front of
- % the correct response from the opener.
- CouchServer ! OpenResultMsg,
-
- % Release the opener pid so it can continue
- Opener ! go,
-
- % Wait for the '$gen_call' message from OpenerPid to arrive
- % in couch_server's mailbox
- ok = wait_for_open_async_result(CouchServer, Opener),
-
- % Now monitor and resume the couch_server and assert that
- % couch_server does not crash while processing OpenResultMsg
- CSRef = erlang:monitor(process, CouchServer),
- erlang:resume_process(CouchServer),
- check_monitor_not_triggered(CSRef),
-
- % Our open_result message was processed and ignored
- ?assertEqual({OpenRef, ok}, get_next_message()),
-
- % Our create request was processed normally after we
- % ignored the spurious open_result
- ?assertMatch({CrtRef, {ok, _}}, get_next_message()),
-
- % And finally assert that couch_server is still
- % alive.
- ?assert(is_process_alive(CouchServer)),
- check_monitor_not_triggered(CSRef).
-
-
-get_opener_pid(DbName) ->
- WaitFun = fun() ->
- case ets:lookup(couch_dbs, DbName) of
- [#entry{pid = Pid}] ->
- {ok, Pid};
- [] ->
- wait
- end
- end,
- test_util:wait(WaitFun).
-
-
-wait_for_open_async_result(CouchServer, Opener) ->
- WaitFun = fun() ->
- {_, Messages} = erlang:process_info(CouchServer, messages),
- Found = lists:foldl(fun(Msg, Acc) ->
- case Msg of
- {'$gen_call', {Opener, _}, {open_result, _, {ok, _}}} ->
- true;
- _ ->
- Acc
- end
- end, false, Messages),
- if Found -> ok; true -> wait end
- end,
- test_util:wait(WaitFun).
-
-
-check_monitor_not_triggered(Ref) ->
- receive
- {'DOWN', Ref, _, _, Reason0} ->
- erlang:error({monitor_triggered, Reason0})
- after 100 ->
- ok
- end.
-
-
-get_next_message() ->
- receive
- Msg ->
- Msg
- after 5000 ->
- erlang:error(timeout)
- end.
diff --git a/src/couch/test/eunit/couch_stream_tests.erl b/src/couch/test/eunit/couch_stream_tests.erl
deleted file mode 100644
index a7fedf0af..000000000
--- a/src/couch/test/eunit/couch_stream_tests.erl
+++ /dev/null
@@ -1,124 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stream_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(ENGINE(FdVar), {couch_bt_engine_stream, {FdVar, []}}).
-
-setup() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Stream} = couch_stream:open(?ENGINE(Fd), []),
- {Fd, Stream}.
-
-teardown({Fd, _}) ->
- ok = couch_file:close(Fd).
-
-
-stream_test_() ->
- {
- "CouchDB stream tests",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_write/1,
- fun should_write_consecutive/1,
- fun should_write_empty_binary/1,
- fun should_return_file_pointers_on_close/1,
- fun should_return_stream_size_on_close/1,
- fun should_return_valid_pointers/1,
- fun should_recall_last_pointer_position/1,
- fun should_stream_more_with_4K_chunk_size/1,
- fun should_stop_on_normal_exit_of_stream_opener/1
- ]
- }
- }
- }.
-
-
-should_write({_, Stream}) ->
- ?_assertEqual(ok, couch_stream:write(Stream, <<"food">>)).
-
-should_write_consecutive({_, Stream}) ->
- couch_stream:write(Stream, <<"food">>),
- ?_assertEqual(ok, couch_stream:write(Stream, <<"foob">>)).
-
-should_write_empty_binary({_, Stream}) ->
- ?_assertEqual(ok, couch_stream:write(Stream, <<>>)).
-
-should_return_file_pointers_on_close({_, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {NewEngine, _, _, _, _} = couch_stream:close(Stream),
- {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
- ?_assertEqual([{0, 8}], Ptrs).
-
-should_return_stream_size_on_close({_, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {_, Length, _, _, _} = couch_stream:close(Stream),
- ?_assertEqual(8, Length).
-
-should_return_valid_pointers({_Fd, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {NewEngine, _, _, _, _} = couch_stream:close(Stream),
- ?_assertEqual(<<"foodfoob">>, read_all(NewEngine)).
-
-should_recall_last_pointer_position({Fd, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {_, _, _, _, _} = couch_stream:close(Stream),
- {ok, ExpPtr} = couch_file:bytes(Fd),
- {ok, Stream2} = couch_stream:open(?ENGINE(Fd)),
- ZeroBits = <<0:(8 * 10)>>,
- OneBits = <<1:(8 * 10)>>,
- ok = couch_stream:write(Stream2, OneBits),
- ok = couch_stream:write(Stream2, ZeroBits),
- {NewEngine, 20, _, _, _} = couch_stream:close(Stream2),
- {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
- [{ExpPtr, 20}] = Ptrs,
- AllBits = iolist_to_binary([OneBits, ZeroBits]),
- ?_assertEqual(AllBits, read_all(NewEngine)).
-
-should_stream_more_with_4K_chunk_size({Fd, _}) ->
- {ok, Stream} = couch_stream:open(?ENGINE(Fd), [{buffer_size, 4096}]),
- lists:foldl(
- fun(_, Acc) ->
- Data = <<"a1b2c">>,
- couch_stream:write(Stream, Data),
- [Data | Acc]
- end, [], lists:seq(1, 1024)),
- {NewEngine, Length, _, _, _} = couch_stream:close(Stream),
- {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
- ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120}, {Ptrs, Length}).
-
-should_stop_on_normal_exit_of_stream_opener({Fd, _}) ->
- RunnerPid = self(),
- OpenerPid = spawn(
- fun() ->
- {ok, StreamPid} = couch_stream:open(?ENGINE(Fd)),
- RunnerPid ! {pid, StreamPid}
- end),
- StreamPid = receive
- {pid, StreamPid0} -> StreamPid0
- end,
- % Confirm the validity of the test by verifying the stream opener has died
- ?assertNot(is_process_alive(OpenerPid)),
- % Verify the stream itself has also died
- ?_assertNot(is_process_alive(StreamPid)).
-
-
-read_all(Engine) ->
- Data = couch_stream:foldl(Engine, fun(Bin, Acc) -> [Bin, Acc] end, []),
- iolist_to_binary(Data).
diff --git a/src/couch/test/eunit/couch_task_status_tests.erl b/src/couch/test/eunit/couch_task_status_tests.erl
deleted file mode 100644
index 0ec03563b..000000000
--- a/src/couch/test/eunit/couch_task_status_tests.erl
+++ /dev/null
@@ -1,233 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_task_status_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- Ctx = test_util:start(?MODULE, [couch_log], [{dont_mock, [config]}]),
- {ok, TaskStatusPid} = couch_task_status:start_link(),
- TaskUpdaterPid = spawn(fun() -> loop() end),
- {TaskStatusPid, TaskUpdaterPid, Ctx}.
-
-
-teardown({TaskStatusPid, _, Ctx})->
- test_util:stop_sync_throw(TaskStatusPid, fun() ->
- couch_task_status:stop()
- end, timeout_error, ?TIMEOUT),
- test_util:stop(Ctx).
-
-
-couch_task_status_test_() ->
- {
- "CouchDB task status updates",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_register_task/1,
- fun should_set_task_startup_time/1,
- fun should_have_update_time_as_startup_before_any_progress/1,
- fun should_set_task_type/1,
- fun should_not_register_multiple_tasks_for_same_pid/1,
- fun should_set_task_progress/1,
- fun should_update_task_progress/1,
- fun should_update_time_changes_on_task_progress/1,
- %% fun should_control_update_frequency/1,
- fun should_reset_control_update_frequency/1,
- fun should_track_multiple_tasks/1,
- fun should_finish_task/1
- ]
- }
- }.
-
-
-should_register_task({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual(1, length(couch_task_status:all())).
-
-should_set_task_startup_time({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assert(is_integer(get_task_prop(Pid, started_on))).
-
-should_have_update_time_as_startup_before_any_progress({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- StartTime = get_task_prop(Pid, started_on),
- ?_assertEqual(StartTime, get_task_prop(Pid, updated_on)).
-
-should_set_task_type({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual(replication, get_task_prop(Pid, type)).
-
-should_not_register_multiple_tasks_for_same_pid({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual({add_task_error, already_registered},
- call(Pid, add, [{type, compaction}, {progress, 0}])).
-
-should_set_task_progress({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual(0, get_task_prop(Pid, progress)).
-
-should_update_task_progress({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- call(Pid, update, [{progress, 25}]),
- ?_assertEqual(25, get_task_prop(Pid, progress)).
-
-should_update_time_changes_on_task_progress({_, Pid, _Ctx}) ->
- ?_assert(
- begin
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
-            ok = timer:sleep(1000), % sleep a while so updated_on moves past started_on
- call(Pid, update, [{progress, 25}]),
- get_task_prop(Pid, updated_on) > get_task_prop(Pid, started_on)
- end).
-
-%%should_control_update_frequency({_, Pid, _Ctx}) ->
-%% ?_assertEqual(66,
-%% begin
-%% ok = call(Pid, add, [{type, replication}, {progress, 0}]),
-%% call(Pid, update, [{progress, 50}]),
-%% call(Pid, update_frequency, 500),
-%% call(Pid, update, [{progress, 66}]),
-%% call(Pid, update, [{progress, 77}]),
-%% get_task_prop(Pid, progress)
-%% end).
-
-should_reset_control_update_frequency({_, Pid, _Ctx}) ->
- ?_assertEqual(87,
- begin
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- call(Pid, update, [{progress, 50}]),
- call(Pid, update_frequency, 500),
- call(Pid, update, [{progress, 66}]),
- call(Pid, update, [{progress, 77}]),
- call(Pid, update_frequency, 0),
- call(Pid, update, [{progress, 87}]),
- get_task_prop(Pid, progress)
- end).
-
-should_track_multiple_tasks(_) ->
- ?_assert(run_multiple_tasks()).
-
-should_finish_task({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?assertEqual(1, length(couch_task_status:all())),
- ok = call(Pid, done),
- ?_assertEqual(0, length(couch_task_status:all())).
-
-
-run_multiple_tasks() ->
- Pid1 = spawn(fun() -> loop() end),
- Pid2 = spawn(fun() -> loop() end),
- Pid3 = spawn(fun() -> loop() end),
- call(Pid1, add, [{type, replication}, {progress, 0}]),
- call(Pid2, add, [{type, compaction}, {progress, 0}]),
- call(Pid3, add, [{type, indexer}, {progress, 0}]),
-
- ?assertEqual(3, length(couch_task_status:all())),
- ?assertEqual(replication, get_task_prop(Pid1, type)),
- ?assertEqual(compaction, get_task_prop(Pid2, type)),
- ?assertEqual(indexer, get_task_prop(Pid3, type)),
-
- call(Pid2, update, [{progress, 33}]),
- call(Pid3, update, [{progress, 42}]),
- call(Pid1, update, [{progress, 11}]),
- ?assertEqual(42, get_task_prop(Pid3, progress)),
- call(Pid1, update, [{progress, 72}]),
- ?assertEqual(72, get_task_prop(Pid1, progress)),
- ?assertEqual(33, get_task_prop(Pid2, progress)),
-
- call(Pid1, done),
- ?assertEqual(2, length(couch_task_status:all())),
- call(Pid3, done),
- ?assertEqual(1, length(couch_task_status:all())),
- call(Pid2, done),
- ?assertEqual(0, length(couch_task_status:all())),
-
- true.
-
-
-loop() ->
- receive
- {add, Props, From} ->
- Resp = couch_task_status:add_task(Props),
- From ! {ok, self(), Resp},
- loop();
- {update, Props, From} ->
- Resp = couch_task_status:update(Props),
- From ! {ok, self(), Resp},
- loop();
- {update_frequency, Msecs, From} ->
- Resp = couch_task_status:set_update_frequency(Msecs),
- From ! {ok, self(), Resp},
- loop();
- {done, From} ->
- From ! {ok, self(), ok}
- end.
-
-call(Pid, done) ->
- Ref = erlang:monitor(process, Pid),
- Pid ! {done, self()},
- Res = wait(Pid),
- receive
- {'DOWN', Ref, _Type, Pid, _Info} ->
- Res
- after ?TIMEOUT ->
- throw(timeout_error)
- end;
-call(Pid, Command) ->
- Pid ! {Command, self()},
- wait(Pid).
-
-call(Pid, Command, Arg) ->
- Pid ! {Command, Arg, self()},
- wait(Pid).
-
-wait(Pid) ->
- receive
- {ok, Pid, Msg} ->
- Msg
- after ?TIMEOUT ->
- throw(timeout_error)
- end.
-
-get_task_prop(Pid, Prop) ->
- From = list_to_binary(pid_to_list(Pid)),
- Element = lists:foldl(
- fun(PropList, Acc) ->
- case couch_util:get_value(pid, PropList) of
- From ->
- [PropList | Acc];
- _ ->
- Acc
- end
- end,
- [], couch_task_status:all()
- ),
- case couch_util:get_value(Prop, hd(Element), nil) of
- nil ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Could not get property '"
- ++ couch_util:to_list(Prop)
- ++ "' for task "
- ++ pid_to_list(Pid)}]});
- Value ->
- Value
- end.
diff --git a/src/couch/test/eunit/couch_totp_tests.erl b/src/couch/test/eunit/couch_totp_tests.erl
deleted file mode 100644
index 6817a092a..000000000
--- a/src/couch/test/eunit/couch_totp_tests.erl
+++ /dev/null
@@ -1,55 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_totp_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-totp_sha_test() ->
- Key = <<"12345678901234567890">>,
- ?assertEqual(94287082, couch_totp:generate(sha, Key, 59, 30, 8)),
- ?assertEqual(07081804, couch_totp:generate(sha, Key, 1111111109, 30, 8)),
- ?assertEqual(14050471, couch_totp:generate(sha, Key, 1111111111, 30, 8)),
- ?assertEqual(89005924, couch_totp:generate(sha, Key, 1234567890, 30, 8)),
- ?assertEqual(69279037, couch_totp:generate(sha, Key, 2000000000, 30, 8)),
- ?assertEqual(65353130, couch_totp:generate(sha, Key, 20000000000, 30, 8)).
-
-totp_sha256_test() ->
- Key = <<"12345678901234567890123456789012">>,
- case sha_256_512_supported() of
- true ->
- ?assertEqual(46119246, couch_totp:generate(sha256, Key, 59, 30, 8)),
- ?assertEqual(68084774, couch_totp:generate(sha256, Key, 1111111109, 30, 8)),
- ?assertEqual(67062674, couch_totp:generate(sha256, Key, 1111111111, 30, 8)),
- ?assertEqual(91819424, couch_totp:generate(sha256, Key, 1234567890, 30, 8)),
- ?assertEqual(90698825, couch_totp:generate(sha256, Key, 2000000000, 30, 8)),
- ?assertEqual(77737706, couch_totp:generate(sha256, Key, 20000000000, 30, 8));
- false ->
- ?debugMsg("sha256 not supported, tests skipped")
- end.
-
-totp_sha512_test() ->
- Key = <<"1234567890123456789012345678901234567890123456789012345678901234">>,
- case sha_256_512_supported() of
- true ->
- ?assertEqual(90693936, couch_totp:generate(sha512, Key, 59, 30, 8)),
- ?assertEqual(25091201, couch_totp:generate(sha512, Key, 1111111109, 30, 8)),
- ?assertEqual(99943326, couch_totp:generate(sha512, Key, 1111111111, 30, 8)),
- ?assertEqual(93441116, couch_totp:generate(sha512, Key, 1234567890, 30, 8)),
- ?assertEqual(38618901, couch_totp:generate(sha512, Key, 2000000000, 30, 8)),
- ?assertEqual(47863826, couch_totp:generate(sha512, Key, 20000000000, 30, 8));
- false ->
- ?debugMsg("sha512 not supported, tests skipped")
- end.
-
-sha_256_512_supported() ->
- erlang:function_exported(crypto, hmac, 3).
diff --git a/src/couch/test/eunit/couch_util_tests.erl b/src/couch/test/eunit/couch_util_tests.erl
deleted file mode 100644
index 012c961a4..000000000
--- a/src/couch/test/eunit/couch_util_tests.erl
+++ /dev/null
@@ -1,177 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_util_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-
-setup() ->
-    %% We cannot start the driver from here since it becomes bound to the
-    %% eunit master process and the next couch_server_sup:start_link call
-    %% will fail because the server can't load the driver while it is
-    %% already loaded.
-    %%
-    %% On the other hand, we cannot unload the driver here either, due to
-    %% {error, not_loaded_by_this_process} while it is loaded. Any ideas
-    %% are welcome.
- %%
- Ctx = test_util:start_couch(),
- %% config:start_link(?CONFIG_CHAIN),
- %% {ok, _} = couch_drv:start_link(),
- Ctx.
-
-teardown(Ctx) ->
- ok = test_util:stop_couch(Ctx),
- %% config:stop(),
- %% erl_ddll:unload_driver(couch_icu_driver),
- ok.
-
-
-collation_test_() ->
- {
- "Collation tests",
- [
- {
- setup,
- fun setup/0, fun teardown/1,
- [
- should_collate_ascii(),
- should_collate_non_ascii()
- ]
- }
- ]
- }.
-
-validate_callback_exists_test_() ->
- {
- "validate_callback_exists tests",
- [
- fun should_succeed_for_existent_cb/0,
- should_fail_for_missing_cb()
- ]
- }.
-
-should_collate_ascii() ->
- ?_assertEqual(1, couch_util:collate(<<"foo">>, <<"bar">>)).
-
-should_collate_non_ascii() ->
- ?_assertEqual(-1, couch_util:collate(<<"A">>, <<"aa">>)).
-
-to_existing_atom_test() ->
- ?assert(couch_util:to_existing_atom(true)),
- ?assertMatch(foo, couch_util:to_existing_atom(<<"foo">>)),
- ?assertMatch(foobarbaz, couch_util:to_existing_atom("foobarbaz")).
-
-implode_test() ->
- ?assertEqual([1, 38, 2, 38, 3], couch_util:implode([1, 2, 3], "&")).
-
-trim_test() ->
- lists:map(fun(S) -> ?assertEqual("foo", couch_util:trim(S)) end,
- [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"]).
-
-abs_pathname_test() ->
- {ok, Cwd} = file:get_cwd(),
- ?assertEqual(Cwd ++ "/foo", couch_util:abs_pathname("./foo")).
-
-flush_test() ->
- ?assertNot(couch_util:should_flush()),
- AcquireMem = fun() ->
- _IntsToAGazillion = lists:seq(1, 200000),
- _LotsOfData = lists:map(fun(_) -> <<"foobar">> end,
- lists:seq(1, 500000)),
- _ = list_to_binary(_LotsOfData),
-
-        %% Allocating 200K tuples puts us above the memory threshold.
-        %% Originally this asserted:
-        %%      ?assert(couch_util:should_flush())
-        %% however, unlike in the etap test, GC collects all the allocated
-        %% bits, making that condition fail. So we invert the condition:
-        %% since GC works and cleans the memory, everything is fine.
- ?assertNot(couch_util:should_flush())
- end,
- AcquireMem(),
-
- %% Checking to flush invokes GC
- ?assertNot(couch_util:should_flush()).
-
-verify_test() ->
- ?assert(couch_util:verify("It4Vooya", "It4Vooya")),
- ?assertNot(couch_util:verify("It4VooyaX", "It4Vooya")),
- ?assert(couch_util:verify(<<"ahBase3r">>, <<"ahBase3r">>)),
- ?assertNot(couch_util:verify(<<"ahBase3rX">>, <<"ahBase3r">>)),
- ?assertNot(couch_util:verify(nil, <<"ahBase3r">>)).
-
-find_in_binary_test_() ->
- Cases = [
- {<<"foo">>, <<"foobar">>, {exact, 0}},
- {<<"foo">>, <<"foofoo">>, {exact, 0}},
- {<<"foo">>, <<"barfoo">>, {exact, 3}},
- {<<"foo">>, <<"barfo">>, {partial, 3}},
- {<<"f">>, <<"fobarfff">>, {exact, 0}},
- {<<"f">>, <<"obarfff">>, {exact, 4}},
- {<<"f">>, <<"obarggf">>, {exact, 6}},
- {<<"f">>, <<"f">>, {exact, 0}},
- {<<"f">>, <<"g">>, not_found},
- {<<"foo">>, <<"f">>, {partial, 0}},
- {<<"foo">>, <<"g">>, not_found},
- {<<"foo">>, <<"">>, not_found},
- {<<"fofo">>, <<"foofo">>, {partial, 3}},
- {<<"foo">>, <<"gfobarfo">>, {partial, 6}},
- {<<"foo">>, <<"gfobarf">>, {partial, 6}},
- {<<"foo">>, <<"gfobar">>, not_found},
- {<<"fog">>, <<"gbarfogquiz">>, {exact, 4}},
- {<<"ggg">>, <<"ggg">>, {exact, 0}},
- {<<"ggg">>, <<"ggggg">>, {exact, 0}},
- {<<"ggg">>, <<"bggg">>, {exact, 1}},
- {<<"ggg">>, <<"bbgg">>, {partial, 2}},
- {<<"ggg">>, <<"bbbg">>, {partial, 3}},
- {<<"ggg">>, <<"bgbggbggg">>, {exact, 6}},
- {<<"ggg">>, <<"bgbggb">>, not_found}
- ],
- lists:map(
- fun({Needle, Haystack, Result}) ->
- Msg = lists:flatten(io_lib:format("Looking for ~s in ~s",
- [Needle, Haystack])),
- {Msg, ?_assertMatch(Result,
- couch_util:find_in_binary(Needle, Haystack))}
- end, Cases).
-
-should_succeed_for_existent_cb() ->
- ?_assert(couch_util:validate_callback_exists(lists, any, 2)).
-
-should_fail_for_missing_cb() ->
- Cases = [
- {unknown_module, any, 1},
- {erlang, unknown_function, 1},
- {erlang, whereis, 100}
- ],
- lists:map(
- fun({M, F, A} = MFA) ->
- Name = lists:flatten(io_lib:format("~w:~w/~w", [M, F, A])),
- {Name, ?_assertThrow(
- {error, {undefined_callback, Name, MFA}},
- couch_util:validate_callback_exists(M, F, A))}
- end, Cases).
-
-to_hex_test_() ->
- [
- ?_assertEqual("", couch_util:to_hex([])),
- ?_assertEqual("010203faff", couch_util:to_hex([1, 2, 3, 250, 255])),
- ?_assertEqual("", couch_util:to_hex(<<>>)),
- ?_assertEqual("010203faff", couch_util:to_hex(<<1, 2, 3, 250, 255>>))
- ].
-
-json_decode_test_() ->
- [
- ?_assertEqual({[]}, couch_util:json_decode(<<"{}">>)),
- ?_assertEqual({[]}, couch_util:json_decode(<<"{}">>, [])),
- ?_assertEqual(#{}, couch_util:json_decode(<<"{}">>, [return_maps]))
- ].
diff --git a/src/couch/test/eunit/couch_uuids_tests.erl b/src/couch/test/eunit/couch_uuids_tests.erl
deleted file mode 100644
index 9ca2c8a84..000000000
--- a/src/couch/test/eunit/couch_uuids_tests.erl
+++ /dev/null
@@ -1,125 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_uuids_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(TIMEOUT, 20).
-
-
-setup_all() ->
- test_util:start_applications([config]),
- couch_uuids:start().
-
-
-teardown_all(_) ->
- couch_uuids:stop(),
- test_util:stop_applications([config]).
-
-
-uuids_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- {timeout, ?TIMEOUT, fun default_algorithm/0},
- {timeout, ?TIMEOUT, fun sequential_algorithm/0},
- {timeout, ?TIMEOUT, fun utc_algorithm/0},
- {timeout, ?TIMEOUT, fun utc_id_suffix_algorithm/0}
- ]
- }.
-
-
-default_algorithm() ->
- config:delete("uuids", "algorithm", false),
- check_unique().
-
-
-sequential_algorithm() ->
- config:set("uuids", "algorithm", "sequential", false),
- check_unique(),
- check_increment_monotonically(),
- check_rollover().
-
-
-utc_algorithm() ->
- config:set("uuids", "algorithm", "utc_random", false),
- check_unique(),
- check_increment_monotonically().
-
-
-utc_id_suffix_algorithm() ->
- config:set("uuids", "algorithm", "utc_id", false),
- config:set("uuids", "utc_id_suffix", "bozo", false),
- check_unique(),
- check_increment_monotonically(),
- check_preserve_suffix().
-
-
-check_unique() ->
-    %% this one may run for a long time on slow hosts
- ?assert(test_unique(10000, [couch_uuids:new()])).
-
-
-check_increment_monotonically() ->
- ?assert(couch_uuids:new() < couch_uuids:new()).
-
-
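-%% A sequential UUID is assumed here to be a 26-hex-char random prefix
-%% followed by a 6-char monotonically increasing suffix; the prefix
-%% only changes once the suffix rolls over. The 5000..11000 window is
-%% an empirical bound on how many random increments that takes.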
-check_rollover() ->
- UUID = binary_to_list(couch_uuids:new()),
- Prefix = element(1, lists:split(26, UUID)),
- N = gen_until_pref_change(Prefix, 0),
- ?assert(N >= 5000 andalso N =< 11000).
-
-
-check_preserve_suffix() ->
- UUID = binary_to_list(couch_uuids:new()),
- Suffix = get_suffix(UUID),
- ?assert(test_same_suffix(10000, Suffix)).
-
-
-test_unique(0, _) ->
- true;
-test_unique(N, UUIDs) ->
- UUID = couch_uuids:new(),
- ?assertNot(lists:member(UUID, UUIDs)),
-    test_unique(N - 1, [UUID | UUIDs]).
-
-
-gen_until_pref_change(_, Count) when Count > 8251 ->
- Count;
-gen_until_pref_change(Prefix, N) ->
- case get_prefix(couch_uuids:new()) of
- Prefix -> gen_until_pref_change(Prefix, N + 1);
- _ -> N
- end.
-
-
-test_same_suffix(0, _) ->
- true;
-test_same_suffix(N, Suffix) ->
- case get_suffix(couch_uuids:new()) of
- Suffix -> test_same_suffix(N - 1, Suffix);
- _ -> false
- end.
-
-
-get_prefix(UUID) ->
- element(1, lists:split(26, binary_to_list(UUID))).
-
-
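-%% For utc_id UUIDs the first 14 hex characters encode the timestamp
-%% and everything after them is the configured utc_id_suffix, hence
-%% the split at position 14 below.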
-get_suffix(UUID) when is_binary(UUID) ->
- get_suffix(binary_to_list(UUID));
-get_suffix(UUID) ->
- element(2, lists:split(14, UUID)).
diff --git a/src/couch/test/eunit/couch_work_queue_tests.erl b/src/couch/test/eunit/couch_work_queue_tests.erl
deleted file mode 100644
index a192230ef..000000000
--- a/src/couch/test/eunit/couch_work_queue_tests.erl
+++ /dev/null
@@ -1,402 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_work_queue_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(TIMEOUT, 100).
-
-
-setup(Opts) ->
- {ok, Q} = couch_work_queue:new(Opts),
- Producer = spawn_producer(Q),
- Consumer = spawn_consumer(Q),
- {Q, Producer, Consumer}.
-
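-%% Option semantics assumed from the cases in this module: max_items
-%% and max_size bound how many items/bytes the queue buffers before
-%% couch_work_queue:queue/2 blocks the producer, and multi_workers
-%% lets several consumers dequeue from the same queue.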
-setup_max_items() ->
- setup([{max_items, 3}]).
-
-setup_max_size() ->
- setup([{max_size, 160}]).
-
-setup_max_items_and_size() ->
- setup([{max_size, 160}, {max_items, 3}]).
-
-setup_multi_workers() ->
- {Q, Producer, Consumer1} = setup([{max_size, 160},
- {max_items, 3},
- {multi_workers, true}]),
- Consumer2 = spawn_consumer(Q),
- Consumer3 = spawn_consumer(Q),
- {Q, Producer, [Consumer1, Consumer2, Consumer3]}.
-
-teardown({Q, Producer, Consumers}) when is_list(Consumers) ->
-    % consume everything to unblock and let producer/consumers stop without timing out
- [consume(Consumer, all) || Consumer <- Consumers],
-
- ok = close_queue(Q),
- ok = stop(Producer, "producer"),
- R = [stop(Consumer, "consumer") || Consumer <- Consumers],
- R = [ok || _ <- Consumers],
- ok;
-teardown({Q, Producer, Consumer}) ->
- teardown({Q, Producer, [Consumer]}).
-
-
-single_consumer_test_() ->
- {
- "Single producer and consumer",
- [
- {
- "Queue with 3 max items",
- {
- foreach,
- fun setup_max_items/0, fun teardown/1,
- single_consumer_max_item_count() ++ common_cases()
- }
- },
- {
- "Queue with max size of 160 bytes",
- {
- foreach,
- fun setup_max_size/0, fun teardown/1,
- single_consumer_max_size() ++ common_cases()
- }
- },
- {
- "Queue with max size of 160 bytes and 3 max items",
- {
- foreach,
- fun setup_max_items_and_size/0, fun teardown/1,
- single_consumer_max_items_and_size() ++ common_cases()
- }
- }
- ]
- }.
-
-multiple_consumers_test_() ->
- {
- "Single producer and multiple consumers",
- [
- {
- "Queue with max size of 160 bytes and 3 max items",
- {
- foreach,
- fun setup_multi_workers/0, fun teardown/1,
- common_cases() ++ multiple_consumers()
-            }
-        }
- ]
- }.
-
-common_cases() ->
- [
- fun should_block_consumer_on_dequeue_from_empty_queue/1,
- fun should_consume_right_item/1,
- fun should_timeout_on_close_non_empty_queue/1,
- fun should_not_block_producer_for_non_empty_queue_after_close/1,
- fun should_be_closed/1
- ].
-
-single_consumer_max_item_count() ->
- [
- fun should_have_no_items_for_new_queue/1,
- fun should_block_producer_on_full_queue_count/1,
- fun should_receive_first_queued_item/1,
- fun should_consume_multiple_items/1,
- fun should_consume_all/1
- ].
-
-single_consumer_max_size() ->
- [
- fun should_have_zero_size_for_new_queue/1,
- fun should_block_producer_on_full_queue_size/1,
- fun should_increase_queue_size_on_produce/1,
- fun should_receive_first_queued_item/1,
- fun should_consume_multiple_items/1,
- fun should_consume_all/1
- ].
-
-single_consumer_max_items_and_size() ->
- single_consumer_max_item_count() ++ single_consumer_max_size().
-
-multiple_consumers() ->
- [
- fun should_have_zero_size_for_new_queue/1,
- fun should_have_no_items_for_new_queue/1,
- fun should_increase_queue_size_on_produce/1
- ].
-
-
-should_have_no_items_for_new_queue({Q, _, _}) ->
- ?_assertEqual(0, couch_work_queue:item_count(Q)).
-
-should_have_zero_size_for_new_queue({Q, _, _}) ->
- ?_assertEqual(0, couch_work_queue:size(Q)).
-
-should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumers})
-        when is_list(Consumers) ->
- [consume(C, 2) || C <- Consumers],
- Pongs = [ping(C) || C <- Consumers],
- ?_assertEqual([timeout, timeout, timeout], Pongs);
-should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumer}) ->
- consume(Consumer, 1),
- Pong = ping(Consumer),
- ?_assertEqual(timeout, Pong).
-
-should_consume_right_item({Q, Producer, Consumers}) when is_list(Consumers) ->
- [consume(C, 3) || C <- Consumers],
-
- Item1 = produce(Q, Producer, 10, false),
- ok = ping(Producer),
- ?assertEqual(0, couch_work_queue:item_count(Q)),
- ?assertEqual(0, couch_work_queue:size(Q)),
-
- Item2 = produce(Q, Producer, 10, false),
- ok = ping(Producer),
- ?assertEqual(0, couch_work_queue:item_count(Q)),
- ?assertEqual(0, couch_work_queue:size(Q)),
-
- Item3 = produce(Q, Producer, 10, false),
- ok = ping(Producer),
- ?assertEqual(0, couch_work_queue:item_count(Q)),
- ?assertEqual(0, couch_work_queue:size(Q)),
-
- R = [{ping(C), Item}
- || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3])],
-
- ?_assertEqual([{ok, Item1}, {ok, Item2}, {ok, Item3}], R);
-should_consume_right_item({Q, Producer, Consumer}) ->
- consume(Consumer, 1),
- Item = produce(Q, Producer, 10, false),
- produce(Q, Producer, 20, true),
- ok = ping(Producer),
- ok = ping(Consumer),
- {ok, Items} = last_consumer_items(Consumer),
- ?_assertEqual([Item], Items).
-
-should_increase_queue_size_on_produce({Q, Producer, _}) ->
- produce(Q, Producer, 50, true),
- ok = ping(Producer),
- Count1 = couch_work_queue:item_count(Q),
- Size1 = couch_work_queue:size(Q),
-
- produce(Q, Producer, 10, true),
- Count2 = couch_work_queue:item_count(Q),
- Size2 = couch_work_queue:size(Q),
-
-    ?_assertEqual([{1, 50}, {2, 60}], [{Count1, Size1}, {Count2, Size2}]).
-
-should_block_producer_on_full_queue_count({Q, Producer, _}) ->
- produce(Q, Producer, 10, true),
- ?assertEqual(1, couch_work_queue:item_count(Q)),
- ok = ping(Producer),
-
- produce(Q, Producer, 15, true),
- ?assertEqual(2, couch_work_queue:item_count(Q)),
- ok = ping(Producer),
-
- produce(Q, Producer, 20, true),
- ?assertEqual(3, couch_work_queue:item_count(Q)),
- Pong = ping(Producer),
-
- ?_assertEqual(timeout, Pong).
-
-should_block_producer_on_full_queue_size({Q, Producer, _}) ->
- produce(Q, Producer, 100, true),
- ok = ping(Producer),
- ?assertEqual(1, couch_work_queue:item_count(Q)),
- ?assertEqual(100, couch_work_queue:size(Q)),
-
- produce(Q, Producer, 110, false),
- Pong = ping(Producer),
- ?assertEqual(2, couch_work_queue:item_count(Q)),
- ?assertEqual(210, couch_work_queue:size(Q)),
-
- ?_assertEqual(timeout, Pong).
-
-should_consume_multiple_items({Q, Producer, Consumer}) ->
- Item1 = produce(Q, Producer, 10, true),
- ok = ping(Producer),
-
- Item2 = produce(Q, Producer, 15, true),
- ok = ping(Producer),
-
- consume(Consumer, 2),
-
- {ok, Items} = last_consumer_items(Consumer),
- ?_assertEqual([Item1, Item2], Items).
-
-should_receive_first_queued_item({Q, Producer, Consumer}) ->
- consume(Consumer, 100),
- timeout = ping(Consumer),
-
- Item = produce(Q, Producer, 11, false),
- ok = ping(Producer),
-
- ok = ping(Consumer),
- ?assertEqual(0, couch_work_queue:item_count(Q)),
-
- {ok, Items} = last_consumer_items(Consumer),
- ?_assertEqual([Item], Items).
-
-should_consume_all({Q, Producer, Consumer}) ->
- Item1 = produce(Q, Producer, 10, true),
- Item2 = produce(Q, Producer, 15, true),
- Item3 = produce(Q, Producer, 20, true),
-
- consume(Consumer, all),
-
- {ok, Items} = last_consumer_items(Consumer),
- ?_assertEqual([Item1, Item2, Item3], Items).
-
-should_timeout_on_close_non_empty_queue({Q, Producer, _}) ->
- produce(Q, Producer, 1, true),
- Status = close_queue(Q),
-
- ?_assertEqual(timeout, Status).
-
-should_not_block_producer_for_non_empty_queue_after_close({Q, Producer, _}) ->
- produce(Q, Producer, 1, true),
- close_queue(Q),
- Pong = ping(Producer),
- Size = couch_work_queue:size(Q),
- Count = couch_work_queue:item_count(Q),
-
- ?_assertEqual({ok, 1, 1}, {Pong, Size, Count}).
-
-should_be_closed({Q, _, Consumers}) when is_list(Consumers) ->
- ok = close_queue(Q),
-
- [consume(C, 1) || C <- Consumers],
-
- LastConsumerItems = [last_consumer_items(C) || C <- Consumers],
- ItemsCount = couch_work_queue:item_count(Q),
- Size = couch_work_queue:size(Q),
-
- ?_assertEqual({[closed, closed, closed], closed, closed},
- {LastConsumerItems, ItemsCount, Size});
-should_be_closed({Q, _, Consumer}) ->
- ok = close_queue(Q),
-
- consume(Consumer, 1),
-
- LastConsumerItems = last_consumer_items(Consumer),
- ItemsCount = couch_work_queue:item_count(Q),
- Size = couch_work_queue:size(Q),
-
- ?_assertEqual({closed, closed, closed},
- {LastConsumerItems, ItemsCount, Size}).
-
-
-close_queue(Q) ->
- test_util:stop_sync(Q, fun() ->
- ok = couch_work_queue:close(Q)
- end, ?TIMEOUT).
-
-spawn_consumer(Q) ->
- Parent = self(),
- spawn(fun() -> consumer_loop(Parent, Q, nil) end).
-
-consumer_loop(Parent, Q, PrevItem) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref};
- {ping, Ref} ->
- Parent ! {pong, Ref},
- consumer_loop(Parent, Q, PrevItem);
- {last_item, Ref} ->
- Parent ! {item, Ref, PrevItem},
- consumer_loop(Parent, Q, PrevItem);
- {consume, N} ->
- Result = couch_work_queue:dequeue(Q, N),
- consumer_loop(Parent, Q, Result)
- end.
-
-spawn_producer(Q) ->
- Parent = self(),
- spawn(fun() -> producer_loop(Parent, Q) end).
-
-producer_loop(Parent, Q) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref};
- {ping, Ref} ->
- Parent ! {pong, Ref},
- producer_loop(Parent, Q);
- {produce, Ref, Size} ->
- Item = crypto:strong_rand_bytes(Size),
- Parent ! {item, Ref, Item},
- ok = couch_work_queue:queue(Q, Item),
- producer_loop(Parent, Q)
- end.
-
-consume(Consumer, N) ->
- Consumer ! {consume, N}.
-
-last_consumer_items(Consumer) ->
- Ref = make_ref(),
- Consumer ! {last_item, Ref},
- receive
- {item, Ref, Items} ->
- Items
- after ?TIMEOUT ->
- timeout
- end.
-
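-%% Ask the producer to enqueue one Size-byte random item. When Wait is
-%% true, also block until the queue's item count has grown, so callers
-%% know the item is buffered rather than already consumed.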
-produce(Q, Producer, Size, Wait) ->
- Ref = make_ref(),
- ItemsCount = couch_work_queue:item_count(Q),
- Producer ! {produce, Ref, Size},
- receive
- {item, Ref, Item} when Wait ->
- ok = wait_increment(Q, ItemsCount),
- Item;
- {item, Ref, Item} ->
- Item
- after ?TIMEOUT ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout asking producer to produce an item"}]})
- end.
-
-ping(Pid) ->
- Ref = make_ref(),
- Pid ! {ping, Ref},
- receive
- {pong, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-stop(Pid, Name) ->
- Ref = make_ref(),
- Pid ! {stop, Ref},
- receive
- {ok, Ref} -> ok
- after ?TIMEOUT ->
- ?debugMsg("Timeout stopping " ++ Name),
- timeout
- end.
-
-wait_increment(Q, ItemsCount) ->
- test_util:wait(fun() ->
- case couch_work_queue:item_count(Q) > ItemsCount of
- true ->
- ok;
- false ->
- wait
- end
- end).
diff --git a/src/couch/test/eunit/couchdb_attachments_tests.erl b/src/couch/test/eunit/couchdb_attachments_tests.erl
deleted file mode 100644
index 04859dbc9..000000000
--- a/src/couch/test/eunit/couchdb_attachments_tests.erl
+++ /dev/null
@@ -1,765 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_attachments_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(COMPRESSION_LEVEL, 8).
--define(ATT_BIN_NAME, <<"logo.png">>).
--define(ATT_TXT_NAME, <<"file.erl">>).
--define(FIXTURE_PNG, filename:join([?FIXTURESDIR, "logo.png"])).
--define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
--define(TIMEOUT, 5000).
--define(TIMEOUT_EUNIT, 100).
--define(TIMEWAIT, 1000).
--define(i2l(I), integer_to_list(I)).
-
-
-start() ->
- Ctx = test_util:start_couch(),
- % ensure in default compression settings for attachments_compression_tests
- config:set("attachments", "compression_level",
- ?i2l(?COMPRESSION_LEVEL), false),
- config:set("attachments", "compressible_types", "text/*", false),
- Ctx.
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, []),
- ok = couch_db:close(Db),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- Host = Addr ++ ":" ++ ?i2l(Port),
- {Host, ?b2l(DbName)}.
-
-setup({binary, standalone}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG);
-setup({text, standalone}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_standalone_text_att/2, Host, DbName, ?FIXTURE_TXT);
-setup({binary, inline}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_inline_png_att/2, Host, DbName, ?FIXTURE_PNG);
-setup({text, inline}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_inline_text_att/2, Host, DbName, ?FIXTURE_TXT);
-setup(compressed) ->
- {Host, DbName} = setup(),
-    setup_att(fun create_already_compressed_att/2, Host, DbName, ?FIXTURE_TXT).
-
-setup_att(Fun, Host, DbName, File) ->
- HttpHost = "http://" ++ Host,
- AttUrl = Fun(HttpHost, DbName),
- {ok, Data} = file:read_file(File),
- DocUrl = string:join([HttpHost, DbName, "doc"], "/"),
- Helpers = {DbName, DocUrl, AttUrl},
- {Data, Helpers}.
-
-teardown(_, {_, {DbName, _, _}}) ->
- teardown(DbName).
-
-teardown({_, DbName}) ->
- teardown(DbName);
-teardown(DbName) ->
- ok = couch_server:delete(?l2b(DbName), []),
- ok.
-
-
-attachments_test_() ->
- {
- "Attachments tests",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- [
- attachments_md5_tests(),
- attachments_compression_tests()
- ]
- }
- }.
-
-attachments_md5_tests() ->
- {
- "Attachments MD5 tests",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_upload_attachment_without_md5/1,
- fun should_upload_attachment_by_chunks_without_md5/1,
- fun should_upload_attachment_with_valid_md5_header/1,
- fun should_upload_attachment_by_chunks_with_valid_md5_header/1,
- fun should_upload_attachment_by_chunks_with_valid_md5_trailer/1,
- fun should_reject_attachment_with_invalid_md5/1,
- fun should_reject_chunked_attachment_with_invalid_md5/1,
- fun should_reject_chunked_attachment_with_invalid_md5_trailer/1
- ]
- }
- }.
-
-attachments_compression_tests() ->
- Funs = [
- fun should_get_att_without_accept_gzip_encoding/2,
- fun should_get_att_with_accept_gzip_encoding/2,
- fun should_get_att_with_accept_deflate_encoding/2,
- fun should_return_406_response_on_unsupported_encoding/2,
- fun should_get_doc_with_att_data/2,
- fun should_get_doc_with_att_data_stub/2
- ],
- {
- "Attachments compression tests",
- [
- {
- "Created via Attachments API",
- created_attachments_compression_tests(standalone, Funs)
- },
- {
- "Created inline via Document API",
- created_attachments_compression_tests(inline, Funs)
- },
- {
-            "Created already compressed via Attachments API",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{compressed, Fun} || Fun <- Funs]
- }
- },
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_not_create_compressed_att_with_deflate_encoding/1,
- fun should_not_create_compressed_att_with_compress_encoding/1,
- fun should_create_compressible_att_with_ctype_params/1
- ]
- }
- ]
- }.
-
-created_attachments_compression_tests(Mod, Funs) ->
- [
- {
-            "Compressible attachments",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{{text, Mod}, Fun} || Fun <- Funs]
- }
- },
- {
-            "Uncompressible attachments",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{{binary, Mod}, Fun} || Fun <- Funs]
- }
- }
- ].
-
-
-
-should_upload_attachment_without_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- Body = "We all live in a yellow submarine!",
- Headers = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_by_chunks_without_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]), "\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Transfer-Encoding", "chunked"},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_with_valid_md5_header({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- Body = "We all live in a yellow submarine!",
- Headers = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(Body)))},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_by_chunks_with_valid_md5_header({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]), "\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(AttData)))},
- {"Host", Host},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_by_chunks_with_valid_md5_trailer({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]),
- "Content-MD5: ", base64:encode(couch_hash:md5_hash(AttData)),
- "\r\n\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Host", Host},
- {"Trailer", "Content-MD5"},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_reject_attachment_with_invalid_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- Body = "We all live in a yellow submarine!",
- Headers = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(400, Code),
- ?assertEqual(<<"content_md5_mismatch">>,
- get_json(Json, [<<"error">>]))
- end).
-
-
-should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]), "\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
- {"Host", Host},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(400, Code),
- ?assertEqual(<<"content_md5_mismatch">>,
- get_json(Json, [<<"error">>]))
- end).
-
-should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]),
- "Content-MD5: ", base64:encode(<<"foobar!">>),
- "\r\n\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Host", Host},
- {"Trailer", "Content-MD5"},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(400, Code),
- ?assertEqual(<<"content_md5_mismatch">>, get_json(Json, [<<"error">>]))
- end).
-
-should_get_att_without_accept_gzip_encoding(_, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(AttUrl),
- ?assertEqual(200, Code),
- ?assertNot(lists:member({"Content-Encoding", "gzip"}, Headers)),
- ?assertEqual(Data, iolist_to_binary(Body))
- end).
-
-should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]),
- ?assertEqual(200, Code),
- ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
- ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
- end);
-should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]),
- ?assertEqual(200, Code),
- ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
- ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
- end);
-should_get_att_with_accept_gzip_encoding({binary, _}, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]),
- ?assertEqual(200, Code),
- ?assertEqual(undefined,
- couch_util:get_value("Content-Encoding", Headers)),
- ?assertEqual(Data, iolist_to_binary(Body))
- end).
-
-should_get_att_with_accept_deflate_encoding(_, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "deflate"}]),
- ?assertEqual(200, Code),
- ?assertEqual(undefined,
- couch_util:get_value("Content-Encoding", Headers)),
- ?assertEqual(Data, iolist_to_binary(Body))
- end).
-
-should_return_406_response_on_unsupported_encoding(_, {_, {_, _, AttUrl}}) ->
- ?_assertEqual(406,
- begin
- {ok, Code, _, _} = test_request:get(
- AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}]),
- Code
- end).
-
-should_get_doc_with_att_data(compressed, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?attachments=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]),
- ?assertEqual(
- <<"text/plain">>,
- couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
- ?assertEqual(Data, base64:decode(AttData))
- end);
-should_get_doc_with_att_data({text, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?attachments=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]),
- ?assertEqual(
- <<"text/plain">>,
- couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
- ?assertEqual(Data, base64:decode(AttData))
- end);
-should_get_doc_with_att_data({binary, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?attachments=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_BIN_NAME]),
- AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]),
- ?assertEqual(
- <<"image/png">>,
- couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
- ?assertEqual(Data, base64:decode(AttData))
- end).
-
-should_get_doc_with_att_data_stub(compressed, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?att_encoding_info=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- ?assertEqual(<<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)),
- AttLength = couch_util:get_value(<<"length">>, AttJson),
- EncLength = couch_util:get_value(<<"encoded_length">>, AttJson),
- ?assertEqual(AttLength, EncLength),
- ?assertEqual(iolist_size(zlib:gzip(Data)), AttLength)
- end);
-should_get_doc_with_att_data_stub({text, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?att_encoding_info=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- ?assertEqual(<<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)),
- AttEncLength = iolist_size(gzip(Data)),
- ?assertEqual(AttEncLength,
- couch_util:get_value(<<"encoded_length">>, AttJson)),
- ?assertEqual(byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson))
- end);
-should_get_doc_with_att_data_stub({binary, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?att_encoding_info=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_BIN_NAME]),
- ?assertEqual(undefined,
- couch_util:get_value(<<"encoding">>, AttJson)),
- ?assertEqual(undefined,
- couch_util:get_value(<<"encoded_length">>, AttJson)),
- ?assertEqual(byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson))
- end).
-
-should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) ->
- ?_assertEqual(415,
- begin
- HttpHost = "http://" ++ Host,
- AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Body = zlib:compress(Data),
- Headers = [
- {"Content-Encoding", "deflate"},
- {"Content-Type", "text/plain"}
- ],
- {ok, Code, _, _} = test_request:put(AttUrl, Headers, Body),
- Code
- end).
-
-should_not_create_compressed_att_with_compress_encoding({Host, DbName}) ->
- % Note: As of OTP R13B04, it seems there's no LZW compression
- % (i.e. UNIX compress utility implementation) lib in OTP.
- % However there's a simple working Erlang implementation at:
- % http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php
- ?_assertEqual(415,
- begin
- HttpHost = "http://" ++ Host,
- AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Headers = [
- {"Content-Encoding", "compress"},
- {"Content-Type", "text/plain"}
- ],
- {ok, Code, _, _} = test_request:put(AttUrl, Headers, Data),
- Code
- end).
-
-should_create_compressible_att_with_ctype_params({Host, DbName}) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- HttpHost = "http://" ++ Host,
- DocUrl = string:join([HttpHost, DbName, ?docid()], "/"),
- AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"),
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Headers = [{"Content-Type", "text/plain; charset=UTF-8"}],
- {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data),
- ?assertEqual(201, Code0),
-
- {ok, Code1, _, Body} = test_request:get(
- DocUrl ++ "?att_encoding_info=true"),
- ?assertEqual(200, Code1),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- ?assertEqual(<<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)),
- AttEncLength = iolist_size(gzip(Data)),
- ?assertEqual(AttEncLength,
- couch_util:get_value(<<"encoded_length">>, AttJson)),
- ?assertEqual(byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson))
- end)}.
-
-
-compact_after_lowering_attachment_size_limit_test_() ->
- {
- "Compact after lowering attachment size limit",
- {
- foreach,
- fun() ->
- Ctx = test_util:start_couch(),
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- {Ctx, DbName}
- end,
- fun({Ctx, DbName}) ->
- config:delete("couchdb", "max_attachment_size"),
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx)
- end,
- [
- fun should_compact_after_lowering_attachment_size_limit/1
- ]
- }
- }.
-
-
-should_compact_after_lowering_attachment_size_limit({_Ctx, DbName}) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- {ok, Db1} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = #doc{id = <<"doc1">>, atts = att(1000)},
- {ok, _} = couch_db:update_doc(Db1, Doc1, []),
- couch_db:close(Db1),
- config:set("couchdb", "max_attachment_size", "1", _Persist = false),
- compact_db(DbName),
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, Doc2} = couch_db:open_doc(Db2, <<"doc1">>),
- couch_db:close(Db2),
- [Att] = Doc2#doc.atts,
- ?assertEqual(1000, couch_att:fetch(att_len, Att))
- end)}.
-
-
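-%% Build a one-element attachment list carrying Size bytes of "x",
-%% supplying the data lazily through a fun, which couch_att accepts as
-%% a data source.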
-att(Size) when is_integer(Size), Size >= 1 ->
- [couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, fun(_Bytes) ->
- << <<"x">> || _ <- lists:seq(1, Size) >>
- end}
- ])].
-
-
-compact_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db).
-
-
-wait_compaction(DbName, Kind, Line) ->
- WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
- end,
- case test_util:wait(WaitFun, ?TIMEOUT) of
- timeout ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, Line},
- {reason, "Timeout waiting for "
- ++ Kind
- ++ " database compaction"}]});
- _ ->
- ok
- end.
-
-
-is_compaction_running(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DbInfo} = couch_db:get_db_info(Db),
- couch_db:close(Db),
- couch_util:get_value(compact_running, DbInfo) =:= true.
-
-
-internal_replication_after_lowering_attachment_size_limit_test_() ->
- {
- "Internal replication after lowering max attachment size",
- {
- foreach,
- fun() ->
- Ctx = test_util:start_couch([mem3]),
- SrcName = ?tempdb(),
- {ok, SrcDb} = couch_db:create(SrcName, [?ADMIN_CTX]),
- ok = couch_db:close(SrcDb),
- TgtName = ?tempdb(),
- {ok, TgtDb} = couch_db:create(TgtName, [?ADMIN_CTX]),
- ok = couch_db:close(TgtDb),
- {Ctx, SrcName, TgtName}
- end,
- fun({Ctx, SrcName, TgtName}) ->
- config:delete("couchdb", "max_attachment_size"),
- ok = couch_server:delete(SrcName, [?ADMIN_CTX]),
- ok = couch_server:delete(TgtName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx)
- end,
- [
- fun should_replicate_after_lowering_attachment_size/1
- ]
- }
- }.
-
-should_replicate_after_lowering_attachment_size({_Ctx, SrcName, TgtName}) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- {ok, SrcDb} = couch_db:open(SrcName, [?ADMIN_CTX]),
- SrcDoc = #doc{id = <<"doc">>, atts = att(1000)},
- {ok, _} = couch_db:update_doc(SrcDb, SrcDoc, []),
- couch_db:close(SrcDb),
- config:set("couchdb", "max_attachment_size", "1", _Persist = false),
- % Create a pair of "fake" shards
- SrcShard = #shard{name = SrcName, node = node()},
- TgtShard = #shard{name = TgtName, node = node()},
- mem3_rep:go(SrcShard, TgtShard, []),
- {ok, TgtDb} = couch_db:open_int(TgtName, []),
- {ok, TgtDoc} = couch_db:open_doc(TgtDb, <<"doc">>),
- couch_db:close(TgtDb),
- [Att] = TgtDoc#doc.atts,
- ?assertEqual(1000, couch_att:fetch(att_len, Att))
- end)}.
-
-
-get_json(Json, Path) ->
- couch_util:get_nested_json_value(Json, Path).
-
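-%% Uppercase hex of a positive integer, e.g. to_hex(255) =:= "FF".
-%% Note to_hex(0) yields "", which is fine for the chunk sizes used
-%% below since they are always > 0.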
-to_hex(Val) ->
- to_hex(Val, []).
-
-to_hex(0, Acc) ->
- Acc;
-to_hex(Val, Acc) ->
- to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
-
-hex_char(V) when V < 10 -> $0 + V;
-hex_char(V) -> $A + V - 10.
-
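-%% Encode Chunks as an HTTP/1.1 chunked transfer-encoded body, ending
-%% with the terminal zero-size chunk but no trailing CRLF, so callers
-%% can append trailers or the final CRLF themselves. For example:
-%%   chunked_body([<<"ab">>, <<"c">>]) =:= <<"2\r\nab\r\n1\r\nc\r\n0\r\n">>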
-chunked_body(Chunks) ->
- chunked_body(Chunks, []).
-
-chunked_body([], Acc) ->
- iolist_to_binary(lists:reverse(Acc, "0\r\n"));
-chunked_body([Chunk | Rest], Acc) ->
- Size = to_hex(size(Chunk)),
- chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
-
-get_socket() ->
- Options = [binary, {packet, 0}, {active, false}],
- Port = mochiweb_socket_server:get(couch_httpd, port),
- {ok, Sock} = gen_tcp:connect(bind_address(), Port, Options),
- Sock.
-
-bind_address() ->
- case config:get("httpd", "bind_address") of
- undefined -> any;
- Address -> Address
- end.
-
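-%% Send a raw request over gen_tcp rather than through test_request so
-%% the MD5 tests can control Transfer-Encoding and trailers byte for
-%% byte; this assumes the whole response arrives in a single recv
-%% after the ?TIMEWAIT sleep.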
-request(Method, Url, Headers, Body) ->
- RequestHead = [Method, " ", Url, " HTTP/1.1"],
- RequestHeaders = [[string:join([Key, Value], ": "), "\r\n"]
- || {Key, Value} <- Headers],
- Request = [RequestHead, "\r\n", RequestHeaders, "\r\n", Body],
- Sock = get_socket(),
- gen_tcp:send(Sock, list_to_binary(lists:flatten(Request))),
- timer:sleep(?TIMEWAIT), % must wait to receive complete response
- {ok, R} = gen_tcp:recv(Sock, 0),
- gen_tcp:close(Sock),
- [Header, Body1] = re:split(R, "\r\n\r\n", [{return, binary}]),
- {ok, {http_response, _, Code, _}, _} =
- erlang:decode_packet(http, Header, []),
- Json = jiffy:decode(Body1),
- {ok, Code, Json}.
-
-create_standalone_text_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "text/plain"}], Data),
- ?assertEqual(201, Code),
- Url.
-
-create_standalone_png_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_PNG),
- Url = string:join([Host, DbName, "doc", ?b2l(?ATT_BIN_NAME)], "/"),
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "image/png"}], Data),
- ?assertEqual(201, Code),
- Url.
-
-create_inline_text_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Url = string:join([Host, DbName, "doc"], "/"),
- Doc = {[
- {<<"_attachments">>, {[
- {?ATT_TXT_NAME, {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"data">>, base64:encode(Data)}
- ]}
- }]}}
- ]},
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)),
- ?assertEqual(201, Code),
- string:join([Url, ?b2l(?ATT_TXT_NAME)], "/").
-
-create_inline_png_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_PNG),
- Url = string:join([Host, DbName, "doc"], "/"),
- Doc = {[
- {<<"_attachments">>, {[
- {?ATT_BIN_NAME, {[
- {<<"content_type">>, <<"image/png">>},
- {<<"data">>, base64:encode(Data)}
- ]}
- }]}}
- ]},
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)),
- ?assertEqual(201, Code),
- string:join([Url, ?b2l(?ATT_BIN_NAME)], "/").
-
-create_already_compressed_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}],
- zlib:gzip(Data)),
- ?assertEqual(201, Code),
- Url.
-
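-%% Like zlib:gzip/1 but pinned to ?COMPRESSION_LEVEL so the computed
-%% encoded_length matches the server's compression setting; windowBits
-%% of 16 + 15 selects gzip framing (15-bit window plus gzip header)
-%% rather than a raw deflate stream.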
-gzip(Data) ->
- Z = zlib:open(),
- ok = zlib:deflateInit(Z, ?COMPRESSION_LEVEL, deflated, 16 + 15, 8, default),
- Chunk = zlib:deflate(Z, Data),
- Last = zlib:deflate(Z, [], finish),
- ok = zlib:deflateEnd(Z),
- ok = zlib:close(Z),
- [Chunk, Last].
diff --git a/src/couch/test/eunit/couchdb_auth_tests.erl b/src/couch/test/eunit/couchdb_auth_tests.erl
deleted file mode 100644
index 19d32d0c5..000000000
--- a/src/couch/test/eunit/couchdb_auth_tests.erl
+++ /dev/null
@@ -1,115 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_auth_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-
-setup(PortType) ->
- Hashed = couch_passwords:hash_admin_password("artischocko"),
- ok = config:set("admins", "rocko", binary_to_list(Hashed), _Persist=false),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- lists:concat(["http://", Addr, ":", port(PortType), "/_session"]).
-
-setup_require_valid_user(PortType) ->
- ok = config:set("chttpd", "require_valid_user", "true", _Persist=false),
- setup(PortType).
-
-teardown(_, _) ->
- ok.
-
-teardown_require_valid_user(_, _) ->
- config:set("chttpd", "require_valid_user", "false", _Persist=false).
-
-
-auth_test_() ->
- Tests = [
- fun should_return_username_on_post_to_session/2,
- fun should_not_return_authenticated_field/2,
- fun should_return_list_of_handlers/2
- ],
- RequireValidUserTests = [
- % See #1947 - this should work even with require_valid_user
- fun should_return_username_on_post_to_session/2
- ],
- {
- "Auth tests",
- {
- setup,
- fun() -> test_util:start_couch([chttpd]) end, fun test_util:stop_couch/1,
- [
- make_test_cases(clustered, Tests),
- make_test_cases(backdoor, Tests),
- make_require_valid_user_test_cases(clustered, RequireValidUserTests)
- ]
- }
- }.
-
-make_test_cases(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s", [Mod])),
- {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
- }.
-
-make_require_valid_user_test_cases(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s require_valid_user=true", [Mod])),
- {foreachx, fun setup_require_valid_user/1, fun teardown_require_valid_user/2,
- [{Mod, Fun} || Fun <- Funs]}
- }.
-
-should_return_username_on_post_to_session(_PortType, Url) ->
- ?_assertEqual(<<"rocko">>,
- begin
- Hashed = couch_passwords:hash_admin_password(<<"artischocko">>),
- ok = config:set("admins", "rocko", binary_to_list(Hashed), false),
- {ok, _, _, Body} = test_request:post(Url, [{"Content-Type", "application/json"}],
- "{\"name\":\"rocko\", \"password\":\"artischocko\"}"),
- {Json} = jiffy:decode(Body),
- proplists:get_value(<<"name">>, Json)
- end).
-
-should_not_return_authenticated_field(_PortType, Url) ->
- ?_assertThrow({not_found, _},
- begin
- couch_util:get_nested_json_value(session(Url), [
- <<"info">>, <<"authenticated">>])
- end).
-
-% Both the clustered and backdoor interfaces advertise the same
-% authentication handlers, so one clause covers both port types.
-should_return_list_of_handlers(_PortType, Url) ->
-    ?_assertEqual([<<"cookie">>, <<"default">>],
-        begin
-            couch_util:get_nested_json_value(session(Url), [
-                <<"info">>, <<"authentication_handlers">>])
-        end).
-
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-session(Url) ->
- {ok, _, _, Body} = test_request:get(Url, [{"Content-Type", "application/json"}],
- "{\"name\":\"rocko\", \"password\":\"artischocko\"}"),
- jiffy:decode(Body).
-
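-%% "clustered" targets the chttpd (cluster) port and "backdoor" the
-%% node-local couch_httpd port, so each test case runs against both
-%% interfaces.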
-port(clustered) ->
- integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
diff --git a/src/couch/test/eunit/couchdb_cookie_domain_tests.erl b/src/couch/test/eunit/couchdb_cookie_domain_tests.erl
deleted file mode 100755
index c46352f35..000000000
--- a/src/couch/test/eunit/couchdb_cookie_domain_tests.erl
+++ /dev/null
@@ -1,80 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_cookie_domain_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "cookie_domain_test_admin").
--define(PASS, "pass").
-
-setup() ->
- Ctx = test_util:start_couch([chttpd]),
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = ?l2b(io_lib:format("http://~s:~b/_session", [Addr, Port])),
- ContentType = [{"Content-Type", "application/json"}],
- Payload = jiffy:encode({[{name, ?l2b(?USER)}, {password, ?l2b(?PASS)}]}),
- {ok, ?b2l(Url), ContentType, ?b2l(Payload), Ctx}.
-
-teardown({ok, _, _, _, Ctx}) ->
- ok = config:delete("admins", ?USER, _Persist=false),
- test_util:stop_couch(Ctx).
-
-cookie_test_() ->
- {
- "Cookie domain tests",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- fun({ok, Url, ContentType, Payload, _}) ->
- [
- should_set_cookie_domain(Url, ContentType, Payload),
- should_not_set_cookie_domain(Url, ContentType, Payload),
- should_delete_cookie_domain(Url, ContentType, Payload)
- ]
- end
- }
- }.
-
-should_set_cookie_domain(Url, ContentType, Payload) ->
- ?_test(begin
- ok = config:set("couch_httpd_auth", "cookie_domain",
- "example.com", false),
- {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload),
- ?assertEqual(200, Code),
- Cookie = proplists:get_value("Set-Cookie", Headers),
- ?assert(string:str(Cookie, "; Domain=example.com") > 0)
- end).
-
-should_not_set_cookie_domain(Url, ContentType, Payload) ->
- ?_test(begin
- ok = config:set("couch_httpd_auth", "cookie_domain", "", false),
- {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload),
- ?assertEqual(200, Code),
- Cookie = proplists:get_value("Set-Cookie", Headers),
- ?assertEqual(0, string:str(Cookie, "; Domain="))
- end).
-
-should_delete_cookie_domain(Url, ContentType, Payload) ->
- ?_test(begin
- ok = config:set("couch_httpd_auth", "cookie_domain",
- "example.com", false),
- {ok, Code, Headers, _} = test_request:delete(Url, ContentType, Payload),
- ?assertEqual(200, Code),
- Cookie = proplists:get_value("Set-Cookie", Headers),
- ?assert(string:str(Cookie, "; Domain=example.com") > 0)
- end).
diff --git a/src/couch/test/eunit/couchdb_cors_tests.erl b/src/couch/test/eunit/couchdb_cors_tests.erl
deleted file mode 100644
index 82630bba7..000000000
--- a/src/couch/test/eunit/couchdb_cors_tests.erl
+++ /dev/null
@@ -1,344 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_cors_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--include_lib("chttpd/include/chttpd_cors.hrl").
-
--define(TIMEOUT, 1000).
-
--define(_assertEqualLists(A, B),
- ?_assertEqual(lists:usort(A), lists:usort(B))).
-
--define(assertEqualLists(A, B),
- ?assertEqual(lists:usort(A), lists:usort(B))).
-
-start() ->
- Ctx = test_util:start_couch([ioq]),
- ok = config:set("httpd", "enable_cors", "true", false),
- ok = config:set("vhosts", "example.com", "/", false),
- Ctx.
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
-
- config:set("cors", "credentials", "false", false),
- config:set("cors", "origins", "http://example.com", false),
-
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- Host = "http://" ++ Addr ++ ":" ++ Port,
- {Host, ?b2l(DbName)}.
-
-setup({Mod, VHost}) ->
- {Host, DbName} = setup(),
- Url = case Mod of
- server ->
- Host;
- db ->
- Host ++ "/" ++ DbName
- end,
- DefaultHeaders = [{"Origin", "http://example.com"}]
- ++ maybe_append_vhost(VHost),
- {Host, DbName, Url, DefaultHeaders}.
-
-teardown(DbName) when is_list(DbName) ->
- ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
- ok;
-teardown({_, DbName}) ->
- teardown(DbName).
-
-teardown(_, {_, DbName, _, _}) ->
- teardown(DbName).
-
-
-cors_test_() ->
- Funs = [
- fun should_not_allow_origin/2,
- fun should_not_allow_origin_with_port_mismatch/2,
- fun should_not_allow_origin_with_scheme_mismatch/2,
-        fun should_not_allow_origin_due_to_case_mismatch/2,
- fun should_make_simple_request/2,
- fun should_make_preflight_request/2,
-        fun should_make_preflight_request_with_port/2,
-        fun should_make_preflight_request_with_scheme/2,
-        fun should_make_preflight_request_with_wildcard_origin/2,
- fun should_make_request_with_credentials/2,
- fun should_make_origin_request_with_auth/2,
- fun should_make_preflight_request_with_auth/2
- ],
- {
- "CORS (COUCHDB-431)",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- [
- cors_tests(Funs),
- vhost_cors_tests(Funs),
- headers_tests()
- ]
- }
- }.
-
-headers_tests() ->
- {
- "Various headers tests",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_not_return_cors_headers_for_invalid_origin/1,
- fun should_not_return_cors_headers_for_invalid_origin_preflight/1,
- fun should_make_request_against_attachment/1,
- fun should_make_range_request_against_attachment/1,
- fun should_make_request_with_if_none_match_header/1
- ]
- }
- }.
-
-cors_tests(Funs) ->
- {
- "CORS tests",
- [
- make_test_case(server, false, Funs),
- make_test_case(db, false, Funs)
- ]
- }.
-
-vhost_cors_tests(Funs) ->
- {
- "Virtual Host CORS",
- [
- make_test_case(server, true, Funs),
- make_test_case(db, true, Funs)
- ]
- }.
-
-make_test_case(Mod, UseVhost, Funs) ->
- {
- case Mod of server -> "Server"; db -> "Database" end,
- {foreachx, fun setup/1, fun teardown/2, [{{Mod, UseVhost}, Fun}
- || Fun <- Funs]}
- }.
-
-
-should_not_allow_origin(_, {_, _, Url, Headers0}) ->
- ?_assertEqual(undefined,
- begin
- config:delete("cors", "origins", false),
- Headers1 = proplists:delete("Origin", Headers0),
- Headers = [{"Origin", "http://127.0.0.1"}]
- ++ Headers1,
- {ok, _, Resp, _} = test_request:get(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
-
-should_not_allow_origin_with_port_mismatch({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(undefined,
- begin
- Headers = [{"Origin", "http://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
-
-should_not_allow_origin_with_scheme_mismatch({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(undefined,
- begin
-            %% https vs the configured http://example.com origin
-            Headers = [{"Origin", "https://example.com"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
-
-should_not_allow_origin_due_to_case_mismatch({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(undefined,
- begin
- Headers = [{"Origin", "http://ExAmPlE.CoM"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
-
-should_make_simple_request(_, {_, _, Url, DefaultHeaders}) ->
- ?_test(begin
- {ok, _, Resp, _} = test_request:get(Url, DefaultHeaders),
- ?assertEqual(
- undefined,
- proplists:get_value("Access-Control-Allow-Credentials", Resp)),
- ?assertEqual(
- "http://example.com",
- proplists:get_value("Access-Control-Allow-Origin", Resp)),
- ?assertEqualLists(
- ?COUCH_HEADERS ++ list_simple_headers(Resp),
- split_list(proplists:get_value("Access-Control-Expose-Headers", Resp)))
- end).
-
-should_make_preflight_request(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqualLists(?SUPPORTED_METHODS,
- begin
- Headers = DefaultHeaders
- ++ [{"Access-Control-Request-Method", "GET"}],
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- split_list(proplists:get_value("Access-Control-Allow-Methods", Resp))
- end).
-
-should_make_preflight_request_with_port({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual("http://example.com:5984",
- begin
- config:set("cors", "origins", "http://example.com:5984",
- false),
- Headers = [{"Origin", "http://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
-
-should_make_preflight_request_with_scheme({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual("https://example.com:5984",
- begin
- config:set("cors", "origins", "https://example.com:5984",
- false),
- Headers = [{"Origin", "https://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
-
-should_make_preflight_request_with_wildcard_origin({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual("https://example.com:5984",
- begin
- config:set("cors", "origins", "*", false),
- Headers = [{"Origin", "https://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
-
-should_make_request_with_credentials(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqual("true",
- begin
- ok = config:set("cors", "credentials", "true", false),
- {ok, _, Resp, _} = test_request:options(Url, DefaultHeaders),
- proplists:get_value("Access-Control-Allow-Credentials", Resp)
- end).
-
-should_make_origin_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqual("http://example.com",
- begin
- Hashed = couch_passwords:hash_admin_password(<<"test">>),
- config:set("admins", "test", ?b2l(Hashed), false),
- {ok, _, Resp, _} = test_request:get(
- Url, DefaultHeaders, [{basic_auth, {"test", "test"}}]),
- config:delete("admins", "test", false),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
-
-should_make_preflight_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqualLists(?SUPPORTED_METHODS,
- begin
- Hashed = couch_passwords:hash_admin_password(<<"test">>),
- config:set("admins", "test", ?b2l(Hashed), false),
- Headers = DefaultHeaders
- ++ [{"Access-Control-Request-Method", "GET"}],
- {ok, _, Resp, _} = test_request:options(
- Url, Headers, [{basic_auth, {"test", "test"}}]),
- config:delete("admins", "test", false),
- split_list(proplists:get_value("Access-Control-Allow-Methods", Resp))
- end).
-
-should_not_return_cors_headers_for_invalid_origin({Host, _}) ->
- ?_assertEqual(undefined,
- begin
- Headers = [{"Origin", "http://127.0.0.1"}],
- {ok, _, Resp, _} = test_request:get(Host, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
-
-should_not_return_cors_headers_for_invalid_origin_preflight({Host, _}) ->
- ?_assertEqual(undefined,
- begin
- Headers = [{"Origin", "http://127.0.0.1"},
- {"Access-Control-Request-Method", "GET"}],
- {ok, _, Resp, _} = test_request:options(Host, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
-
-should_make_request_against_attachment({Host, DbName}) ->
- {"COUCHDB-1689",
- ?_assertEqual(200,
- begin
- Url = Host ++ "/" ++ DbName,
- {ok, Code0, _, _} = test_request:put(
- Url ++ "/doc/file.txt", [{"Content-Type", "text/plain"}],
- "hello, couch!"),
- ?assert(Code0 =:= 201),
- {ok, Code, _, _} = test_request:get(
- Url ++ "/doc?attachments=true",
- [{"Origin", "http://example.com"}]),
- Code
- end)}.
-
-should_make_range_request_against_attachment({Host, DbName}) ->
- {"COUCHDB-1689",
- ?_assertEqual(206,
- begin
- Url = Host ++ "/" ++ DbName,
- {ok, Code0, _, _} = test_request:put(
- Url ++ "/doc/file.txt",
- [{"Content-Type", "application/octet-stream"}],
- "hello, couch!"),
- ?assert(Code0 =:= 201),
- {ok, Code, _, _} = test_request:get(
- Url ++ "/doc/file.txt", [{"Origin", "http://example.com"},
- {"Range", "bytes=0-6"}]),
- Code
- end)}.
-
-should_make_request_with_if_none_match_header({Host, DbName}) ->
- {"COUCHDB-1697",
- ?_assertEqual(304,
- begin
- Url = Host ++ "/" ++ DbName,
- {ok, Code0, Headers0, _} = test_request:put(
- Url ++ "/doc", [{"Content-Type", "application/json"}], "{}"),
- ?assert(Code0 =:= 201),
- ETag = proplists:get_value("ETag", Headers0),
- {ok, Code, _, _} = test_request:get(
- Url ++ "/doc", [{"Origin", "http://example.com"},
- {"If-None-Match", ETag}]),
- Code
- end)}.
-
-
-maybe_append_vhost(true) ->
- [{"Host", "http://example.com"}];
-maybe_append_vhost(false) ->
- [].
-
-split_list(S) ->
- re:split(S, "\\s*,\\s*", [trim, {return, list}]).
-
-list_simple_headers(Headers) ->
- LCHeaders = [string:to_lower(K) || {K, _V} <- Headers],
- lists:filter(fun(H) -> lists:member(H, ?SIMPLE_HEADERS) end, LCHeaders).
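Every case in the removed CORS suite repeats one round-trip: write cors/origins into the config, send an OPTIONS preflight carrying an Origin header, and read Access-Control-Allow-Origin back from the response. A minimal sketch of that pattern, assuming a running node reachable at Url and the suite's test_request helper (the function name is illustrative):

    % Sketch: perform a CORS preflight and return the allowed origin,
    % or undefined when the origin is rejected.
    preflight_origin(Url) ->
        ok = config:set("cors", "origins", "http://example.com", false),
        Headers = [{"Origin", "http://example.com"},
                   {"Access-Control-Request-Method", "GET"}],
        {ok, _, Resp, _} = test_request:options(Url, Headers),
        proplists:get_value("Access-Control-Allow-Origin", Resp).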
diff --git a/src/couch/test/eunit/couchdb_db_tests.erl b/src/couch/test/eunit/couchdb_db_tests.erl
deleted file mode 100644
index 734bafb9f..000000000
--- a/src/couch/test/eunit/couchdb_db_tests.erl
+++ /dev/null
@@ -1,91 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_db_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-setup() ->
- DbName = ?b2l(?tempdb()),
- fabric:create_db(DbName),
- DbName.
-
-
-teardown(DbName) ->
- (catch fabric:delete_db(DbName)),
- ok.
-
-
-clustered_db_test_() ->
- {
- "Checking clustered db API",
- {
- setup,
- fun() -> test_util:start_couch([ddoc_cache, mem3]) end,
- fun test_util:stop/1,
- [
- {
- "DB deletion",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_close_deleted_db/1,
- fun should_kill_caller_from_load_validation_funs_for_deleted_db/1
- ]
- }
- }
- ]
- }
- }.
-
-
-should_close_deleted_db(DbName) ->
- ?_test(begin
- [#shard{name = ShardName} | _] = mem3:shards(DbName),
- {ok, Db} = couch_db:open(ShardName, []),
-
- MonitorRef = couch_db:monitor(Db),
- fabric:delete_db(DbName),
- receive
- {'DOWN', MonitorRef, _Type, _Pid, _Info} ->
- ok
- after 2000 ->
- throw(timeout_error)
- end,
- test_util:wait(fun() ->
- case ets:lookup(couch_dbs, DbName) of
- [] -> ok;
- _ -> wait
- end
- end),
- ?assertEqual([], ets:lookup(couch_dbs, DbName))
- end).
-
-
-should_kill_caller_from_load_validation_funs_for_deleted_db(DbName) ->
- ?_test(begin
- [#shard{name = ShardName} | _] = mem3:shards(DbName),
- {ok, Db} = couch_db:open(ShardName, []),
-
- MonitorRef = couch_db:monitor(Db),
- fabric:delete_db(DbName),
- receive
- {'DOWN', MonitorRef, _Type, _Pid, _Info} ->
- ok
- after 2000 ->
- throw(timeout_error)
- end,
- ?assertError(database_does_not_exist, couch_db:load_validation_funs(Db))
- end).
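Both tests above rely on the same detection idiom: open one shard of the clustered database, monitor its couch_db handle, delete the database through fabric, and treat the resulting 'DOWN' message as proof that the shard was closed. Condensed into a single helper (the name is illustrative; #shard{} comes from mem3.hrl as included above):

    % Sketch: returns ok once the first shard backing DbName goes down
    % after the clustered database is deleted, or timeout after 2s.
    wait_for_shard_down(DbName) ->
        [#shard{name = ShardName} | _] = mem3:shards(DbName),
        {ok, Db} = couch_db:open(ShardName, []),
        MonitorRef = couch_db:monitor(Db),
        fabric:delete_db(DbName),
        receive
            {'DOWN', MonitorRef, _Type, _Pid, _Info} -> ok
        after 2000 -> timeout
        end.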
diff --git a/src/couch/test/eunit/couchdb_design_doc_tests.erl b/src/couch/test/eunit/couchdb_design_doc_tests.erl
deleted file mode 100644
index 653a6cb17..000000000
--- a/src/couch/test/eunit/couchdb_design_doc_tests.erl
+++ /dev/null
@@ -1,87 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_design_doc_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_design_doc(DbName, <<"_design/foo">>),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- BaseUrl = "http://" ++ Addr ++ ":" ++ Port,
- {?b2l(DbName), BaseUrl}.
-
-
-teardown({DbName, _}) ->
- couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
- ok.
-
-
-design_list_test_() ->
- {
- "Check _list functionality",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_return_empty_when_plain_return/1,
- fun should_return_empty_when_no_docs/1
- ]
- }
- }
- }.
-
-should_return_empty_when_plain_return({DbName, BaseUrl}) ->
- ?_test(begin
- ?assertEqual(<<>>,
- query_text(BaseUrl, DbName, "foo", "_list/plain_return/simple_view"))
- end).
-
-should_return_empty_when_no_docs({DbName, BaseUrl}) ->
- ?_test(begin
- ?assertEqual(<<>>,
- query_text(BaseUrl, DbName, "foo", "_list/simple_render/simple_view"))
- end).
-
-create_design_doc(DbName, DDName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"simple_view">>, {[
- {<<"map">>, <<"function(doc) {emit(doc._id, doc)}">> },
- {<<"reduce">>, <<"function (key, values, rereduce) {return sum(values);}">> }
- ]}}
- ]}},
- {<<"lists">>, {[
- {<<"plain_return">>, <<"function(head, req) {return;}">>},
- {<<"simple_render">>, <<"function(head, req) {var row; while(row=getRow()) {send(JSON.stringify(row)); }}">>}
- ]}}
- ]}),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db),
- Rev.
-
-query_text(BaseUrl, DbName, DDoc, Path) ->
- {ok, Code, _Headers, Body} = test_request:get(
- BaseUrl ++ "/" ++ DbName ++ "/_design/" ++ DDoc ++ "/" ++ Path),
- ?assertEqual(200, Code),
- Body.
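Both assertions above reduce to one HTTP query shape: GET a _list function applied to a view and compare the raw response body. A usage sketch over the fixture and helper defined above (both tests expect an empty body because the database contains no documents):

    % Sketch: run inside the suite's setup; an empty db means the list
    % function emits nothing, so the body is the empty binary.
    {DbName, BaseUrl} = setup(),
    <<>> = query_text(BaseUrl, DbName, "foo", "_list/simple_render/simple_view").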
diff --git a/src/couch/test/eunit/couchdb_file_compression_tests.erl b/src/couch/test/eunit/couchdb_file_compression_tests.erl
deleted file mode 100644
index 77250337c..000000000
--- a/src/couch/test/eunit/couchdb_file_compression_tests.erl
+++ /dev/null
@@ -1,250 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_file_compression_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DDOC_ID, <<"_design/test">>).
--define(DOCS_COUNT, 1000).
--define(TIMEOUT, 60).
-
-setup_all() ->
- Ctx = test_util:start_couch(),
- config:set("couchdb", "file_compression", "none", false),
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = populate_db(Db, ?DOCS_COUNT),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DDOC_ID},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"by_id">>, {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>}
- ]}}
-        ]}}

- ]}),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
- ok = couch_db:close(Db),
- {Ctx, DbName}.
-
-
-teardown_all({Ctx, DbName}) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx).
-
-
-couch_file_compression_test_() ->
- {
- "CouchDB file compression tests",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {with, [
- fun should_use_none/1,
- fun should_use_deflate_1/1,
- fun should_use_deflate_9/1,
- fun should_use_snappy/1,
- fun should_compare_compression_methods/1
- ]}
- }
- }.
-
-
-should_use_none({_, DbName}) -> run_test(DbName, "none").
-should_use_deflate_1({_, DbName}) -> run_test(DbName, "deflate_1").
-should_use_deflate_9({_, DbName}) -> run_test(DbName, "deflate_9").
-should_use_snappy({_, DbName}) -> run_test(DbName, "snappy").
-
-
-should_compare_compression_methods({_, DbName}) ->
- TestDb = setup_db(DbName),
- Name = "none > snappy > deflate_1 > deflate_9",
- try
- {Name, {timeout, ?TIMEOUT, ?_test(compare_methods(TestDb))}}
- after
- couch_server:delete(TestDb, [?ADMIN_CTX])
- end.
-
-
-run_test(DbName, Comp) ->
- config:set("couchdb", "file_compression", Comp, false),
- Timeout = 5 + ?TIMEOUT,
- TestDb = setup_db(DbName),
- Tests = [
- {"compact database", {timeout, Timeout, ?_test(compact_db(DbName))}},
- {"compact view", {timeout, Timeout, ?_test(compact_view(DbName))}}
- ],
- try
- {"Use compression: " ++ Comp, Tests}
- after
- ok = couch_server:delete(TestDb, [?ADMIN_CTX])
- end.
-
-
-compare_methods(DbName) ->
- config:set("couchdb", "file_compression", "none", false),
- ExternalSizePreCompact = db_external_size(DbName),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeNone = db_disk_size(DbName),
- ViewSizeNone = view_disk_size(DbName),
- ExternalSizeNone = db_external_size(DbName),
- ViewExternalSizeNone = view_external_size(DbName),
-
- config:set("couchdb", "file_compression", "snappy", false),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeSnappy = db_disk_size(DbName),
- ViewSizeSnappy = view_disk_size(DbName),
- ExternalSizeSnappy = db_external_size(DbName),
- ViewExternalSizeSnappy = view_external_size(DbName),
-
- ?assert(DbSizeNone > DbSizeSnappy),
- ?assert(ViewSizeNone > ViewSizeSnappy),
-
- config:set("couchdb", "file_compression", "deflate_1", false),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeDeflate1 = db_disk_size(DbName),
- ViewSizeDeflate1 = view_disk_size(DbName),
-
- ?assert(DbSizeSnappy > DbSizeDeflate1),
- ?assert(ViewSizeSnappy > ViewSizeDeflate1),
-
- config:set("couchdb", "file_compression", "deflate_9", false),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeDeflate9 = db_disk_size(DbName),
- ViewSizeDeflate9 = view_disk_size(DbName),
- ExternalSizeDeflate9 = db_external_size(DbName),
- ViewExternalSizeDeflate9 = view_external_size(DbName),
-
- ?assert(DbSizeDeflate1 > DbSizeDeflate9),
- ?assert(ViewSizeDeflate1 > ViewSizeDeflate9),
- ?assert(ExternalSizePreCompact >= ExternalSizeNone),
- ?assert(ExternalSizeNone =:= ExternalSizeSnappy),
- ?assert(ExternalSizeNone =:= ExternalSizeDeflate9),
- ?assert(ViewExternalSizeNone =:= ViewExternalSizeSnappy),
- ?assert(ViewExternalSizeNone =:= ViewExternalSizeDeflate9).
-
-
-populate_db(_Db, NumDocs) when NumDocs =< 0 ->
- ok;
-populate_db(Db, NumDocs) ->
- Docs = lists:map(
- fun(_) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, couch_uuids:random()},
- {<<"string">>, ?l2b(lists:duplicate(1000, $X))}
- ]})
- end,
- lists:seq(1, 500)),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- populate_db(Db, NumDocs - 500).
-
-
-setup_db(SrcDbName) ->
- TgtDbName = ?tempdb(),
- TgtDbFileName = binary_to_list(TgtDbName) ++ ".couch",
- couch_util:with_db(SrcDbName, fun(Db) ->
- OldPath = couch_db:get_filepath(Db),
- NewPath = filename:join(filename:dirname(OldPath), TgtDbFileName),
- {ok, _} = file:copy(OldPath, NewPath)
- end),
- refresh_index(TgtDbName),
- TgtDbName.
-
-
-refresh_index(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
- couch_mrview:query_view(Db, DDoc, <<"by_id">>, [{update, true}]),
- ok = couch_db:close(Db).
-
-compact_db(DbName) ->
- DiskSizeBefore = db_disk_size(DbName),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db),
- DiskSizeAfter = db_disk_size(DbName),
- ?assert(DiskSizeBefore > DiskSizeAfter).
-
-compact_view(DbName) ->
- DiskSizeBefore = view_disk_size(DbName),
- {ok, _MonRef} = couch_mrview:compact(DbName, ?DDOC_ID, [monitor]),
- wait_compaction(DbName, "view group", ?LINE),
- DiskSizeAfter = view_disk_size(DbName),
- ?assert(DiskSizeBefore > DiskSizeAfter).
-
-db_disk_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- ok = couch_db:close(Db),
- active_size(Info).
-
-db_external_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- ok = couch_db:close(Db),
- external_size(Info).
-
-view_disk_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
- {ok, Info} = couch_mrview:get_info(Db, DDoc),
- ok = couch_db:close(Db),
- active_size(Info).
-
-view_external_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
- {ok, Info} = couch_mrview:get_info(Db, DDoc),
- ok = couch_db:close(Db),
- external_size(Info).
-
-active_size(Info) ->
- couch_util:get_nested_json_value({Info}, [sizes, active]).
-
-external_size(Info) ->
- couch_util:get_nested_json_value({Info}, [sizes, external]).
-
-wait_compaction(DbName, Kind, Line) ->
- WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
- end,
- case test_util:wait(WaitFun, ?TIMEOUT) of
- timeout ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, Line},
- {reason, "Timeout waiting for "
- ++ Kind
- ++ " database compaction"}]});
- _ ->
- ok
- end.
-
-is_compaction_running(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DbInfo} = couch_db:get_db_info(Db),
- {ok, ViewInfo} = couch_mrview:get_info(Db, ?DDOC_ID),
- couch_db:close(Db),
- (couch_util:get_value(compact_running, ViewInfo) =:= true)
- orelse (couch_util:get_value(compact_running, DbInfo) =:= true).
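Each step of compare_methods/1 above follows the same three-line recipe: switch couchdb/file_compression, recompact both the database and the view group, then sample the resulting sizes. Isolated as a sketch over the helpers defined above (measure/2 is an illustrative name):

    % Sketch: report active db and view sizes under a compression method,
    % one of "none", "snappy", "deflate_1" .. "deflate_9".
    measure(DbName, Method) ->
        config:set("couchdb", "file_compression", Method, false),
        compact_db(DbName),
        compact_view(DbName),
        {db_disk_size(DbName), view_disk_size(DbName)}.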
diff --git a/src/couch/test/eunit/couchdb_location_header_tests.erl b/src/couch/test/eunit/couchdb_location_header_tests.erl
deleted file mode 100644
index c6c039eb0..000000000
--- a/src/couch/test/eunit/couchdb_location_header_tests.erl
+++ /dev/null
@@ -1,78 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_location_header_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
-
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- Host = "http://" ++ Addr ++ ":" ++ Port,
- {Host, ?b2l(DbName)}.
-
-teardown({_, DbName}) ->
- ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
- ok.
-
-
-header_test_() ->
- {
- "CouchDB Location Header Tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_work_with_newlines_in_docs/1,
- fun should_work_with_newlines_in_attachments/1
- ]
- }
- }
- }.
-
-should_work_with_newlines_in_docs({Host, DbName}) ->
- Url = Host ++ "/" ++ DbName ++ "/docid%0A",
- {"COUCHDB-708",
- ?_assertEqual(
- Url,
- begin
- {ok, _, Headers, _} = test_request:put(Url,
- [{"Content-Type", "application/json"}], "{}"),
- proplists:get_value("Location", Headers)
- end)}.
-
-should_work_with_newlines_in_attachments({Host, DbName}) ->
- Url = Host ++ "/" ++ DbName,
- AttUrl = Url ++ "/docid%0A/readme.txt",
- {"COUCHDB-708",
- ?_assertEqual(
- AttUrl,
- begin
- Body = "We all live in a yellow submarine!",
- Headers0 = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"}
- ],
- {ok, _, Headers, _} = test_request:put(AttUrl, Headers0, Body),
- proplists:get_value("Location", Headers)
- end)}.
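Both COUCHDB-708 cases come down to a single check: PUT a resource whose document id contains a percent-encoded newline and confirm that the Location header echoes the request URL exactly, without decoding %0A into a raw newline. The core of the check, assuming Host and DbName from the fixture above:

    % Sketch: the bound Url is pattern-matched against the Location
    % header, which asserts they are byte-for-byte equal.
    Url = Host ++ "/" ++ DbName ++ "/docid%0A",
    {ok, _, Headers, _} = test_request:put(Url,
        [{"Content-Type", "application/json"}], "{}"),
    Url = proplists:get_value("Location", Headers).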
diff --git a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
deleted file mode 100644
index 0f69048a0..000000000
--- a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
+++ /dev/null
@@ -1,140 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_mrview_cors_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-
--define(DDOC, {[
- {<<"_id">>, <<"_design/foo">>},
- {<<"shows">>, {[
- {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>}
- ]}}
-]}).
-
--define(USER, "mrview_cors_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
-
-start() ->
- Ctx = test_util:start_couch([chttpd]),
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
- ok = config:set("httpd", "enable_cors", "true", false),
- ok = config:set("vhosts", "example.com", "/", false),
- Ctx.
-
-setup(PortType) ->
- DbName = ?tempdb(),
- ok = create_db(PortType, DbName),
-
- config:set("cors", "credentials", "false", false),
- config:set("cors", "origins", "http://example.com", false),
-
- Host = host_url(PortType),
- upload_ddoc(Host, ?b2l(DbName)),
- {Host, ?b2l(DbName)}.
-
-teardown(Ctx) ->
- ok = config:delete("admins", ?USER, _Persist=false),
- test_util:stop_couch(Ctx).
-
-teardown(PortType, {_Host, DbName}) ->
- delete_db(PortType, ?l2b(DbName)),
- ok.
-
-cors_test_() ->
- {
- "CORS for mrview",
- {
- setup,
- fun start/0, fun teardown/1,
- [show_tests()]
- }
- }.
-
-show_tests() ->
- {
- "Check CORS for show",
- [
- make_test_case(clustered, [fun should_make_shows_request/2]),
- make_test_case(backdoor, [fun should_make_shows_request/2])
- ]
- }.
-
-make_test_case(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s", [Mod])),
- {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
- }.
-
-should_make_shows_request(_, {Host, DbName}) ->
- ?_test(begin
- ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_show/bar",
- Headers = [{"Origin", "http://example.com"},
- {"Access-Control-Request-Method", "GET"}, ?AUTH],
- {ok, _, Resp, Body} = test_request:get(ReqUrl, Headers),
- Origin = proplists:get_value("Access-Control-Allow-Origin", Resp),
- ?assertEqual("http://example.com", Origin),
- ?assertEqual(<<"<h1>wosh</h1>">>, Body)
- end).
-
-create_db(backdoor, DbName) ->
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db);
-create_db(clustered, DbName) ->
- {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""),
- assert_success(create_db, Status),
- ok.
-
-delete_db(backdoor, DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]);
-delete_db(clustered, DbName) ->
- {ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]),
- assert_success(delete_db, Status),
- ok.
-
-assert_success(create_db, Status) ->
- true = lists:member(Status, [201, 202]);
-assert_success(delete_db, Status) ->
- true = lists:member(Status, [200, 202]).
-
-
-host_url(PortType) ->
- "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType).
-
-bind_address(PortType) ->
- config:get(section(PortType), "bind_address", "127.0.0.1").
-
-section(backdoor) -> "http";
-section(clustered) -> "chttpd".
-
-db_url(DbName) when is_binary(DbName) ->
- db_url(binary_to_list(DbName));
-db_url(DbName) when is_list(DbName) ->
- host_url(clustered) ++ "/" ++ DbName.
-
-port(clustered) ->
- integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
-
-
-upload_ddoc(Host, DbName) ->
- Url = Host ++ "/" ++ DbName ++ "/_design/foo",
- Body = couch_util:json_encode(?DDOC),
- {ok, 201, _Resp, _Body} = test_request:put(Url, [?AUTH], Body),
- ok.
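The suite runs each case twice because CouchDB exposes two HTTP interfaces: the clustered API served by chttpd and the node-local "backdoor" served by couch_httpd, each on its own port. The URL construction above reduces to one function over the two config sections (base_url/1 is an illustrative restatement of host_url/1, section/1 and port/1):

    % Sketch: resolve the base URL for either HTTP interface.
    base_url(clustered) ->
        "http://" ++ config:get("chttpd", "bind_address", "127.0.0.1")
            ++ ":" ++ integer_to_list(mochiweb_socket_server:get(chttpd, port));
    base_url(backdoor) ->
        "http://" ++ config:get("http", "bind_address", "127.0.0.1")
            ++ ":" ++ integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).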
diff --git a/src/couch/test/eunit/couchdb_mrview_tests.erl b/src/couch/test/eunit/couchdb_mrview_tests.erl
deleted file mode 100644
index ec77b190d..000000000
--- a/src/couch/test/eunit/couchdb_mrview_tests.erl
+++ /dev/null
@@ -1,261 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_mrview_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-
--define(DDOC, {[
- {<<"_id">>, <<"_design/foo">>},
- {<<"shows">>, {[
- {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>}
- ]}},
- {<<"updates">>, {[
- {<<"report">>, <<"function(doc, req) {"
- "var data = JSON.parse(req.body); "
- "return ['test', data];"
- "}">>}
- ]}},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc._rev)}">>}
- ]}}
- ]}}
-]}).
-
--define(USER, "admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
-
-setup_all() ->
- Ctx = test_util:start_couch([chttpd]),
- ok = meck:new(mochiweb_socket, [passthrough]),
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- ok = config:delete("admins", ?USER, _Persist=false),
- test_util:stop_couch(Ctx).
-
-setup(PortType) ->
- meck:reset([mochiweb_socket]),
- ok = meck:expect(mochiweb_socket, recv, fun mochiweb_socket_recv/3),
-
- DbName = ?tempdb(),
- ok = create_db(PortType, DbName),
-
- Host = host_url(PortType),
- upload_ddoc(Host, ?b2l(DbName)),
- {Host, ?b2l(DbName)}.
-
-teardown(PortType, {_Host, DbName}) ->
- delete_db(PortType, ?l2b(DbName)),
- ok.
-
-mrview_show_test_() ->
- {
- "Check show functionality",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- make_test_case(clustered, [fun should_return_invalid_request_body/2]),
- make_test_case(backdoor, [fun should_return_invalid_request_body/2])
- ]
- }
- }.
-
-mrview_query_test_() ->
- {
- "Check view query functionality",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- make_test_case(clustered, [fun should_return_400_for_wrong_order_of_keys/2]),
- make_test_case(backdoor, [fun should_return_400_for_wrong_order_of_keys/2])
- ]
- }
- }.
-
-mrview_cleanup_index_files_test_() ->
- {
- "Check index files cleanup",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- make_test_case(clustered, [fun should_cleanup_index_files/2])
- ]
- }
- }.
-
-
-make_test_case(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s", [Mod])),
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{Mod, Fun} || Fun <- Funs]
- }
- }.
-
-should_return_invalid_request_body(PortType, {Host, DbName}) ->
- ?_test(begin
- ok = create_doc(PortType, ?l2b(DbName), <<"doc_id">>, {[]}),
- ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_update/report/doc_id",
- {ok, Status, _Headers, Body} =
- test_request:post(ReqUrl, [?AUTH], <<"{truncated}">>),
- {Props} = jiffy:decode(Body),
- ?assertEqual(
- <<"bad_request">>, couch_util:get_value(<<"error">>, Props)),
- ?assertEqual(
- <<"Invalid request body">>, couch_util:get_value(<<"reason">>, Props)),
- ?assertEqual(400, Status),
- ok
- end).
-
-should_return_400_for_wrong_order_of_keys(_PortType, {Host, DbName}) ->
- Args = [{start_key, "\"bbb\""}, {end_key, "\"aaa\""}],
- ?_test(begin
- ReqUrl = Host ++ "/" ++ DbName
- ++ "/_design/foo/_view/view1?" ++ mochiweb_util:urlencode(Args),
- {ok, Status, _Headers, Body} = test_request:get(ReqUrl, [?AUTH]),
- {Props} = jiffy:decode(Body),
- ?assertEqual(
- <<"query_parse_error">>, couch_util:get_value(<<"error">>, Props)),
- ?assertEqual(
- <<"No rows can match your key range, reverse your start_key and end_key or set descending=true">>,
- couch_util:get_value(<<"reason">>, Props)),
- ?assertEqual(400, Status),
- ok
- end).
-
-should_cleanup_index_files(_PortType, {Host, DbName}) ->
- ?_test(begin
- IndexWildCard = [
- config:get("couchdb", "view_index_dir"),
- "/.shards/*/",
- DbName,
- ".[0-9]*_design/mrview/*"
- ],
- ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_view/view1",
- {ok, _Status0, _Headers0, _Body0} = test_request:get(ReqUrl, [?AUTH]),
- FileList0 = filelib:wildcard(IndexWildCard),
- ?assertNotEqual([], FileList0),
-
-        % It is hard to simulate an inactive view, since couch_mrview:cleanup
-        % is called on view definition change. That's why we just create the
-        % extra files in place ourselves.
- ToDelete = lists:map(fun(FilePath) ->
- ViewFile = filename:join([
- filename:dirname(FilePath),
- "11111111111111111111111111111111.view"]),
- file:write_file(ViewFile, <<>>),
- ViewFile
- end, FileList0),
- FileList1 = filelib:wildcard(IndexWildCard),
- ?assertEqual([], lists:usort(FileList1 -- (FileList0 ++ ToDelete))),
-
- CleanupUrl = Host ++ "/" ++ DbName ++ "/_view_cleanup",
- {ok, _Status1, _Headers1, _Body1} = test_request:post(
- CleanupUrl, [], <<>>, [?AUTH]),
- test_util:wait(fun() ->
- IndexFiles = filelib:wildcard(IndexWildCard),
- case lists:usort(FileList0) == lists:usort(IndexFiles) of
- false -> wait;
- true -> ok
- end
- end),
- ok
- end).
-
-
-create_doc(backdoor, DbName, Id, Body) ->
- JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body),
- Doc = couch_doc:from_json_obj(JsonDoc),
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- {ok, _} = couch_db:update_docs(Db, [Doc]),
- couch_db:close(Db);
-create_doc(clustered, DbName, Id, Body) ->
- JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body),
- Doc = couch_doc:from_json_obj(JsonDoc),
- {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
- ok.
-
-create_db(backdoor, DbName) ->
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db);
-create_db(clustered, DbName) ->
- {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""),
- assert_success(create_db, Status),
- ok.
-
-delete_db(backdoor, DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]);
-delete_db(clustered, DbName) ->
- {ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]),
- assert_success(delete_db, Status),
- ok.
-
-assert_success(create_db, Status) ->
- ?assert(lists:member(Status, [201, 202]));
-assert_success(delete_db, Status) ->
- ?assert(lists:member(Status, [200, 202])).
-
-
-host_url(PortType) ->
- "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType).
-
-bind_address(PortType) ->
- config:get(section(PortType), "bind_address", "127.0.0.1").
-
-section(backdoor) -> "http";
-section(clustered) -> "chttpd".
-
-db_url(DbName) when is_binary(DbName) ->
- db_url(binary_to_list(DbName));
-db_url(DbName) when is_list(DbName) ->
- host_url(clustered) ++ "/" ++ DbName.
-
-port(clustered) ->
- integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
-
-
-upload_ddoc(Host, DbName) ->
- Url = Host ++ "/" ++ DbName ++ "/_design/foo",
- Body = couch_util:json_encode(?DDOC),
- {ok, 201, _Resp, _Body} = test_request:put(Url, [?AUTH], Body),
- ok.
-
-mochiweb_socket_recv(Sock, Len, Timeout) ->
- case meck:passthrough([Sock, Len, Timeout]) of
- {ok, <<"{truncated}">>} ->
- {error, closed};
- {ok, Data} ->
- {ok, Data};
- Else ->
- Else
- end.
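The only non-obvious machinery above is how a truncated request body is simulated: mochiweb_socket:recv/3 is wrapped with meck so that the sentinel body <<"{truncated}">> is reported as a closed connection, which the HTTP layer surfaces as the 400 "Invalid request body" the test asserts. The interception on its own:

    % Sketch: any read that would return the sentinel body instead
    % behaves as if the client closed the socket mid-request.
    ok = meck:expect(mochiweb_socket, recv, fun(Sock, Len, Timeout) ->
        case meck:passthrough([Sock, Len, Timeout]) of
            {ok, <<"{truncated}">>} -> {error, closed};
            Other -> Other
        end
    end).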
diff --git a/src/couch/test/eunit/couchdb_os_proc_pool.erl b/src/couch/test/eunit/couchdb_os_proc_pool.erl
deleted file mode 100644
index 69f8051ad..000000000
--- a/src/couch/test/eunit/couchdb_os_proc_pool.erl
+++ /dev/null
@@ -1,306 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_os_proc_pool).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- ok = couch_proc_manager:reload(),
- ok = setup_config().
-
-teardown(_) ->
- ok.
-
-os_proc_pool_test_() ->
- {
- "OS processes pool tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- should_block_new_proc_on_full_pool(),
- should_free_slot_on_proc_unexpected_exit(),
- should_reuse_known_proc(),
-% should_process_waiting_queue_as_fifo(),
- should_reduce_pool_on_idle_os_procs()
- ]
- }
- }
- }.
-
-
-should_block_new_proc_on_full_pool() ->
- ?_test(begin
- Client1 = spawn_client(),
- Client2 = spawn_client(),
- Client3 = spawn_client(),
-
- ?assertEqual(ok, ping_client(Client1)),
- ?assertEqual(ok, ping_client(Client2)),
- ?assertEqual(ok, ping_client(Client3)),
-
- Proc1 = get_client_proc(Client1, "1"),
- Proc2 = get_client_proc(Client2, "2"),
- Proc3 = get_client_proc(Client3, "3"),
-
- ?assertNotEqual(Proc1, Proc2),
- ?assertNotEqual(Proc2, Proc3),
- ?assertNotEqual(Proc3, Proc1),
-
- Client4 = spawn_client(),
- ?assertEqual(timeout, ping_client(Client4)),
-
- ?assertEqual(ok, stop_client(Client1)),
- ?assertEqual(ok, ping_client(Client4)),
-
- Proc4 = get_client_proc(Client4, "4"),
-
- ?assertEqual(Proc1#proc.pid, Proc4#proc.pid),
- ?assertNotEqual(Proc1#proc.client, Proc4#proc.client),
-
- lists:map(fun(C) ->
- ?assertEqual(ok, stop_client(C))
- end, [Client2, Client3, Client4])
- end).
-
-
-should_free_slot_on_proc_unexpected_exit() ->
- ?_test(begin
- Client1 = spawn_client(),
- Client2 = spawn_client(),
- Client3 = spawn_client(),
-
- ?assertEqual(ok, ping_client(Client1)),
- ?assertEqual(ok, ping_client(Client2)),
- ?assertEqual(ok, ping_client(Client3)),
-
- Proc1 = get_client_proc(Client1, "1"),
- Proc2 = get_client_proc(Client2, "2"),
- Proc3 = get_client_proc(Client3, "3"),
-
- ?assertNotEqual(Proc1#proc.pid, Proc2#proc.pid),
- ?assertNotEqual(Proc1#proc.client, Proc2#proc.client),
- ?assertNotEqual(Proc2#proc.pid, Proc3#proc.pid),
- ?assertNotEqual(Proc2#proc.client, Proc3#proc.client),
- ?assertNotEqual(Proc3#proc.pid, Proc1#proc.pid),
- ?assertNotEqual(Proc3#proc.client, Proc1#proc.client),
-
- ?assertEqual(ok, kill_client(Client1)),
-
- Client4 = spawn_client(),
- ?assertEqual(ok, ping_client(Client4)),
-
- Proc4 = get_client_proc(Client4, "4"),
-
- ?assertEqual(Proc4#proc.pid, Proc1#proc.pid),
- ?assertNotEqual(Proc4#proc.client, Proc1#proc.client),
- ?assertNotEqual(Proc2#proc.pid, Proc4#proc.pid),
- ?assertNotEqual(Proc2#proc.client, Proc4#proc.client),
- ?assertNotEqual(Proc3#proc.pid, Proc4#proc.pid),
- ?assertNotEqual(Proc3#proc.client, Proc4#proc.client),
-
- lists:map(fun(C) ->
- ?assertEqual(ok, stop_client(C))
- end, [Client2, Client3, Client4])
- end).
-
-
-should_reuse_known_proc() ->
- ?_test(begin
- Client1 = spawn_client(<<"ddoc1">>),
- Client2 = spawn_client(<<"ddoc2">>),
-
- ?assertEqual(ok, ping_client(Client1)),
- ?assertEqual(ok, ping_client(Client2)),
-
- Proc1 = get_client_proc(Client1, "1"),
- Proc2 = get_client_proc(Client2, "2"),
- ?assertNotEqual(Proc1#proc.pid, Proc2#proc.pid),
-
- ?assertEqual(ok, stop_client(Client1)),
- ?assertEqual(ok, stop_client(Client2)),
- ?assert(is_process_alive(Proc1#proc.pid)),
- ?assert(is_process_alive(Proc2#proc.pid)),
-
- Client1Again = spawn_client(<<"ddoc1">>),
- ?assertEqual(ok, ping_client(Client1Again)),
- Proc1Again = get_client_proc(Client1Again, "1-again"),
- ?assertEqual(Proc1#proc.pid, Proc1Again#proc.pid),
- ?assertNotEqual(Proc1#proc.client, Proc1Again#proc.client),
- ?assertEqual(ok, stop_client(Client1Again))
- end).
-
-
-%should_process_waiting_queue_as_fifo() ->
-% ?_test(begin
-% Client1 = spawn_client(<<"ddoc1">>),
-% Client2 = spawn_client(<<"ddoc2">>),
-% Client3 = spawn_client(<<"ddoc3">>),
-% Client4 = spawn_client(<<"ddoc4">>),
-% timer:sleep(100),
-% Client5 = spawn_client(<<"ddoc5">>),
-%
-% ?assertEqual(ok, ping_client(Client1)),
-% ?assertEqual(ok, ping_client(Client2)),
-% ?assertEqual(ok, ping_client(Client3)),
-% ?assertEqual(timeout, ping_client(Client4)),
-% ?assertEqual(timeout, ping_client(Client5)),
-%
-% Proc1 = get_client_proc(Client1, "1"),
-% ?assertEqual(ok, stop_client(Client1)),
-% ?assertEqual(ok, ping_client(Client4)),
-% Proc4 = get_client_proc(Client4, "4"),
-%
-% ?assertNotEqual(Proc4#proc.client, Proc1#proc.client),
-% ?assertEqual(Proc1#proc.pid, Proc4#proc.pid),
-% ?assertEqual(timeout, ping_client(Client5)),
-%
-% ?assertEqual(ok, stop_client(Client2)),
-% ?assertEqual(ok, stop_client(Client3)),
-% ?assertEqual(ok, stop_client(Client4)),
-% ?assertEqual(ok, stop_client(Client5))
-% end).
-
-
-should_reduce_pool_on_idle_os_procs() ->
- ?_test(begin
-        %% os_process_idle_limit is expressed in seconds
- config:set("query_server_config",
- "os_process_idle_limit", "1", false),
- ok = confirm_config("os_process_idle_limit", "1"),
-
- Client1 = spawn_client(<<"ddoc1">>),
- Client2 = spawn_client(<<"ddoc2">>),
- Client3 = spawn_client(<<"ddoc3">>),
-
- ?assertEqual(ok, ping_client(Client1)),
- ?assertEqual(ok, ping_client(Client2)),
- ?assertEqual(ok, ping_client(Client3)),
-
- ?assertEqual(3, couch_proc_manager:get_proc_count()),
-
- ?assertEqual(ok, stop_client(Client1)),
- ?assertEqual(ok, stop_client(Client2)),
- ?assertEqual(ok, stop_client(Client3)),
-
- timer:sleep(1200),
- ?assertEqual(1, couch_proc_manager:get_proc_count())
- end).
-
-
-setup_config() ->
- config:set("native_query_servers", "enable_erlang_query_server", "true", false),
- config:set("query_server_config", "os_process_limit", "3", false),
- config:set("query_server_config", "os_process_soft_limit", "2", false),
- ok = confirm_config("os_process_soft_limit", "2").
-
-confirm_config(Key, Value) ->
- confirm_config(Key, Value, 0).
-
-confirm_config(Key, Value, Count) ->
- case config:get("query_server_config", Key) of
- Value ->
- ok;
- _ when Count > 10 ->
- erlang:error({config_setup, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, timeout}
- ]});
- _ ->
-            %% wait for the asynchronous config cast to be applied
- timer:sleep(10),
- confirm_config(Key, Value, Count + 1)
- end.
-
-spawn_client() ->
- Parent = self(),
- Ref = make_ref(),
- Pid = spawn(fun() ->
- Proc = couch_query_servers:get_os_process(<<"erlang">>),
- loop(Parent, Ref, Proc)
- end),
- {Pid, Ref}.
-
-spawn_client(DDocId) ->
- Parent = self(),
- Ref = make_ref(),
- Pid = spawn(fun() ->
- DDocKey = {DDocId, <<"1-abcdefgh">>},
- DDoc = #doc{body={[{<<"language">>, <<"erlang">>}]}},
- Proc = couch_query_servers:get_ddoc_process(DDoc, DDocKey),
- loop(Parent, Ref, Proc)
- end),
- {Pid, Ref}.
-
-ping_client({Pid, Ref}) ->
- Pid ! ping,
- receive
- {pong, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-get_client_proc({Pid, Ref}, ClientName) ->
- Pid ! get_proc,
- receive
- {proc, Ref, Proc} -> Proc
- after ?TIMEOUT ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout getting client "
- ++ ClientName ++ " proc"}]})
- end.
-
-stop_client({Pid, Ref}) ->
- Pid ! stop,
- receive
- {stop, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-kill_client({Pid, Ref}) ->
- Pid ! die,
- receive
- {die, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-loop(Parent, Ref, Proc) ->
- receive
- ping ->
- Parent ! {pong, Ref},
- loop(Parent, Ref, Proc);
- get_proc ->
- Parent ! {proc, Ref, Proc},
- loop(Parent, Ref, Proc);
- stop ->
- couch_query_servers:ret_os_process(Proc),
- Parent ! {stop, Ref};
- die ->
- Parent ! {die, Ref},
- exit(some_error)
- end.
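The client harness above gives every test the same tiny protocol: spawn a process that checks an OS process out of the pool, then drive it with ping/get_proc/stop/die messages while the test asserts on pool behaviour. A typical exchange, assuming the limits from setup_config/0 (the #proc record comes from couch_db.hrl):

    % Sketch: check a proc out of the pool, inspect it, and return it.
    Client = spawn_client(),
    ok = ping_client(Client),               % blocks until a slot is granted
    Proc = get_client_proc(Client, "demo"),
    OsPid = Proc#proc.pid,                  % pid of the pooled OS process
    ok = stop_client(Client).               % ret_os_process/1 frees the slot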
diff --git a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
deleted file mode 100644
index 1329aba27..000000000
--- a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
+++ /dev/null
@@ -1,280 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_update_conflicts_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(i2l(I), integer_to_list(I)).
--define(DOC_ID, <<"foobar">>).
--define(LOCAL_DOC_ID, <<"_local/foobar">>).
--define(NUM_CLIENTS, [100, 500, 1000, 2000, 5000, 10000]).
--define(TIMEOUT, 20000).
-
-start() ->
- test_util:start_couch().
-
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX, overwrite]),
- Doc = couch_doc:from_json_obj({[{<<"_id">>, ?DOC_ID},
- {<<"value">>, 0}]}),
- {ok, Rev} = couch_db:update_doc(Db, Doc, []),
- ok = couch_db:close(Db),
- RevStr = couch_doc:rev_to_str(Rev),
- {DbName, RevStr}.
-setup(_) ->
- setup().
-
-teardown({DbName, _}) ->
- ok = couch_server:delete(DbName, []),
- ok.
-teardown(_, {DbName, _RevStr}) ->
- teardown({DbName, _RevStr}).
-
-
-update_conflicts_test_() ->
- {
- "Update conflicts",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- [
- concurrent_updates(),
- bulk_docs_updates()
- ]
- }
- }.
-
-concurrent_updates()->
- {
- "Concurrent updates",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{NumClients, fun should_concurrently_update_doc/2}
- || NumClients <- ?NUM_CLIENTS]
- }
- }.
-
-bulk_docs_updates()->
- {
- "Bulk docs updates",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_bulk_create_delete_doc/1,
- fun should_bulk_create_local_doc/1,
- fun should_ignore_invalid_local_doc/1
- ]
- }
- }.
-
-
-should_concurrently_update_doc(NumClients, {DbName, InitRev})->
- {?i2l(NumClients) ++ " clients",
- {inorder,
- [{"update doc",
- {timeout, ?TIMEOUT div 1000,
- ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}},
- {"ensure in single leaf",
- ?_test(ensure_in_single_revision_leaf(DbName))}]}}.
-
-should_bulk_create_delete_doc({DbName, InitRev})->
- ?_test(bulk_delete_create(DbName, InitRev)).
-
-should_bulk_create_local_doc({DbName, _})->
- ?_test(bulk_create_local_doc(DbName)).
-
-should_ignore_invalid_local_doc({DbName, _})->
- ?_test(ignore_invalid_local_doc(DbName)).
-
-
-concurrent_doc_update(NumClients, DbName, InitRev) ->
- Clients = lists:map(
- fun(Value) ->
- ClientDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DOC_ID},
- {<<"_rev">>, InitRev},
- {<<"value">>, Value}
- ]}),
- Pid = spawn_client(DbName, ClientDoc),
- {Value, Pid, erlang:monitor(process, Pid)}
- end,
- lists:seq(1, NumClients)),
-
- lists:foreach(fun({_, Pid, _}) -> Pid ! go end, Clients),
-
- {NumConflicts, SavedValue} = lists:foldl(
- fun({Value, Pid, MonRef}, {AccConflicts, AccValue}) ->
- receive
- {'DOWN', MonRef, process, Pid, {ok, _NewRev}} ->
- {AccConflicts, Value};
- {'DOWN', MonRef, process, Pid, conflict} ->
- {AccConflicts + 1, AccValue};
- {'DOWN', MonRef, process, Pid, Error} ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Client " ++ ?i2l(Value)
- ++ " got update error: "
- ++ couch_util:to_list(Error)}]})
- after ?TIMEOUT div 2 ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout waiting for client "
- ++ ?i2l(Value) ++ " to die"}]})
- end
- end, {0, nil}, Clients),
- ?assertEqual(NumClients - 1, NumConflicts),
-
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
- ok = couch_db:close(Db),
- ?assertEqual(1, length(Leaves)),
-
- [{ok, Doc2}] = Leaves,
- {JsonDoc} = couch_doc:to_json_obj(Doc2, []),
- ?assertEqual(SavedValue, couch_util:get_value(<<"value">>, JsonDoc)).
-
-ensure_in_single_revision_leaf(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
- ok = couch_db:close(Db),
- [{ok, Doc}] = Leaves,
-
-    %% FIXME: a server restart cannot be triggered from the test side
- %% stop(ok),
- %% start(),
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, Leaves2} = couch_db:open_doc_revs(Db2, ?DOC_ID, all, []),
- ok = couch_db:close(Db2),
- ?assertEqual(1, length(Leaves2)),
-
-    [{ok, Doc2}] = Leaves2,
- ?assertEqual(Doc, Doc2).
-
-bulk_delete_create(DbName, InitRev) ->
- {ok, Db} = couch_db:open_int(DbName, []),
-
- DeletedDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DOC_ID},
- {<<"_rev">>, InitRev},
- {<<"_deleted">>, true}
- ]}),
- NewDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DOC_ID},
- {<<"value">>, 666}
- ]}),
-
- {ok, Results} = couch_db:update_docs(Db, [DeletedDoc, NewDoc], []),
- ok = couch_db:close(Db),
-
- ?assertEqual(2, length([ok || {ok, _} <- Results])),
- [{ok, Rev1}, {ok, Rev2}] = Results,
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, [{ok, Doc1}]} = couch_db:open_doc_revs(
- Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]),
- {ok, [{ok, Doc2}]} = couch_db:open_doc_revs(
- Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]),
- ok = couch_db:close(Db2),
-
- {Doc1Props} = couch_doc:to_json_obj(Doc1, []),
- {Doc2Props} = couch_doc:to_json_obj(Doc2, []),
-
- %% Document was deleted
- ?assert(couch_util:get_value(<<"_deleted">>, Doc1Props)),
- %% New document not flagged as deleted
- ?assertEqual(undefined, couch_util:get_value(<<"_deleted">>,
- Doc2Props)),
- %% New leaf revision has the right value
- ?assertEqual(666, couch_util:get_value(<<"value">>,
- Doc2Props)),
- %% Deleted document has no conflicts
- ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
- Doc1Props)),
- %% Deleted document has no deleted conflicts
- ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
- Doc1Props)),
-    %% New leaf revision doesn't have conflicts
-    ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
-        Doc2Props)),
-    %% New leaf revision doesn't have deleted conflicts
-    ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
-        Doc2Props)),
-
- %% Deleted revision has position 2
- ?assertEqual(2, element(1, Rev1)),
- %% New leaf revision has position 3
- ?assertEqual(3, element(1, Rev2)).
-
-
-bulk_create_local_doc(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
-
- LocalDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?LOCAL_DOC_ID},
- {<<"_rev">>, <<"0-1">>}
- ]}),
-
- {ok, Results} = couch_db:update_docs(Db, [LocalDoc],
- [], replicated_changes),
- ok = couch_db:close(Db),
- ?assertEqual([], Results),
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, LocalDoc1} = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []),
- ok = couch_db:close(Db2),
- ?assertEqual(?LOCAL_DOC_ID, LocalDoc1#doc.id),
- ?assertEqual({0, [<<"2">>]}, LocalDoc1#doc.revs).
-
-
-ignore_invalid_local_doc(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
-
- LocalDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?LOCAL_DOC_ID},
- {<<"_rev">>, <<"0-abcdef">>}
- ]}),
-
- {ok, Results} = couch_db:update_docs(Db, [LocalDoc],
- [], replicated_changes),
- ok = couch_db:close(Db),
- ?assertEqual([], Results),
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- Result2 = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []),
- ok = couch_db:close(Db2),
- ?assertEqual({not_found, missing}, Result2).
-
-
-spawn_client(DbName, Doc) ->
- spawn(fun() ->
- {ok, Db} = couch_db:open_int(DbName, []),
- receive
- go -> ok
- end,
- erlang:yield(),
- Result = try
- couch_db:update_doc(Db, Doc, [])
- catch _:Error ->
- Error
- end,
- ok = couch_db:close(Db),
- exit(Result)
- end).
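spawn_client/2 above is the heart of the concurrency test: every client opens the database, parks on a go message so that all updates are released together, and exits with its update result, which the parent harvests through monitors. One client's life cycle, condensed (ClientDoc is a doc carrying the shared _rev, as built in concurrent_doc_update/3):

    % Sketch: race one client and classify its exit reason; exactly one
    % of N concurrent clients wins, the rest exit with conflict.
    Pid = spawn_client(DbName, ClientDoc),
    MonRef = erlang:monitor(process, Pid),
    Pid ! go,
    receive
        {'DOWN', MonRef, process, Pid, {ok, _NewRev}} -> won;
        {'DOWN', MonRef, process, Pid, conflict} -> lost
    end.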
diff --git a/src/couch/test/eunit/couchdb_vhosts_tests.erl b/src/couch/test/eunit/couchdb_vhosts_tests.erl
deleted file mode 100644
index fbe5579cd..000000000
--- a/src/couch/test/eunit/couchdb_vhosts_tests.erl
+++ /dev/null
@@ -1,271 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_vhosts_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
--define(iofmt(S, A), lists:flatten(io_lib:format(S, A))).
-
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 666}
- ]}),
-
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/doc1">>},
- {<<"shows">>, {[
- {<<"test">>, <<"function(doc, req) {
- return { json: {
- requested_path: '/' + req.requested_path.join('/'),
- path: '/' + req.path.join('/')}};}">>}
- ]}},
- {<<"rewrites">>, [
- {[
- {<<"from">>, <<"/">>},
- {<<"to">>, <<"_show/test">>}
- ]}
- ]}
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc, Doc1]),
- couch_db:close(Db),
-
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- Url = "http://" ++ Addr ++ ":" ++ Port,
- {Url, ?b2l(DbName)}.
-
-teardown({_, DbName}) ->
- ok = couch_server:delete(?l2b(DbName), []),
- ok.
-
-
-vhosts_test_() ->
- {
- "Virtual Hosts rewrite tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_return_database_info/1,
- fun should_return_revs_info/1,
- fun should_return_virtual_request_path_field_in_request/1,
- fun should_return_real_request_path_field_in_request/1,
- fun should_match_wildcard_vhost/1,
- fun should_return_db_info_for_wildcard_vhost_for_custom_db/1,
- fun should_replace_rewrite_variables_for_db_and_doc/1,
- fun should_return_db_info_for_vhost_with_resource/1,
- fun should_return_revs_info_for_vhost_with_resource/1,
- fun should_return_db_info_for_vhost_with_wildcard_resource/1,
- fun should_return_path_for_vhost_with_wildcard_host/1
- ]
- }
- }
- }.
-
-should_return_database_info({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example.com", "/" ++ DbName, false),
- case test_request:get(Url, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_revs_info({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example.com", "/" ++ DbName, false),
- case test_request:get(Url ++ "/doc1?revs_info=true", [],
- [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_virtual_request_path_field_in_request({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example1.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite/",
- false),
- case test_request:get(Url, [], [{host_header, "example1.com"}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- ?assertEqual(<<"/">>,
- proplists:get_value(<<"requested_path">>, Json));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_real_request_path_field_in_request({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example1.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite/",
- false),
- case test_request:get(Url, [], [{host_header, "example1.com"}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_match_wildcard_vhost({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "*.example.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite", false),
- case test_request:get(Url, [], [{host_header, "test.example.com"}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_db_info_for_wildcard_vhost_for_custom_db({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", ":dbname.example1.com",
- "/:dbname", false),
- Host = DbName ++ ".example1.com",
- case test_request:get(Url, [], [{host_header, Host}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts",":appname.:dbname.example1.com",
- "/:dbname/_design/:appname/_rewrite/", false),
- Host = "doc1." ++ DbName ++ ".example1.com",
- case test_request:get(Url, [], [{host_header, Host}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_db_info_for_vhost_with_resource({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts",
- "example.com/test", "/" ++ DbName, false),
- ReqUrl = Url ++ "/test",
- case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-
-should_return_revs_info_for_vhost_with_resource({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts",
- "example.com/test", "/" ++ DbName, false),
- ReqUrl = Url ++ "/test/doc1?revs_info=true",
- case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_db_info_for_vhost_with_wildcard_resource({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "*.example2.com/test", "/*", false),
- ReqUrl = Url ++ "/test",
- Host = DbName ++ ".example2.com",
- case test_request:get(ReqUrl, [], [{host_header, Host}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_path_for_vhost_with_wildcard_host({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "*/test1",
- "/" ++ DbName ++ "/_design/doc1/_show/test",
- false),
- case test_request:get(Url ++ "/test1") of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
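Every vhost case above follows the same two-step recipe: write a mapping into the vhosts config section, then replay a request carrying the matching Host header and inspect the rewritten result. The minimal form, assuming Url and DbName from the fixture:

    % Sketch: map example.com to the database root and query it by Host.
    ok = config:set("vhosts", "example.com", "/" ++ DbName, false),
    {ok, _, _, Body} = test_request:get(Url, [], [{host_header, "example.com"}]),
    {JsonBody} = jiffy:decode(Body),
    true = proplists:is_defined(<<"db_name">>, JsonBody).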
diff --git a/src/couch/test/eunit/couchdb_views_tests.erl b/src/couch/test/eunit/couchdb_views_tests.erl
deleted file mode 100644
index 06e2f03eb..000000000
--- a/src/couch/test/eunit/couchdb_views_tests.erl
+++ /dev/null
@@ -1,668 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_views_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(DELAY, 100).
--define(TIMEOUT, 1000).
--define(WAIT_DELAY_COUNT, 40).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- FooRev = create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- query_view(DbName, "foo", "bar"),
- BooRev = create_design_doc(DbName, <<"_design/boo">>, <<"baz">>),
- query_view(DbName, "boo", "baz"),
- {DbName, {FooRev, BooRev}}.
-
-setup_with_docs() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_docs(DbName),
- create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- DbName.
-
-setup_legacy() ->
- DbName = <<"test">>,
- DbFileName = "test.couch",
- OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
- OldViewName = "6cf2c2f766f87b618edf6630b00f8736.view",
- FixtureViewFilePath = filename:join([?FIXTURESDIR, OldViewName]),
- NewViewName = "a1c5929f912aca32f13446122cc6ce50.view",
-
- DbDir = config:get("couchdb", "database_dir"),
- ViewDir = config:get("couchdb", "view_index_dir"),
- OldViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
- OldViewName]),
- NewViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
- NewViewName]),
-
- NewDbFilePath = filename:join([DbDir, DbFileName]),
-
- Files = [NewDbFilePath, OldViewFilePath, NewViewFilePath],
-
-    %% make sure there are no leftovers from a previous run
- lists:foreach(fun(File) -> file:delete(File) end, Files),
-
- % copy old db file into db dir
- {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
-
- % copy old view file into view dir
- ok = filelib:ensure_dir(OldViewFilePath),
-
- {ok, _} = file:copy(FixtureViewFilePath, OldViewFilePath),
-
- {DbName, Files}.
-
-teardown({DbName, _}) ->
- teardown(DbName);
-teardown(DbName) when is_binary(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-teardown_legacy({_DbName, Files}) ->
- lists:foreach(fun(File) -> file:delete(File) end, Files).
-
-view_indexes_cleanup_test_() ->
- {
- "View indexes cleanup",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_have_two_indexes_alive_before_deletion/1,
- fun should_cleanup_index_file_after_ddoc_deletion/1,
- fun should_cleanup_all_index_files/1
- ]
- }
- }
- }.
-
-view_group_db_leaks_test_() ->
- {
- "View group db leaks",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup_with_docs/0, fun teardown/1,
- [
- fun couchdb_1138/1,
- fun couchdb_1309/1
- ]
- }
- }
- }.
-
-view_group_shutdown_test_() ->
- {
- "View group shutdown",
- {
- setup,
- fun() ->
- meck:new(couch_mrview_index, [passthrough]),
- test_util:start_couch()
- end,
- fun(Ctx) ->
- test_util:stop_couch(Ctx),
- meck:unload()
- end,
- [couchdb_1283()]
- }
- }.
-
-backup_restore_test_() ->
- {
-        "Backup and restore tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup_with_docs/0, fun teardown/1,
- [
- fun should_not_remember_docs_in_index_after_backup_restore/1
- ]
- }
- }
- }.
-
-
-upgrade_test_() ->
- {
- "Upgrade tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup_legacy/0, fun teardown_legacy/1,
- [
- fun should_upgrade_legacy_view_files/1
- ]
- }
- }
- }.
-
-should_not_remember_docs_in_index_after_backup_restore(DbName) ->
- ?_test(begin
- %% COUCHDB-640
-
- ok = backup_db_file(DbName),
- create_doc(DbName, "doc666"),
-
- Rows0 = query_view(DbName, "foo", "bar"),
- ?assert(has_doc("doc1", Rows0)),
- ?assert(has_doc("doc2", Rows0)),
- ?assert(has_doc("doc3", Rows0)),
- ?assert(has_doc("doc666", Rows0)),
-
- ?assertEqual(ok, restore_backup_db_file(DbName)),
-
- Rows1 = query_view(DbName, "foo", "bar"),
- ?assert(has_doc("doc1", Rows1)),
- ?assert(has_doc("doc2", Rows1)),
- ?assert(has_doc("doc3", Rows1)),
- ?assertNot(has_doc("doc666", Rows1))
- end).
-
-should_upgrade_legacy_view_files({DbName, Files}) ->
- ?_test(begin
- [_NewDbFilePath, OldViewFilePath, NewViewFilePath] = Files,
- ok = config:set("query_server_config", "commit_freq", "0", false),
-
- % ensure old header
- OldHeader = read_header(OldViewFilePath),
- ?assertEqual(6, tuple_size(OldHeader)),
- ?assertMatch(mrheader, element(1, OldHeader)),
-
- % query view for expected results
- Rows0 = query_view(DbName, "test", "test"),
- ?assertEqual(3, length(Rows0)),
-
- % ensure old file gone
- ?assertNot(filelib:is_regular(OldViewFilePath)),
-
- % add doc to trigger update
- DocUrl = db_url(DbName) ++ "/bar",
- {ok, _, _, _} = test_request:put(
- DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":4}">>),
-
- % query view for expected results
- Rows1 = query_view(DbName, "test", "test"),
- ?assertEqual(4, length(Rows1)),
-
- % ensure new header
-        timer:sleep(2000), % wait a while for the index upgrade to complete
- NewHeader = read_header(NewViewFilePath),
- ?assertMatch(#mrheader{}, NewHeader),
- NewViewStatus = hd(NewHeader#mrheader.view_states),
- ?assertEqual(3, tuple_size(NewViewStatus))
- end).
-
-
-should_have_two_indexes_alive_before_deletion({DbName, _}) ->
- view_cleanup(DbName),
- ?_assertEqual(2, count_index_files(DbName)).
-
-should_cleanup_index_file_after_ddoc_deletion({DbName, {FooRev, _}}) ->
- delete_design_doc(DbName, <<"_design/foo">>, FooRev),
- view_cleanup(DbName),
- ?_assertEqual(1, count_index_files(DbName)).
-
-should_cleanup_all_index_files({DbName, {FooRev, BooRev}})->
- delete_design_doc(DbName, <<"_design/foo">>, FooRev),
- delete_design_doc(DbName, <<"_design/boo">>, BooRev),
- view_cleanup(DbName),
- ?_assertEqual(0, count_index_files(DbName)).
-
-couchdb_1138(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
- ?assert(is_pid(IndexerPid)),
- ?assert(is_process_alive(IndexerPid)),
- ?assertEqual(2, count_users(DbName)),
-
- wait_indexer(IndexerPid),
-
- Rows0 = query_view(DbName, "foo", "bar"),
- ?assertEqual(3, length(Rows0)),
- ?assertEqual(2, count_users(DbName)),
- ?assert(is_process_alive(IndexerPid)),
-
- create_doc(DbName, "doc1000"),
- Rows1 = query_view(DbName, "foo", "bar"),
- ?assertEqual(4, length(Rows1)),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid)),
-
- compact_db(DbName),
- ?assert(is_process_alive(IndexerPid)),
-
- compact_view_group(DbName, "foo"),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid)),
-
- create_doc(DbName, "doc1001"),
- Rows2 = query_view(DbName, "foo", "bar"),
- ?assertEqual(5, length(Rows2)),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid))
- end).
-
-couchdb_1309(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
- ?assert(is_pid(IndexerPid)),
- ?assert(is_process_alive(IndexerPid)),
- ?assertEqual(2, count_users(DbName)),
-
- wait_indexer(IndexerPid),
-
- create_doc(DbName, "doc1001"),
- Rows0 = query_view(DbName, "foo", "bar"),
- check_rows_value(Rows0, null),
- ?assertEqual(4, length(Rows0)),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid)),
-
- update_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- {ok, NewIndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
- ?assert(is_pid(NewIndexerPid)),
- ?assert(is_process_alive(NewIndexerPid)),
- ?assertNotEqual(IndexerPid, NewIndexerPid),
- UserCnt = case count_users(DbName) of
- N when N > 2 ->
- timer:sleep(1000),
- count_users(DbName);
- N -> N
- end,
- ?assertEqual(2, UserCnt),
-
- Rows1 = query_view(DbName, "foo", "bar", ok),
- ?assertEqual(0, length(Rows1)),
- Rows2 = query_view(DbName, "foo", "bar"),
- check_rows_value(Rows2, 1),
- ?assertEqual(4, length(Rows2)),
-
- ok = stop_indexer( %% FIXME we need to grab monitor earlier
- fun() -> ok end,
- IndexerPid, ?LINE,
- "old view group is not dead after ddoc update"),
-
- ok = stop_indexer(
- fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end,
- NewIndexerPid, ?LINE,
- "new view group did not die after DB deletion")
- end).
-
-couchdb_1283() ->
- ?_test(begin
- ok = config:set("couchdb", "max_dbs_open", "3", false),
-
- {ok, MDb1} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/foo">>},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo2">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo3">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo4">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo5">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}}
- ]}}
- ]}),
- {ok, _} = couch_db:update_doc(MDb1, DDoc, []),
- ok = populate_db(MDb1, 100, 100),
- query_view(couch_db:name(MDb1), "foo", "foo"),
- ok = couch_db:close(MDb1),
-
- {ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>),
-
-        % Start and pause compaction
- WaitRef = erlang:make_ref(),
- meck:expect(couch_mrview_index, compact, fun(Db, State, Opts) ->
- receive {WaitRef, From, init} -> ok end,
- From ! {WaitRef, inited},
- receive {WaitRef, go} -> ok end,
- meck:passthrough([Db, State, Opts])
- end),
-
- {ok, CPid} = gen_server:call(Pid, compact),
- CRef = erlang:monitor(process, CPid),
- ?assert(is_process_alive(CPid)),
-
- % Make sure that our compactor is waiting for us
- % before we continue our assertions
- CPid ! {WaitRef, self(), init},
- receive {WaitRef, inited} -> ok end,
-
- % Make sure that a compaction process takes a monitor
- % on the database's main_pid
- ?assertEqual(true, lists:member(CPid, couch_db:monitored_by(MDb1))),
-
-        % Finish compaction and make sure the monitor
-        % disappears
- CPid ! {WaitRef, go},
- wait_for_process_shutdown(CRef, normal,
- {reason, "Failure compacting view group"}),
-
- % Make sure that the monitor was removed
- ?assertEqual(false, lists:member(CPid, couch_db:monitored_by(MDb1)))
- end).
-
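-% Wait for the 'DOWN' message from a monitor and assert the exit reason.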
-wait_for_process_shutdown(MonRef, ExpectedReason, Error) ->
-    receive
-        {'DOWN', MonRef, process, _, Reason} ->
- ?assertEqual(ExpectedReason, Reason)
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE}, Error]})
- end.
-
-
-create_doc(DbName, DocId) when is_list(DocId) ->
- create_doc(DbName, ?l2b(DocId));
-create_doc(DbName, DocId) when is_binary(DocId) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc666 = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"value">>, 999}
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc666]),
- couch_db:close(Db).
-
-create_docs(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
-    ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-    ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
-    ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
- couch_db:close(Db).
-
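-% Insert roughly N docs in batches of BatchSize; each doc carries
-% 1000 random bytes, base64-encoded.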
-populate_db(Db, BatchSize, N) when N > 0 ->
- Docs = lists:map(
- fun(_) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, couch_uuids:new()},
- {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))}
- ]})
- end,
- lists:seq(1, BatchSize)),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- populate_db(Db, BatchSize, N - length(Docs));
-populate_db(_Db, _, _) ->
- ok.
-
-create_design_doc(DbName, DDName, ViewName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {ViewName, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
- ]}}
- ]}}
- ]}),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db),
- Rev.
-
-update_design_doc(DbName, DDName, ViewName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- {ok, Doc} = couch_db:open_doc(Db, DDName, [?ADMIN_CTX]),
- {Props} = couch_doc:to_json_obj(Doc, []),
- Rev = couch_util:get_value(<<"_rev">>, Props),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"_rev">>, Rev},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {ViewName, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
- ]}}
- ]}}
- ]}),
- {ok, NewRev} = couch_db:update_doc(Db, DDoc, [?ADMIN_CTX]),
- couch_db:close(Db),
- NewRev.
-
-delete_design_doc(DbName, DDName, Rev) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"_deleted">>, true}
- ]}),
- {ok, _} = couch_db:update_doc(Db, DDoc, [Rev]),
- couch_db:close(Db).
-
-db_url(DbName) ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName).
-
-query_view(DbName, DDoc, View) ->
- query_view(DbName, DDoc, View, false).
-
-query_view(DbName, DDoc, View, Stale) ->
- {ok, Code, _Headers, Body} = test_request:get(
- db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View
- ++ case Stale of
- false -> [];
- _ -> "?stale=" ++ atom_to_list(Stale)
- end),
- ?assertEqual(200, Code),
- {Props} = jiffy:decode(Body),
- couch_util:get_value(<<"rows">>, Props, []).
-
-check_rows_value(Rows, Value) ->
- lists:foreach(
- fun({Row}) ->
- ?assertEqual(Value, couch_util:get_value(<<"value">>, Row))
- end, Rows).
-
-view_cleanup(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- couch_mrview:cleanup(Db),
- couch_db:close(Db).
-
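-% Count the distinct processes monitoring the db main pid, excluding
-% couch_file processes and the calling test process.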
-count_users(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- DbPid = couch_db:get_pid(Db),
- {monitored_by, Monitors0} = process_info(DbPid, monitored_by),
- Monitors = lists:filter(fun is_pid/1, Monitors0),
- CouchFiles = [P || P <- Monitors, couch_file:process_info(P) =/= undefined],
- ok = couch_db:close(Db),
- length(lists:usort(Monitors) -- [self() | CouchFiles]).
-
-count_index_files(DbName) ->
-    % list the index files for this database under the view index dir
-    RootDir = config:get("couchdb", "view_index_dir"),
-    length(filelib:wildcard(RootDir ++ "/." ++
-        binary_to_list(DbName) ++ "_design/mrview/*")).
-
-has_doc(DocId1, Rows) ->
- DocId = iolist_to_binary(DocId1),
- lists:any(fun({R}) -> lists:member({<<"id">>, DocId}, R) end, Rows).
-
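-% Copy the db file (or directory tree) aside as <path>.backup.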
-backup_db_file(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- SrcPath = couch_db:get_filepath(Db),
- Src = if
- is_list(SrcPath) -> SrcPath;
- true -> binary_to_list(SrcPath)
- end,
- ok = copy_tree(Src, Src ++ ".backup")
- after
- couch_db:close(Db)
- end.
-
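-% Shut down the open db, restore the backup copy, and wait until the
-% server reopens the restored file under a new pid.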
-restore_backup_db_file(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- Src = couch_db:get_filepath(Db),
- ok = couch_db:close(Db),
- DbPid = couch_db:get_pid(Db),
- exit(DbPid, shutdown),
- ok = copy_tree(Src ++ ".backup", Src),
-
- test_util:wait(fun() ->
- case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
- {ok, WaitDb} ->
- case couch_db:get_pid(WaitDb) == DbPid of
- true -> wait;
- false -> ok
- end;
- Else ->
- Else
- end
- end, ?TIMEOUT, ?DELAY).
-
-compact_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = couch_db:start_compact(Db),
- ok = couch_db:close(Db),
- wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).
-
-wait_db_compact_done(_DbName, 0) ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "DB compaction failed to finish"}]});
-wait_db_compact_done(DbName, N) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- ok = couch_db:close(Db),
- CompactorPid = couch_db:get_compactor_pid(Db),
- case is_pid(CompactorPid) of
- false ->
- ok;
- true ->
- ok = timer:sleep(?DELAY),
- wait_db_compact_done(DbName, N - 1)
- end.
-
-compact_view_group(DbName, DDocId) when is_list(DDocId) ->
- compact_view_group(DbName, ?l2b("_design/" ++ DDocId));
-compact_view_group(DbName, DDocId) when is_binary(DDocId) ->
- ok = couch_mrview:compact(DbName, DDocId),
- wait_view_compact_done(DbName, DDocId, 10).
-
-wait_view_compact_done(_DbName, _DDocId, 0) ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
-         {reason, "View compaction failed to finish"}]});
-wait_view_compact_done(DbName, DDocId, N) ->
- {ok, Code, _Headers, Body} = test_request:get(
- db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"),
- ?assertEqual(200, Code),
- {Info} = jiffy:decode(Body),
- {IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
- CompactRunning = couch_util:get_value(<<"compact_running">>, IndexInfo),
- case CompactRunning of
- false ->
- ok;
- true ->
- ok = timer:sleep(?DELAY),
- wait_view_compact_done(DbName, DDocId, N - 1)
- end.
-
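-% Read the raw index header term from a view file.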
-read_header(File) ->
- {ok, Fd} = couch_file:open(File),
- {ok, {_Sig, Header}} = couch_file:read_header(Fd),
- couch_file:close(Fd),
- Header.
-
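-% Run StopFun and wait for the indexer pid to exit; fail the test on timeout.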
-stop_indexer(StopFun, Pid, Line, Reason) ->
- case test_util:stop_sync(Pid, StopFun) of
- timeout ->
- erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, Line},
- {reason, Reason}]});
- ok ->
- ok
- end.
-
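-% Block until the indexer reports that no compaction is running.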
-wait_indexer(IndexerPid) ->
- test_util:wait(fun() ->
- {ok, Info} = couch_index:get_info(IndexerPid),
- case couch_util:get_value(compact_running, Info) of
- true ->
- wait;
- false ->
- ok
- end
- end).
-
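-% Recursively copy a file or a directory tree.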
-copy_tree(Src, Dst) ->
- case filelib:is_dir(Src) of
- true ->
- {ok, Files} = file:list_dir(Src),
- copy_tree(Files, Src, Dst);
- false ->
- ok = filelib:ensure_dir(Dst),
- {ok, _} = file:copy(Src, Dst),
- ok
- end.
-
-copy_tree([], _Src, _Dst) ->
- ok;
-copy_tree([File | Rest], Src, Dst) ->
- FullSrc = filename:join(Src, File),
- FullDst = filename:join(Dst, File),
- ok = copy_tree(FullSrc, FullDst),
- copy_tree(Rest, Src, Dst).
diff --git a/src/couch/test/eunit/fixtures/6cf2c2f766f87b618edf6630b00f8736.view b/src/couch/test/eunit/fixtures/6cf2c2f766f87b618edf6630b00f8736.view
deleted file mode 100644
index a5668eeaa..000000000
--- a/src/couch/test/eunit/fixtures/6cf2c2f766f87b618edf6630b00f8736.view
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg b/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg
deleted file mode 100644
index 30e475da8..000000000
--- a/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg
+++ /dev/null
@@ -1,19 +0,0 @@
-% Licensed to the Apache Software Foundation (ASF) under one
-% or more contributor license agreements. See the NOTICE file
-% distributed with this work for additional information
-% regarding copyright ownership. The ASF licenses this file
-% to you under the Apache License, Version 2.0 (the
-% "License"); you may not use this file except in compliance
-% with the License. You may obtain a copy of the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing,
-% software distributed under the License is distributed on an
-% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-% KIND, either express or implied. See the License for the
-% specific language governing permissions and limitations
-% under the License.
-
-{testing, stuff, "yay description"}.
-{number, '11', "randomosity"}.
diff --git a/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini b/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini
deleted file mode 100644
index cc5cd2187..000000000
--- a/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini
+++ /dev/null
@@ -1,20 +0,0 @@
-; Licensed to the Apache Software Foundation (ASF) under one
-; or more contributor license agreements. See the NOTICE file
-; distributed with this work for additional information
-; regarding copyright ownership. The ASF licenses this file
-; to you under the Apache License, Version 2.0 (the
-; "License"); you may not use this file except in compliance
-; with the License. You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing,
-; software distributed under the License is distributed on an
-; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-; KIND, either express or implied. See the License for the
-; specific language governing permissions and limitations
-; under the License.
-
-[stats]
-rate = 10000000 ; We call collect_sample in testing
-samples = [0, 1]
diff --git a/src/couch/test/eunit/fixtures/db_non_partitioned.couch b/src/couch/test/eunit/fixtures/db_non_partitioned.couch
deleted file mode 100644
index 327d9bb5d..000000000
--- a/src/couch/test/eunit/fixtures/db_non_partitioned.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch
deleted file mode 100644
index b0d39c9ec..000000000
--- a/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch
deleted file mode 100644
index b584fce31..000000000
--- a/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch
deleted file mode 100644
index ee4e11b7f..000000000
--- a/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch
deleted file mode 100644
index 814feb8e1..000000000
--- a/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch
deleted file mode 100644
index cab8331db..000000000
--- a/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch
deleted file mode 100644
index b613646b1..000000000
--- a/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch
deleted file mode 100644
index 126fc919e..000000000
--- a/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch
deleted file mode 100644
index 762dc8dad..000000000
--- a/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/logo.png b/src/couch/test/eunit/fixtures/logo.png
deleted file mode 100644
index d21ac025b..000000000
--- a/src/couch/test/eunit/fixtures/logo.png
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/multipart.http b/src/couch/test/eunit/fixtures/multipart.http
deleted file mode 100644
index fe9f271cc..000000000
--- a/src/couch/test/eunit/fixtures/multipart.http
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "_id": "our document goes here"
-}
-
---multipart_related_boundary~~~~~~~~~~~~~~~~~~~~
-Content-Type: application/json
-
-{"value":0,"_id":"doc0","_rev":"1-7e97409c987eac3a99385a17ad4cbabe","_attachments":{"plus1":{"stub":false,"follows":true,"content_type":"application/json","length":14}},".cache":{"plus1":{"timestamp":"2012-08-13T13:59:27.826Z"}}}
---multipart_related_boundary~~~~~~~~~~~~~~~~~~~~
-
-{"value":"01"}
---multipart_related_boundary~~~~~~~~~~~~~~~~~~~~--
-
diff --git a/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh b/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh
deleted file mode 100644
index 345c8b40b..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh -e
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-# Please do not make this file executable as that's the error being tested.
-
-sleep 5
diff --git a/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh b/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh
deleted file mode 100755
index 5bc10e83f..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh -e
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-sleep 2
diff --git a/src/couch/test/eunit/fixtures/os_daemon_configer.escript b/src/couch/test/eunit/fixtures/os_daemon_configer.escript
deleted file mode 100755
index f146b8314..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_configer.escript
+++ /dev/null
@@ -1,97 +0,0 @@
-#! /usr/bin/env escript
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include("../../include/couch_eunit.hrl").
-
-read() ->
- case io:get_line('') of
- eof ->
- stop;
- Data ->
- jiffy:decode(Data)
- end.
-
-write(Mesg) ->
- Data = iolist_to_binary(jiffy:encode(Mesg)),
-    io:format("~s~n", [Data]).
-
-get_cfg(Section) ->
- write([<<"get">>, Section]),
- read().
-
-get_cfg(Section, Name) ->
- write([<<"get">>, Section, Name]),
- read().
-
-log(Mesg) ->
- write([<<"log">>, Mesg]).
-
-log(Mesg, Level) ->
- write([<<"log">>, Mesg, {[{<<"level">>, Level}]}]).
-
-test_get_cfg1() ->
- Path = list_to_binary(?FILE),
- FileName = list_to_binary(filename:basename(?FILE)),
- {[{FileName, Path}]} = get_cfg(<<"os_daemons">>).
-
-test_get_cfg2() ->
- Path = list_to_binary(?FILE),
- FileName = list_to_binary(filename:basename(?FILE)),
- Path = get_cfg(<<"os_daemons">>, FileName),
- <<"sequential">> = get_cfg(<<"uuids">>, <<"algorithm">>).
-
-
-test_get_unknown_cfg() ->
- {[]} = get_cfg(<<"aal;3p4">>),
- null = get_cfg(<<"aal;3p4">>, <<"313234kjhsdfl">>).
-
-test_log() ->
- log(<<"foobar!">>),
- log(<<"some stuff!">>, <<"debug">>),
- log(2),
- log(true),
- write([<<"log">>, <<"stuff">>, 2]),
- write([<<"log">>, 3, null]),
- write([<<"log">>, [1, 2], {[{<<"level">>, <<"debug">>}]}]),
- write([<<"log">>, <<"true">>, {[]}]).
-
-do_tests() ->
- test_get_cfg1(),
- test_get_cfg2(),
- test_get_unknown_cfg(),
- test_log(),
- loop(io:read("")).
-
-loop({ok, _}) ->
- loop(io:read(""));
-loop(eof) ->
- init:stop();
-loop({error, _Reason}) ->
- init:stop().
-
-main([]) ->
- init_code_path(),
- do_tests().
-
-init_code_path() ->
- Paths = [
- "couchdb",
- "jiffy",
- "ibrowse",
- "mochiweb",
- "snappy"
- ],
- lists:foreach(fun(Name) ->
- code:add_patha(filename:join([?BUILDDIR(), "src", Name, "ebin"]))
- end, Paths).
diff --git a/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh b/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh
deleted file mode 100755
index 256ee7935..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh -e
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-exit 1
diff --git a/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh b/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh
deleted file mode 100755
index f5a13684e..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh -e
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-sleep 1
diff --git a/src/couch/test/eunit/fixtures/os_daemon_looper.escript b/src/couch/test/eunit/fixtures/os_daemon_looper.escript
deleted file mode 100755
index 73974e905..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_looper.escript
+++ /dev/null
@@ -1,26 +0,0 @@
-#! /usr/bin/env escript
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-loop() ->
- loop(io:read("")).
-
-loop({ok, _}) ->
- loop(io:read(""));
-loop(eof) ->
- stop;
-loop({error, Reason}) ->
- throw({error, Reason}).
-
-main([]) ->
- loop().
diff --git a/src/couch/test/eunit/fixtures/test.couch b/src/couch/test/eunit/fixtures/test.couch
deleted file mode 100644
index 5347a222f..000000000
--- a/src/couch/test/eunit/fixtures/test.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/global_changes_tests.erl b/src/couch/test/eunit/global_changes_tests.erl
deleted file mode 100644
index 4392aafac..000000000
--- a/src/couch/test/eunit/global_changes_tests.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
-setup() ->
- Host = get_host(),
- ok = add_admin(?USER, ?PASS),
- DbName = "foo/" ++ ?b2l(?tempdb()),
- ok = http_create_db(DbName),
- {Host, DbName}.
-
-teardown({_, DbName}) ->
- ok = http_delete_db(DbName),
- delete_admin(?USER),
- ok.
-
-http_create_db(Name) ->
- {ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""),
- true = lists:member(Status, [201, 202]),
- ok.
-
-http_delete_db(Name) ->
- {ok, Status, _, _} = test_request:delete(db_url(Name), [?AUTH]),
- true = lists:member(Status, [200, 202]),
- ok.
-
-db_url(Name) ->
- get_host() ++ "/" ++ escape(Name).
-
-start_couch() ->
- Ctx = test_util:start_couch([chttpd, global_changes]),
- ok = ensure_db_exists("_global_changes"),
- Ctx.
-
-ensure_db_exists(Name) ->
- case fabric:create_db(Name) of
- ok ->
- ok;
- {error, file_exists} ->
- ok
- end.
-
-global_changes_test_() ->
- {
- "Checking global_changes endpoint",
- {
- setup,
- fun start_couch/0,
- fun test_util:stop/1,
- [
- check_response()
- ]
- }
- }.
-
-check_response() ->
- {
- "Check response",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_return_correct_response_on_create/1,
- fun should_return_correct_response_on_update/1
- ]
- }
- }.
-
-should_return_correct_response_on_create({Host, DbName}) ->
- ?_test(begin
- Headers = [?AUTH],
- create_doc(Host, DbName, "bar/baz"),
- {Status, Events} = request_updates(Host, DbName, Headers),
- ?assertEqual(200, Status),
- ?assertEqual([<<"created">>, <<"updated">>], Events)
- end).
-
-should_return_correct_response_on_update({Host, DbName}) ->
- ?_test(begin
- Headers = [?AUTH],
- create_doc(Host, DbName, "bar/baz"),
- update_doc(Host, DbName, "bar/baz", "new_value"),
- {Status, Events} = request_updates(Host, DbName, Headers),
- ?assertEqual(200, Status),
- ?assertEqual([<<"created">>, <<"updated">>], Events)
- end).
-
-create_doc(Host, DbName, Id) ->
- Headers = [?AUTH],
- Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id),
- Body = jiffy:encode({[
- {key, "value"}
- ]}),
- {ok, Status, _Headers, _Body} = test_request:put(Url, Headers, Body),
- ?assert(Status =:= 201 orelse Status =:= 202),
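-    % allow time for the event to reach the _db_updates feed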
- timer:sleep(1000),
- ok.
-
-update_doc(Host, DbName, Id, Value) ->
- Headers = [?AUTH],
- Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id),
- {ok, 200, _Headers0, BinBody} = test_request:get(Url, Headers),
- [Rev] = decode_response(BinBody, [<<"_rev">>]),
- Body = jiffy:encode({[
- {key, Value},
- {'_rev', Rev}
- ]}),
- {ok, Status, _Headers1, _Body} = test_request:put(Url, Headers, Body),
- ?assert(Status =:= 201 orelse Status =:= 202),
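-    % allow time for the event to reach the _db_updates feed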
- timer:sleep(1000),
- ok.
-
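-% Fetch _db_updates and return the sorted event types recorded for DbName.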
-request_updates(Host, DbName, Headers) ->
- Url = Host ++ "/_db_updates",
- {ok, Status, _Headers, BinBody} = test_request:get(Url, Headers),
- [Results] = decode_response(BinBody, [<<"results">>]),
- ToDecode = [<<"db_name">>, <<"type">>],
- Values = [decode_result(Result, ToDecode) || Result <- Results],
- Result = [Type || [DB, Type] <- Values, DB == ?l2b(DbName)],
- {Status, lists:sort(Result)}.
-
-decode_result({Props}, ToDecode) ->
- [couch_util:get_value(Key, Props) || Key <- ToDecode].
-
-decode_response(BinBody, ToDecode) ->
- {Body} = jiffy:decode(BinBody),
- [couch_util:get_value(Key, Body) || Key <- ToDecode].
-
-add_admin(User, Pass) ->
- Hashed = couch_passwords:hash_admin_password(Pass),
- config:set("admins", User, ?b2l(Hashed), _Persist=false).
-
-delete_admin(User) ->
- config:delete("admins", User, false).
-
-get_host() ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- "http://" ++ Addr ++ ":" ++ Port.
-
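-% Percent-encode slashes so db and doc names containing '/' survive in URLs.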
-escape(Path) ->
- re:replace(Path, "/", "%2f", [global, {return, list}]).
diff --git a/src/couch/test/eunit/json_stream_parse_tests.erl b/src/couch/test/eunit/json_stream_parse_tests.erl
deleted file mode 100644
index e690d7728..000000000
--- a/src/couch/test/eunit/json_stream_parse_tests.erl
+++ /dev/null
@@ -1,151 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(json_stream_parse_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(CASES,
- [
-        {1, "1", "integer numeric literal"},
-        {3.1416, "3.14160", "float numeric literal"}, % text representation may truncate or add trailing zeroes
- {-1, "-1", "negative integer numeric literal"},
- {-3.1416, "-3.14160", "negative float numeric literal"},
- {12.0e10, "1.20000e+11", "float literal in scientific notation"},
- {1.234E+10, "1.23400e+10", "another float literal in scientific notation"},
- {-1.234E-10, "-1.23400e-10", "negative float literal in scientific notation"},
- {10.0, "1.0e+01", "yet another float literal in scientific notation"},
- {123.456, "1.23456E+2", "yet another float literal in scientific notation"},
- {10.0, "1e1", "yet another float literal in scientific notation"},
- {<<"foo">>, "\"foo\"", "string literal"},
- {<<"foo", 5, "bar">>, "\"foo\\u0005bar\"", "string literal with \\u0005"},
- {<<"">>, "\"\"", "empty string literal"},
-        {<<"\n\n\n">>, "\"\\n\\n\\n\"", "newlines-only string literal"},
-        {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\"",
-            "whitespace-only string literal"},
- {null, "null", "null literal"},
- {true, "true", "true literal"},
- {false, "false", "false literal"},
- {<<"null">>, "\"null\"", "null string literal"},
- {<<"true">>, "\"true\"", "true string literal"},
- {<<"false">>, "\"false\"", "false string literal"},
- {{[]}, "{}", "empty object literal"},
- {{[{<<"foo">>, <<"bar">>}]}, "{\"foo\":\"bar\"}",
- "simple object literal"},
- {{[{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]},
- "{\"foo\":\"bar\",\"baz\":123}", "another simple object literal"},
- {[], "[]", "empty array literal"},
- {[[]], "[[]]", "empty array literal inside a single element array literal"},
- {[1, <<"foo">>], "[1,\"foo\"]", "simple non-empty array literal"},
- {[1199344435545.0, 1], "[1199344435545.0,1]",
- "another simple non-empty array literal"},
- {[false, true, 321, null], "[false, true, 321, null]", "array of literals"},
- {{[{<<"foo">>, [123]}]}, "{\"foo\":[123]}",
- "object literal with an array valued property"},
- {{[{<<"foo">>, {[{<<"bar">>, true}]}}]},
- "{\"foo\":{\"bar\":true}}", "nested object literal"},
- {{[{<<"foo">>, []}, {<<"bar">>, {[{<<"baz">>, true}]}},
- {<<"alice">>, <<"bob">>}]},
- "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}",
- "complex object literal"},
- {[-123, <<"foo">>, {[{<<"bar">>, []}]}, null],
- "[-123,\"foo\",{\"bar\":[]},null]",
- "complex array literal"}
- ]
-).
-
-
-raw_json_input_test_() ->
- Tests = lists:map(
- fun({EJson, JsonString, Desc}) ->
- {Desc,
- ?_assert(equiv(EJson, json_stream_parse:to_ejson(JsonString)))}
- end, ?CASES),
- {"Tests with raw JSON string as the input", Tests}.
-
-one_byte_data_fun_test_() ->
- Tests = lists:map(
- fun({EJson, JsonString, Desc}) ->
- DataFun = fun() -> single_byte_data_fun(JsonString) end,
- {Desc,
- ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
- end, ?CASES),
- {"Tests with a 1 byte output data function as the input", Tests}.
-
-test_multiple_bytes_data_fun_test_() ->
- Tests = lists:map(
- fun({EJson, JsonString, Desc}) ->
- DataFun = fun() -> multiple_bytes_data_fun(JsonString) end,
- {Desc,
- ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
- end, ?CASES),
- {"Tests with a multiple bytes output data function as the input", Tests}.
-
-
-%% Test for equivalence of Erlang terms.
-%% Due to arbitrary order of construction, equivalent objects might
-%% compare unequal as erlang terms, so we need to carefully recurse
-%% through aggregates (objects and lists).
-equiv({Props1}, {Props2}) ->
- equiv_object(Props1, Props2);
-equiv(L1, L2) when is_list(L1), is_list(L2) ->
- equiv_list(L1, L2);
-equiv(N1, N2) when is_number(N1), is_number(N2) ->
- N1 == N2;
-equiv(B1, B2) when is_binary(B1), is_binary(B2) ->
- B1 == B2;
-equiv(true, true) ->
- true;
-equiv(false, false) ->
- true;
-equiv(null, null) ->
- true.
-
-%% Object representation and traversal order is unknown.
-%% Use the sledgehammer and sort property lists.
-equiv_object(Props1, Props2) ->
- L1 = lists:keysort(1, Props1),
- L2 = lists:keysort(1, Props2),
- Pairs = lists:zip(L1, L2),
- true = lists:all(
- fun({{K1, V1}, {K2, V2}}) ->
- equiv(K1, K2) andalso equiv(V1, V2)
- end,
- Pairs).
-
-%% Recursively compare list elements for equivalence.
-equiv_list([], []) ->
- true;
-equiv_list([V1 | L1], [V2 | L2]) ->
- equiv(V1, V2) andalso equiv_list(L1, L2).
-
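-%% Stream the JSON input to the parser one byte at a time.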
-single_byte_data_fun([]) ->
- done;
-single_byte_data_fun([H | T]) ->
- {<<H>>, fun() -> single_byte_data_fun(T) end}.
-
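-%% Stream the JSON input in random chunks of 0 to 6 bytes.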
-multiple_bytes_data_fun([]) ->
- done;
-multiple_bytes_data_fun(L) ->
- N = couch_rand:uniform(7) - 1,
- {Part, Rest} = split(L, N),
- {list_to_binary(Part), fun() -> multiple_bytes_data_fun(Rest) end}.
-
-split(L, N) when length(L) =< N ->
- {L, []};
-split(L, N) ->
- take(N, L, []).
-
-take(0, L, Acc) ->
- {lists:reverse(Acc), L};
-take(N, [H|L], Acc) ->
- take(N - 1, L, [H | Acc]).
diff --git a/src/couch/test/eunit/test_web.erl b/src/couch/test/eunit/test_web.erl
deleted file mode 100644
index b1b3e65c9..000000000
--- a/src/couch/test/eunit/test_web.erl
+++ /dev/null
@@ -1,114 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(test_web).
--behaviour(gen_server).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--export([start_link/0, stop/0, loop/1, get_port/0, set_assert/1, check_last/0]).
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--define(SERVER, test_web_server).
--define(HANDLER, test_web_handler).
--define(DELAY, 500).
-
-start_link() ->
- gen_server:start({local, ?HANDLER}, ?MODULE, [], []),
- mochiweb_http:start([
- {name, ?SERVER},
- {loop, {?MODULE, loop}},
- {port, 0}
- ]).
-
-loop(Req) ->
- %?debugFmt("Handling request: ~p", [Req]),
- case gen_server:call(?HANDLER, {check_request, Req}) of
- {ok, RespInfo} ->
- {ok, Req:respond(RespInfo)};
- {raw, {Status, Headers, BodyChunks}} ->
- Resp = Req:start_response({Status, Headers}),
- lists:foreach(fun(C) -> Resp:send(C) end, BodyChunks),
- erlang:put(mochiweb_request_force_close, true),
- {ok, Resp};
- {chunked, {Status, Headers, BodyChunks}} ->
- Resp = Req:respond({Status, Headers, chunked}),
- timer:sleep(?DELAY),
- lists:foreach(fun(C) -> Resp:write_chunk(C) end, BodyChunks),
- Resp:write_chunk([]),
- {ok, Resp};
- {error, Reason} ->
- ?debugFmt("Error: ~p", [Reason]),
- Body = lists:flatten(io_lib:format("Error: ~p", [Reason])),
- {ok, Req:respond({200, [], Body})}
- end.
-
-get_port() ->
- mochiweb_socket_server:get(?SERVER, port).
-
-set_assert(Fun) ->
- ?assertEqual(ok, gen_server:call(?HANDLER, {set_assert, Fun})).
-
-check_last() ->
- gen_server:call(?HANDLER, last_status).
-
-init(_) ->
- {ok, nil}.
-
-terminate(_Reason, _State) ->
- ok.
-
-stop() ->
- mochiweb_http:stop(?SERVER).
-
-
-handle_call({check_request, Req}, _From, State) when is_function(State, 1) ->
- Resp2 = case (catch State(Req)) of
- {ok, Resp} ->
- {reply, {ok, Resp}, was_ok};
- {raw, Resp} ->
- {reply, {raw, Resp}, was_ok};
- {chunked, Resp} ->
- {reply, {chunked, Resp}, was_ok};
- Error ->
- {reply, {error, Error}, not_ok}
- end,
- Req:cleanup(),
- Resp2;
-handle_call({check_request, _Req}, _From, _State) ->
- {reply, {error, no_assert_function}, not_ok};
-handle_call(last_status, _From, State) when is_atom(State) ->
- {reply, State, nil};
-handle_call(last_status, _From, State) ->
- {reply, {error, not_checked}, State};
-handle_call({set_assert, Fun}, _From, nil) ->
- {reply, ok, Fun};
-handle_call({set_assert, _}, _From, State) ->
- {reply, {error, assert_function_set}, State};
-handle_call(Msg, _From, State) ->
- {reply, {ignored, Msg}, State}.
-
-handle_cast(stop, State) ->
- {stop, normal, State};
-handle_cast(Msg, State) ->
- ?debugFmt("Ignoring cast message: ~p", [Msg]),
- {noreply, State}.
-
-handle_info(Msg, State) ->
- ?debugFmt("Ignoring info message: ~p", [Msg]),
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.