summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlexander Shorin <kxepal@apache.org>2015-12-03 02:04:44 +0300
committerAlexander Shorin <kxepal@apache.org>2015-12-03 02:04:44 +0300
commit921006fa49b362e5673a6aae4586e832ec325dc6 (patch)
treef1ed547079e4b53f85d15d1d8e1b1df4041043ae
parentca0ce9b8c7fad24c58b1a45b2bde16164718452d (diff)
parentffe3b33001b558307cd0812ab9226910389d6339 (diff)
downloadcouchdb-921006fa49b362e5673a6aae4586e832ec325dc6.tar.gz
Merge branch '1963-eunit-1.7' into 1.x.x
-rw-r--r--.gitignore11
-rw-r--r--.travis.yml3
-rw-r--r--LICENSE25
-rw-r--r--Makefile.am18
-rw-r--r--NOTICE4
-rw-r--r--configure.ac8
-rw-r--r--license.skip18
-rw-r--r--src/Makefile.am1
-rw-r--r--src/couch_mrview/Makefile.am16
-rw-r--r--src/couch_mrview/test/04-index-info.t54
-rw-r--r--src/couch_mrview/test/05-collation.t163
-rw-r--r--src/couch_mrview/test/07-compact-swap.t57
-rw-r--r--src/couch_mrview/test/couch_mrview_all_docs_tests.erl (renamed from src/couch_mrview/test/06-all-docs.t)91
-rw-r--r--src/couch_mrview/test/couch_mrview_collation_tests.erl202
-rw-r--r--src/couch_mrview/test/couch_mrview_compact_tests.erl101
-rw-r--r--src/couch_mrview/test/couch_mrview_index_info_tests.erl87
-rw-r--r--src/couch_mrview/test/couch_mrview_map_views_tests.erl (renamed from src/couch_mrview/test/02-map-views.t)119
-rw-r--r--src/couch_mrview/test/couch_mrview_modules_load_tests.erl (renamed from src/couch_mrview/test/01-load.t)27
-rw-r--r--src/couch_mrview/test/couch_mrview_red_views_tests.erl (renamed from src/couch_mrview/test/03-red-views.t)76
-rw-r--r--src/couch_replicator/Makefile.am16
-rwxr-xr-xsrc/couch_replicator/test/02-httpc-pool.t250
-rwxr-xr-xsrc/couch_replicator/test/03-replication-compact.t488
-rwxr-xr-xsrc/couch_replicator/test/04-replication-large-atts.t267
-rwxr-xr-xsrc/couch_replicator/test/05-replication-many-leaves.t294
-rwxr-xr-xsrc/couch_replicator/test/06-doc-missing-stubs.t304
-rwxr-xr-xsrc/couch_replicator/test/07-use-checkpoints.t273
-rw-r--r--src/couch_replicator/test/couch_replicator_compact_tests.erl448
-rw-r--r--src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl189
-rw-r--r--src/couch_replicator/test/couch_replicator_large_atts_tests.erl218
-rw-r--r--src/couch_replicator/test/couch_replicator_many_leaves_tests.erl232
-rw-r--r--src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl260
-rw-r--r--src/couch_replicator/test/couch_replicator_modules_load_tests.erl (renamed from src/couch_replicator/test/01-load.t)27
-rw-r--r--src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl200
-rw-r--r--src/couchdb/couch_key_tree.erl2
-rw-r--r--src/etap/etap.erl614
-rw-r--r--test/Makefile.am2
-rw-r--r--test/couchdb/Makefile.am83
-rw-r--r--test/couchdb/couch_auth_cache_tests.erl238
-rw-r--r--test/couchdb/couch_btree_tests.erl551
-rw-r--r--test/couchdb/couch_changes_tests.erl612
-rw-r--r--test/couchdb/couch_config_tests.erl463
-rw-r--r--test/couchdb/couch_db_tests.erl114
-rw-r--r--test/couchdb/couch_doc_json_tests.erl391
-rw-r--r--test/couchdb/couch_file_tests.erl265
-rw-r--r--test/couchdb/couch_key_tree_tests.erl380
-rw-r--r--test/couchdb/couch_passwords_tests.erl54
-rw-r--r--test/couchdb/couch_ref_counter_tests.erl107
-rw-r--r--test/couchdb/couch_stats_tests.erl421
-rw-r--r--test/couchdb/couch_stream_tests.erl100
-rw-r--r--test/couchdb/couch_task_status_tests.erl225
-rw-r--r--test/couchdb/couch_util_tests.erl136
-rw-r--r--test/couchdb/couch_uuids_tests.erl161
-rw-r--r--test/couchdb/couch_work_queue_tests.erl393
-rw-r--r--test/couchdb/couchdb_attachments_tests.erl638
-rw-r--r--test/couchdb/couchdb_compaction_daemon.erl231
-rw-r--r--test/couchdb/couchdb_cors_tests.erl344
-rw-r--r--test/couchdb/couchdb_file_compression_tests.erl239
-rw-r--r--test/couchdb/couchdb_http_proxy_tests.erl554
-rw-r--r--[-rwxr-xr-x]test/couchdb/couchdb_modules_load_tests.erl (renamed from test/etap/001-load.t)30
-rw-r--r--test/couchdb/couchdb_os_daemons_tests.erl329
-rw-r--r--test/couchdb/couchdb_os_proc_pool.erl179
-rw-r--r--test/couchdb/couchdb_update_conflicts_tests.erl243
-rw-r--r--test/couchdb/couchdb_vhosts_tests.erl441
-rw-r--r--test/couchdb/couchdb_views_tests.erl677
-rw-r--r--test/couchdb/eunit.ini (renamed from test/etap/041-uuid-gen-id.ini)14
-rw-r--r--test/couchdb/fixtures/3b835456c235b1827e012e25666152f3.view (renamed from test/etap/fixtures/3b835456c235b1827e012e25666152f3.view)bin4192 -> 4192 bytes
-rw-r--r--test/couchdb/fixtures/Makefile.am (renamed from src/etap/Makefile.am)25
-rw-r--r--test/couchdb/fixtures/couch_config_tests_1.ini (renamed from test/etap/081-config-override.1.ini)0
-rw-r--r--test/couchdb/fixtures/couch_config_tests_2.ini (renamed from test/etap/081-config-override.2.ini)0
-rw-r--r--test/couchdb/fixtures/couch_stats_aggregates.cfg (renamed from test/etap/121-stats-aggregates.cfg)0
-rw-r--r--test/couchdb/fixtures/couch_stats_aggregates.ini (renamed from test/etap/121-stats-aggregates.ini)0
-rw-r--r--test/couchdb/fixtures/logo.pngbin0 -> 3010 bytes
-rw-r--r--test/couchdb/fixtures/os_daemon_bad_perm.sh (renamed from test/etap/172-os-daemon-errors.1.sh)0
-rwxr-xr-xtest/couchdb/fixtures/os_daemon_can_reboot.sh (renamed from test/etap/172-os-daemon-errors.4.sh)0
-rwxr-xr-xtest/couchdb/fixtures/os_daemon_configer.escript.in (renamed from test/etap/171-os-daemons-config.es)43
-rwxr-xr-xtest/couchdb/fixtures/os_daemon_die_on_boot.sh (renamed from test/etap/172-os-daemon-errors.2.sh)0
-rwxr-xr-xtest/couchdb/fixtures/os_daemon_die_quickly.sh (renamed from test/etap/172-os-daemon-errors.3.sh)0
-rwxr-xr-xtest/couchdb/fixtures/os_daemon_looper.escript (renamed from test/etap/170-os-daemons.es)0
-rw-r--r--test/couchdb/fixtures/test.couch (renamed from test/etap/fixtures/test.couch)bin16482 -> 16482 bytes
-rw-r--r--test/couchdb/fixtures/test_cfg_register.c (renamed from test/etap/test_cfg_register.c)0
-rw-r--r--test/couchdb/include/couch_eunit.hrl.in64
-rw-r--r--[-rwxr-xr-x]test/couchdb/json_stream_parse_tests.erl (renamed from test/etap/190-json-stream-parse.t)97
-rw-r--r--test/couchdb/run.in111
-rw-r--r--test/couchdb/test_request.erl75
-rw-r--r--test/couchdb/test_web.erl (renamed from test/etap/test_web.erl)35
-rwxr-xr-xtest/etap/002-icu-driver.t33
-rwxr-xr-xtest/etap/010-file-basics.t113
-rwxr-xr-xtest/etap/011-file-headers.t152
-rwxr-xr-xtest/etap/020-btree-basics.t265
-rwxr-xr-xtest/etap/021-btree-reductions.t237
-rwxr-xr-xtest/etap/030-doc-from-json.t236
-rwxr-xr-xtest/etap/031-doc-to-json.t197
-rwxr-xr-xtest/etap/040-util.t80
-rw-r--r--test/etap/041-uuid-gen-seq.ini19
-rw-r--r--test/etap/041-uuid-gen-utc.ini19
-rwxr-xr-xtest/etap/041-uuid-gen.t147
-rwxr-xr-xtest/etap/042-work-queue.t500
-rwxr-xr-xtest/etap/043-find-in-binary.t68
-rwxr-xr-xtest/etap/050-stream.t87
-rwxr-xr-xtest/etap/060-kt-merging.t176
-rwxr-xr-xtest/etap/061-kt-missing-leaves.t65
-rwxr-xr-xtest/etap/062-kt-remove-leaves.t69
-rwxr-xr-xtest/etap/063-kt-get-leaves.t98
-rwxr-xr-xtest/etap/064-kt-counting.t46
-rwxr-xr-xtest/etap/065-kt-stemming.t42
-rwxr-xr-xtest/etap/070-couch-db.t73
-rwxr-xr-xtest/etap/072-cleanup.t126
-rwxr-xr-xtest/etap/073-changes.t558
-rwxr-xr-xtest/etap/074-doc-update-conflicts.t218
-rwxr-xr-xtest/etap/075-auth-cache.t276
-rwxr-xr-xtest/etap/076-file-compression.t186
-rw-r--r--test/etap/077-couch-db-fast-db-delete-create.t61
-rwxr-xr-xtest/etap/080-config-get-set.t128
-rwxr-xr-xtest/etap/081-config-override.t212
-rwxr-xr-xtest/etap/082-config-register.t94
-rwxr-xr-xtest/etap/083-config-no-files.t53
-rwxr-xr-xtest/etap/090-task-status.t279
-rwxr-xr-xtest/etap/100-ref-counter.t114
-rwxr-xr-xtest/etap/120-stats-collect.t150
-rwxr-xr-xtest/etap/121-stats-aggregates.t171
-rwxr-xr-xtest/etap/130-attachments-md5.t248
-rwxr-xr-xtest/etap/140-attachment-comp.t728
-rwxr-xr-xtest/etap/150-invalid-view-seq.t183
-rwxr-xr-xtest/etap/160-vhosts.t371
-rwxr-xr-xtest/etap/170-os-daemons.t114
-rwxr-xr-xtest/etap/171-os-daemons-config.t74
-rwxr-xr-xtest/etap/172-os-daemon-errors.t126
-rwxr-xr-xtest/etap/173-os-daemon-cfg-register.t116
-rw-r--r--test/etap/180-http-proxy.ini20
-rwxr-xr-xtest/etap/180-http-proxy.t376
-rwxr-xr-xtest/etap/200-view-group-no-db-leaks.t308
-rwxr-xr-xtest/etap/201-view-group-shutdown.t293
-rwxr-xr-xtest/etap/210-os-proc-pool.t163
-rwxr-xr-xtest/etap/220-compaction-daemon.t225
-rw-r--r--test/etap/230-pbkfd2.t38
-rw-r--r--test/etap/231-cors.t430
-rw-r--r--test/etap/250-upgrade-legacy-view-files.t168
-rw-r--r--test/etap/Makefile.am109
-rw-r--r--test/etap/run.tpl32
-rw-r--r--test/etap/test_util.erl.in94
140 files changed, 11113 insertions, 12679 deletions
diff --git a/.gitignore b/.gitignore
index 19f0748df..b7109d021 100644
--- a/.gitignore
+++ b/.gitignore
@@ -127,12 +127,11 @@ src/snappy/priv
src/snappy/snappy.app
stamp-h1
test/.deps/
-test/etap/.deps/
-test/etap/run
-test/etap/run
-test/etap/temp.*
-test/etap/test_cfg_register
-test/etap/test_util.erl
+test/couchdb/run
+test/couchdb/fixtures/.deps/
+test/couchdb/fixtures/os_daemon_configer.escript
+test/couchdb/include/couch_eunit.hrl
+test/couchdb/fixtures/.deps/
test/javascript/run
test/javascript/run_js_tests.sh
test/local.ini
diff --git a/.travis.yml b/.travis.yml
index 1bddb6a99..f48b33652 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -17,3 +17,6 @@ otp_release:
- R16B03-1
- R15B03
- R14B04
+matrix:
+ allow_failures:
+ - otp_release: R14B04
diff --git a/LICENSE b/LICENSE
index 4c58f19c8..0fbc12312 100644
--- a/LICENSE
+++ b/LICENSE
@@ -474,31 +474,6 @@ For the src/erlang-oauth component:
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
-For the src/etap component:
-
- Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
-
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated documentation
- files (the "Software"), to deal in the Software without
- restriction, including without limitation the rights to use,
- copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following
- conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- OTHER DEALINGS IN THE SOFTWARE.
-
For the src/ejson/yajl component
Copyright 2010, Lloyd Hilaiel.
diff --git a/Makefile.am b/Makefile.am
index 22809f8a6..740885c5a 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -77,7 +77,7 @@ THANKS.gz: THANKS
check: dev check-js
if TESTS
- $(top_builddir)/test/etap/run $(top_srcdir)/test/etap
+ $(top_builddir)/test/couchdb/run -v $(top_srcdir)/test/couchdb
endif
check-js: dev
@@ -87,20 +87,9 @@ if USE_CURL
endif
endif
-check-etap: dev
+check-eunit: dev
if TESTS
- $(top_builddir)/test/etap/run $(top_srcdir)/test/etap
-endif
-
-cover: dev
-if TESTS
- rm -f cover/*.coverdata
- COVER=1 COVER_BIN=./src/couchdb/ $(top_builddir)/test/etap/run
- SRC=./src/couchdb/ \
- $(ERL) -noshell \
- -pa src/etap \
- -eval 'etap_report:create()' \
- -s init stop > /dev/null 2>&1
+ $(top_builddir)/test/couchdb/run -v $(top_srcdir)/test/couchdb
endif
dev: all
@@ -137,7 +126,6 @@ local-clean: maintainer-clean
rm -f $(top_srcdir)/aclocal.m4
rm -f $(top_srcdir)/config.h.in
rm -f $(top_srcdir)/configure
- rm -f $(top_srcdir)/test/etap/temp.*
rm -f $(top_srcdir)/*.tar.gz
rm -f $(top_srcdir)/*.tar.gz.*
find $(top_srcdir) -name Makefile.in -exec rm -f {} \;
diff --git a/NOTICE b/NOTICE
index 08e3b828d..be5ed49a2 100644
--- a/NOTICE
+++ b/NOTICE
@@ -42,10 +42,6 @@ This product also includes the following third-party components:
Copyright 2012, the authors and contributors
- * ETap (http://github.com/ngerakines/etap/)
-
- Copyright 2009, Nick Gerakines <nick@gerakines.net>
-
* mimeparse.js (http://code.google.com/p/mimeparse/)
Copyright 2009, Chris Anderson <jchris@apache.org>
diff --git a/configure.ac b/configure.ac
index 656b77ce9..fb592e53d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -750,15 +750,17 @@ AC_CONFIG_FILES([src/couchdb/couch.app.tpl])
AC_CONFIG_FILES([src/couchdb/Makefile])
AC_CONFIG_FILES([src/couchdb/priv/Makefile])
AC_CONFIG_FILES([src/erlang-oauth/Makefile])
-AC_CONFIG_FILES([src/etap/Makefile])
AC_CONFIG_FILES([src/ibrowse/Makefile])
AC_CONFIG_FILES([src/mochiweb/Makefile])
AC_CONFIG_FILES([src/snappy/Makefile])
AC_CONFIG_FILES([src/snappy/google-snappy/snappy-stubs-public.h])
AC_CONFIG_FILES([src/ejson/Makefile])
AC_CONFIG_FILES([test/Makefile])
-AC_CONFIG_FILES([test/etap/Makefile])
-AC_CONFIG_FILES([test/etap/test_util.erl])
+AC_CONFIG_FILES([test/couchdb/run])
+AC_CONFIG_FILES([test/couchdb/Makefile])
+AC_CONFIG_FILES([test/couchdb/fixtures/Makefile])
+AC_CONFIG_FILES([test/couchdb/fixtures/os_daemon_configer.escript])
+AC_CONFIG_FILES([test/couchdb/include/couch_eunit.hrl])
AC_CONFIG_FILES([test/javascript/Makefile])
AC_CONFIG_FILES([test/view_server/Makefile])
AC_CONFIG_FILES([utils/Makefile])
diff --git a/license.skip b/license.skip
index 2e541a1ea..a32cc19bc 100644
--- a/license.skip
+++ b/license.skip
@@ -164,14 +164,16 @@
^stamp-h1
^test/Makefile
^test/Makefile.in
-^test/etap/.*.beam
-^test/etap/.*.o
-^test/etap/.deps/.*
-^test/etap/test_cfg_register
-^test/etap/Makefile
-^test/etap/Makefile.in
-^test/etap/temp..*
-^test/etap/fixtures/*
+^test/couchdb/Makefile
+^test/couchdb/Makefile.in
+^test/couchdb/fixtures/logo.png
+^test/couchdb/fixtures/3b835456c235b1827e012e25666152f3.view
+^test/couchdb/fixtures/Makefile
+^test/couchdb/fixtures/Makefile.in
+^test/couchdb/fixtures/test.couch
+^test/couchdb/fixtures/.deps/test_cfg_register-test_cfg_register.Po
+^test/couchdb/fixtures/test_cfg_register
+^test/couchdb/fixtures/test_cfg_register.o
^test/javascript/Makefile
^test/javascript/Makefile.in
^test/local.ini
diff --git a/src/Makefile.am b/src/Makefile.am
index a17674cc3..a535dd7b6 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -19,7 +19,6 @@ SUBDIRS = \
couchdb \
ejson \
erlang-oauth \
- etap \
ibrowse \
mochiweb \
snappy \
diff --git a/src/couch_mrview/Makefile.am b/src/couch_mrview/Makefile.am
index 2b9ef86fa..b9abe286d 100644
--- a/src/couch_mrview/Makefile.am
+++ b/src/couch_mrview/Makefile.am
@@ -33,13 +33,13 @@ source_files = \
src/couch_mrview_util.erl
test_files = \
- test/01-load.t \
- test/02-map-views.t \
- test/03-red-views.t \
- test/04-index-info.t \
- test/05-collation.t \
- test/06-all-docs.t \
- test/07-compact-swap.t
+ test/couch_mrview_all_docs_tests.erl \
+ test/couch_mrview_collation_tests.erl \
+ test/couch_mrview_compact_tests.erl \
+ test/couch_mrview_index_info_tests.erl \
+ test/couch_mrview_map_views_tests.erl \
+ test/couch_mrview_modules_load_tests.erl \
+ test/couch_mrview_red_views_tests.erl
compiled_files = \
ebin/couch_mrview.app \
@@ -58,7 +58,7 @@ CLEANFILES = $(compiled_files)
check:
if TESTS
- $(abs_top_builddir)/test/etap/run $(abs_top_srcdir)/src/couch_mrview/test
+ $(abs_top_builddir)/test/couchdb/run -v $(abs_top_srcdir)/src/couch_mrview/test
endif
ebin/%.app: src/%.app.src
diff --git a/src/couch_mrview/test/04-index-info.t b/src/couch_mrview/test/04-index-info.t
deleted file mode 100644
index 6b67b56d3..000000000
--- a/src/couch_mrview/test/04-index-info.t
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(9),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- timer:sleep(300),
- ok.
-
-sig() -> <<"276df562b152b3c4e5d34024f62672ed">>.
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
-
- {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map),
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
-
- {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
-
- etap:is(getval(signature, Info), sig(), "Signature is ok."),
- etap:is(getval(language, Info), <<"javascript">>, "Language is ok."),
- etap:is_greater(getval(disk_size, Info), 0, "Disk size is ok."),
- etap:is_greater(getval(data_size, Info), 0, "Data size is ok."),
- etap:is(getval(update_seq, Info), 11, "Update seq is ok."),
- etap:is(getval(purge_seq, Info), 0, "Purge seq is ok."),
- etap:is(getval(updater_running, Info), false, "No updater running."),
- etap:is(getval(compact_running, Info), false, "No compaction running."),
- etap:is(getval(waiting_clients, Info), 0, "No waiting clients."),
-
- ok.
-
-getval(Key, PL) ->
- {value, {Key, Val}} = lists:keysearch(Key, 1, PL),
- Val.
diff --git a/src/couch_mrview/test/05-collation.t b/src/couch_mrview/test/05-collation.t
deleted file mode 100644
index ac8f8bcf2..000000000
--- a/src/couch_mrview/test/05-collation.t
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:run(9, fun() -> test() end).
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- {ok, Db0} = couch_mrview_test_util:new_db(<<"foo">>, map),
- {ok, Db1} = couch_mrview_test_util:save_docs(Db0, docs()),
-
- test_collated_fwd(Db1),
- test_collated_rev(Db1),
- test_range_collation(Db1),
- test_inclusive_end(Db1),
- test_uninclusive_end(Db1),
- test_with_endkey_docid(Db1),
-
- ok.
-
-test_collated_fwd(Db) ->
- {ok, Results} = run_query(Db, []),
- Expect = [{meta, [{total, 26}, {offset, 0}]}] ++ rows(),
- etap:is(Results, Expect, "Values were collated correctly.").
-
-
-test_collated_rev(Db) ->
- {ok, Results} = run_query(Db, [{direction, rev}]),
- Expect = [{meta, [{total, 26}, {offset, 0}]}] ++ lists:reverse(rows()),
- etap:is(Results, Expect, "Values were collated correctly descending.").
-
-
-test_range_collation(Db) ->
- {_, Error} = lists:foldl(fun(V, {Count, Error}) ->
- {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]),
- Id = list_to_binary(integer_to_list(Count)),
- Expect = [
- {meta, [{total, 26}, {offset, Count}]},
- {row, [{id, Id}, {key, V}, {value, 0}]}
- ],
- case Results == Expect of
- true -> {Count+1, Error};
- _ -> {Count+1, true}
- end
- end, {0, false}, vals()),
- etap:is(Error, false, "Found each individual key correctly.").
-
-
-test_inclusive_end(Db) ->
- Opts = [{end_key, <<"b">>}, {inclusive_end, true}],
- {ok, Rows0} = run_query(Db, Opts),
- LastRow0 = lists:last(Rows0),
- Expect0 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
- etap:is(LastRow0, Expect0, "Inclusive end is correct."),
-
- {ok, Rows1} = run_query(Db, Opts ++ [{direction, rev}]),
- LastRow1 = lists:last(Rows1),
- Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
- etap:is(LastRow1, Expect1,
- "Inclusive end is correct with descending=true").
-
-test_uninclusive_end(Db) ->
- Opts = [{end_key, <<"b">>}, {inclusive_end, false}],
- {ok, Rows0} = run_query(Db, Opts),
- LastRow0 = lists:last(Rows0),
- Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
- etap:is(LastRow0, Expect0, "Uninclusive end is correct."),
-
- {ok, Rows1} = run_query(Db, Opts ++ [{direction, rev}]),
- LastRow1 = lists:last(Rows1),
- Expect1 = {row, [{id,<<"11">>}, {key,<<"B">>}, {value,0}]},
- etap:is(LastRow1, Expect1,
- "Uninclusive end is correct with descending=true").
-
-
-test_with_endkey_docid(Db) ->
- {ok, Rows0} = run_query(Db, [
- {end_key, <<"b">>}, {end_key_docid, <<"10">>},
- {inclusive_end, false}
- ]),
- Result0 = lists:last(Rows0),
- Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
- etap:is(Result0, Expect0, "Uninclsuive end with endkey_docid set is ok."),
-
- {ok, Rows1} = run_query(Db, [
- {end_key, <<"b">>}, {end_key_docid, <<"11">>},
- {inclusive_end, false}
- ]),
- Result1 = lists:last(Rows1),
- Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
- etap:is(Result1, Expect1, "Uninclsuive end with endkey_docid set is ok.").
-
-
-run_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"zing">>, Opts).
-
-
-docs() ->
- {Docs, _} = lists:foldl(fun(V, {Docs0, Count}) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary(integer_to_list(Count))},
- {<<"foo">>, V}
- ]}),
- {[Doc | Docs0], Count+1}
- end, {[], 0}, vals()),
- Docs.
-
-
-rows() ->
- {Rows, _} = lists:foldl(fun(V, {Rows0, Count}) ->
- Id = list_to_binary(integer_to_list(Count)),
- Row = {row, [{id, Id}, {key, V}, {value, 0}]},
- {[Row | Rows0], Count+1}
- end, {[], 0}, vals()),
- lists:reverse(Rows).
-
-
-vals() ->
- [
- null,
- false,
- true,
-
- 1,
- 2,
- 3.0,
- 4,
-
- <<"a">>,
- <<"A">>,
- <<"aa">>,
- <<"b">>,
- <<"B">>,
- <<"ba">>,
- <<"bb">>,
-
- [<<"a">>],
- [<<"b">>],
- [<<"b">>, <<"c">>],
- [<<"b">>, <<"c">>, <<"a">>],
- [<<"b">>, <<"d">>],
- [<<"b">>, <<"d">>, <<"e">>],
-
- {[{<<"a">>, 1}]},
- {[{<<"a">>, 2}]},
- {[{<<"b">>, 1}]},
- {[{<<"b">>, 2}]},
- {[{<<"b">>, 2}, {<<"a">>, 1}]},
- {[{<<"b">>, 2}, {<<"c">>, 2}]}
- ].
diff --git a/src/couch_mrview/test/07-compact-swap.t b/src/couch_mrview/test/07-compact-swap.t
deleted file mode 100644
index 4bfe12406..000000000
--- a/src/couch_mrview/test/07-compact-swap.t
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:run(1, fun() -> test() end).
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map, 1000),
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
- test_swap(Db),
- ok.
-
-
-test_swap(Db) ->
- {ok, QPid} = start_query(Db),
- {ok, MonRef} = couch_mrview:compact(Db, <<"_design/bar">>, [monitor]),
- receive
- {'DOWN', MonRef, process, _, _} -> ok
- after 1000 ->
- throw(compaction_failed)
- end,
- QPid ! {self(), continue},
- receive
- {QPid, Count} ->
- etap:is(Count, 1000, "View finished successfully.")
- after 1000 ->
- throw("query failed")
- end.
-
-
-start_query(Db) ->
- Self = self(),
- Pid = spawn(fun() ->
- CB = fun
- (_, wait) -> receive {Self, continue} -> {ok, 0} end;
- ({row, _}, Count) -> {ok, Count+1};
- (_, Count) -> {ok, Count}
- end,
- {ok, Result} =
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait),
- Self ! {self(), Result}
- end),
- {ok, Pid}.
diff --git a/src/couch_mrview/test/06-all-docs.t b/src/couch_mrview/test/couch_mrview_all_docs_tests.erl
index 4501aa5cb..4e098ffad 100644
--- a/src/couch_mrview/test/06-all-docs.t
+++ b/src/couch_mrview/test/couch_mrview_all_docs_tests.erl
@@ -1,6 +1,3 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
@@ -13,26 +10,62 @@
% License for the specific language governing permissions and limitations under
% the License.
-main(_) ->
- test_util:run(6, fun() -> test() end).
+-module(couch_mrview_all_docs_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(TIMEOUT, 1000).
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Pid.
- {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map),
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
- test_basic(Db),
- test_range(Db),
- test_rev_range(Db),
- test_limit_and_skip(Db),
- test_include_docs(Db),
- test_empty_view(Db),
+setup() ->
+ {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
+ Db.
+teardown(Db) ->
+ couch_db:close(Db),
+ couch_server:delete(Db#db.name, [?ADMIN_USER]),
ok.
-test_basic(Db) ->
+all_docs_test_() ->
+ {
+ "_all_docs view tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_query/1,
+ fun should_query_with_range/1,
+ fun should_query_with_range_rev/1,
+ fun should_query_with_limit_and_skip/1,
+ fun should_query_with_include_docs/1,
+ fun should_query_empty_views/1
+ ]
+ }
+ }
+ }.
+
+
+should_query(Db) ->
Result = run_query(Db, []),
Expect = {ok, [
{meta, [{total, 11}, {offset, 0}]},
@@ -48,10 +81,9 @@ test_basic(Db) ->
mk_row(<<"9">>, <<"1-558c8487d9aee25399a91b5d31d90fe2">>),
mk_row(<<"_design/bar">>, <<"1-a44e1dd1994a7717bf89c894ebd1f081">>)
]},
- etap:is(Result, Expect, "Simple view query worked.").
+ ?_assertEqual(Expect, Result).
-
-test_range(Db) ->
+should_query_with_range(Db) ->
Result = run_query(Db, [{start_key, <<"3">>}, {end_key, <<"5">>}]),
Expect = {ok, [
{meta, [{total, 11}, {offset, 3}]},
@@ -59,10 +91,9 @@ test_range(Db) ->
mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>)
]},
- etap:is(Result, Expect, "Query with range works.").
-
+ ?_assertEqual(Expect, Result).
-test_rev_range(Db) ->
+should_query_with_range_rev(Db) ->
Result = run_query(Db, [
{direction, rev},
{start_key, <<"5">>}, {end_key, <<"3">>},
@@ -74,10 +105,9 @@ test_rev_range(Db) ->
mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>)
]},
- etap:is(Result, Expect, "Query with reversed range works.").
+ ?_assertEqual(Expect, Result).
-
-test_limit_and_skip(Db) ->
+should_query_with_limit_and_skip(Db) ->
Result = run_query(Db, [
{start_key, <<"2">>},
{limit, 3},
@@ -89,10 +119,9 @@ test_limit_and_skip(Db) ->
mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>)
]},
- etap:is(Result, Expect, "Query with limit and skip works.").
-
+ ?_assertEqual(Expect, Result).
-test_include_docs(Db) ->
+should_query_with_include_docs(Db) ->
Result = run_query(Db, [
{start_key, <<"8">>},
{end_key, <<"8">>},
@@ -108,20 +137,18 @@ test_include_docs(Db) ->
{meta, [{total, 11}, {offset, 8}]},
{row, [{id, <<"8">>}, {key, <<"8">>}, {value, Val}, {doc, Doc}]}
]},
- etap:is(Result, Expect, "Query with include docs works.").
+ ?_assertEqual(Expect, Result).
-
-test_empty_view(Db) ->
+should_query_empty_views(Db) ->
Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
Expect = {ok, [
{meta, [{total, 0}, {offset, 0}]}
]},
- etap:is(Result, Expect, "Empty views are correct.").
+ ?_assertEqual(Expect, Result).
mk_row(Id, Rev) ->
{row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}.
-
run_query(Db, Opts) ->
couch_mrview:query_all_docs(Db, Opts).
diff --git a/src/couch_mrview/test/couch_mrview_collation_tests.erl b/src/couch_mrview/test/couch_mrview_collation_tests.erl
new file mode 100644
index 000000000..2e0b75b73
--- /dev/null
+++ b/src/couch_mrview/test/couch_mrview_collation_tests.erl
@@ -0,0 +1,202 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_mrview_collation_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(TIMEOUT, 1000).
+-define(VALUES, [
+ null,
+ false,
+ true,
+
+ 1,
+ 2,
+ 3.0,
+ 4,
+
+ <<"a">>,
+ <<"A">>,
+ <<"aa">>,
+ <<"b">>,
+ <<"B">>,
+ <<"ba">>,
+ <<"bb">>,
+
+ [<<"a">>],
+ [<<"b">>],
+ [<<"b">>, <<"c">>],
+ [<<"b">>, <<"c">>, <<"a">>],
+ [<<"b">>, <<"d">>],
+ [<<"b">>, <<"d">>, <<"e">>],
+
+ {[{<<"a">>, 1}]},
+ {[{<<"a">>, 2}]},
+ {[{<<"b">>, 1}]},
+ {[{<<"b">>, 2}]},
+ {[{<<"b">>, 2}, {<<"a">>, 1}]},
+ {[{<<"b">>, 2}, {<<"c">>, 2}]}
+]).
+
+
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Pid.
+
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
+
+setup() ->
+ {ok, Db1} = couch_mrview_test_util:new_db(?tempdb(), map),
+ {ok, Db2} = couch_mrview_test_util:save_docs(Db1, make_docs()),
+ Db2.
+
+teardown(Db) ->
+ couch_db:close(Db),
+ couch_server:delete(Db#db.name, [?ADMIN_USER]),
+ ok.
+
+
+collation_test_() ->
+ {
+ "Collation tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_collate_fwd/1,
+ fun should_collate_rev/1,
+ fun should_collate_range/1,
+ fun should_collate_with_inclusive_end_fwd/1,
+ fun should_collate_with_inclusive_end_rev/1,
+ fun should_collate_without_inclusive_end_fwd/1,
+ fun should_collate_without_inclusive_end_rev/1,
+ fun should_collate_with_endkey_docid/1
+ ]
+ }
+ }
+ }.
+
+
+should_collate_fwd(Db) ->
+ {ok, Results} = run_query(Db, []),
+ Expect = [{meta, [{total, 26}, {offset, 0}]}] ++ rows(),
+ %% cannot use _assertEqual since mrview converts
+ %% value 3.0 to 3 making assertion fail
+ ?_assert(Expect == Results).
+
+should_collate_rev(Db) ->
+ {ok, Results} = run_query(Db, [{direction, rev}]),
+ Expect = [{meta, [{total, 26}, {offset, 0}]}] ++ lists:reverse(rows()),
+ %% cannot use _assertEqual since mrview converts
+ %% value 3.0 to 3 making assertion fail
+ ?_assert(Expect == Results).
+
+should_collate_range(Db) ->
+ ?_assertNot(
+ begin
+ {_, Error} = lists:foldl(fun(V, {Count, Error}) ->
+ {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]),
+ Id = list_to_binary(integer_to_list(Count)),
+ Expect = [
+ {meta, [{total, 26}, {offset, Count}]},
+ {row, [{id, Id}, {key, V}, {value, 0}]}
+ ],
+ case Results == Expect of
+ true -> {Count+1, Error};
+ _ -> {Count+1, true}
+ end
+ end, {0, false}, ?VALUES),
+ Error
+ end).
+
+should_collate_with_inclusive_end_fwd(Db) ->
+ Opts = [{end_key, <<"b">>}, {inclusive_end, true}],
+ {ok, Rows0} = run_query(Db, Opts),
+ LastRow = lists:last(Rows0),
+ Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
+ ?_assertEqual(Expect, LastRow).
+
+should_collate_with_inclusive_end_rev(Db) ->
+ Opts = [{end_key, <<"b">>}, {inclusive_end, true}, {direction, rev}],
+ {ok, Rows} = run_query(Db, Opts),
+ LastRow = lists:last(Rows),
+ Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
+ ?_assertEqual(Expect, LastRow).
+
+should_collate_without_inclusive_end_fwd(Db) ->
+ Opts = [{end_key, <<"b">>}, {inclusive_end, false}],
+ {ok, Rows0} = run_query(Db, Opts),
+ LastRow = lists:last(Rows0),
+ Expect = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
+ ?_assertEqual(Expect, LastRow).
+
+should_collate_without_inclusive_end_rev(Db) ->
+ Opts = [{end_key, <<"b">>}, {inclusive_end, false}, {direction, rev}],
+ {ok, Rows} = run_query(Db, Opts),
+ LastRow = lists:last(Rows),
+ Expect = {row, [{id,<<"11">>}, {key,<<"B">>}, {value,0}]},
+ ?_assertEqual(Expect, LastRow).
+
+should_collate_with_endkey_docid(Db) ->
+ ?_test(begin
+ {ok, Rows0} = run_query(Db, [
+ {end_key, <<"b">>}, {end_key_docid, <<"10">>},
+ {inclusive_end, false}
+ ]),
+ Result0 = lists:last(Rows0),
+ Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
+ ?assertEqual(Expect0, Result0),
+
+ {ok, Rows1} = run_query(Db, [
+ {end_key, <<"b">>}, {end_key_docid, <<"11">>},
+ {inclusive_end, false}
+ ]),
+ Result1 = lists:last(Rows1),
+ Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
+ ?assertEqual(Expect1, Result1)
+ end).
+
+
+make_docs() ->
+ {Docs, _} = lists:foldl(fun(V, {Docs0, Count}) ->
+ Doc = couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary(integer_to_list(Count))},
+ {<<"foo">>, V}
+ ]}),
+ {[Doc | Docs0], Count+1}
+ end, {[], 0}, ?VALUES),
+ Docs.
+
+rows() ->
+ {Rows, _} = lists:foldl(fun(V, {Rows0, Count}) ->
+ Id = list_to_binary(integer_to_list(Count)),
+ Row = {row, [{id, Id}, {key, V}, {value, 0}]},
+ {[Row | Rows0], Count+1}
+ end, {[], 0}, ?VALUES),
+ lists:reverse(Rows).
+
+run_query(Db, Opts) ->
+ couch_mrview:query_view(Db, <<"_design/bar">>, <<"zing">>, Opts).
diff --git a/src/couch_mrview/test/couch_mrview_compact_tests.erl b/src/couch_mrview/test/couch_mrview_compact_tests.erl
new file mode 100644
index 000000000..4cb7dafd0
--- /dev/null
+++ b/src/couch_mrview/test/couch_mrview_compact_tests.erl
@@ -0,0 +1,101 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_mrview_compact_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(TIMEOUT, 1000).
+
+
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Pid.
+
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
+
+setup() ->
+ {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 1000),
+ Db.
+
+teardown(Db) ->
+ couch_db:close(Db),
+ couch_server:delete(Db#db.name, [?ADMIN_USER]),
+ ok.
+
+
+compaction_test_() ->
+ {
+ "Compaction tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_swap/1
+ ]
+ }
+ }
+ }.
+
+
+should_swap(Db) ->
+ ?_test(begin
+ couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
+ {ok, QPid} = start_query(Db),
+ {ok, MonRef} = couch_mrview:compact(Db, <<"_design/bar">>, [monitor]),
+ receive
+ {'DOWN', MonRef, process, _, _} -> ok
+ after ?TIMEOUT ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, "compaction failed"}]})
+ end,
+ QPid ! {self(), continue},
+ receive
+ {QPid, Count} ->
+ ?assertEqual(1000, Count)
+ after ?TIMEOUT ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, "query failed"}]})
+ end
+ end).
+
+
+start_query(Db) ->
+ Self = self(),
+ Pid = spawn(fun() ->
+ CB = fun
+ (_, wait) -> receive {Self, continue} -> {ok, 0} end;
+ ({row, _}, Count) -> {ok, Count+1};
+ (_, Count) -> {ok, Count}
+ end,
+ {ok, Result} =
+ couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait),
+ Self ! {self(), Result}
+ end),
+ {ok, Pid}.
diff --git a/src/couch_mrview/test/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/couch_mrview_index_info_tests.erl
new file mode 100644
index 000000000..6c30da822
--- /dev/null
+++ b/src/couch_mrview/test/couch_mrview_index_info_tests.erl
@@ -0,0 +1,87 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_mrview_index_info_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(TIMEOUT, 1000).
+
+
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Pid.
+
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
+
+setup() ->
+ {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
+ couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
+ {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
+ {Db, Info}.
+
+teardown({Db, _}) ->
+ couch_db:close(Db),
+ couch_server:delete(Db#db.name, [?ADMIN_USER]),
+ ok.
+
+
+view_info_test_() ->
+ {
+ "Views index tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_get_property/1
+ ]
+ }
+ }
+ }.
+
+
+should_get_property({_, Info}) ->
+ InfoProps = [
+ {signature, <<"276df562b152b3c4e5d34024f62672ed">>},
+ {language, <<"javascript">>},
+ {disk_size, 314},
+ {data_size, 263},
+ {update_seq, 11},
+ {purge_seq, 0},
+ {updater_running, false},
+ {compact_running, false},
+ {waiting_clients, 0}
+ ],
+ [
+ {atom_to_list(Key), ?_assertEqual(Val, getval(Key, Info))}
+ || {Key, Val} <- InfoProps
+ ].
+
+
+getval(Key, PL) ->
+ {value, {Key, Val}} = lists:keysearch(Key, 1, PL),
+ Val.
+
+
diff --git a/src/couch_mrview/test/02-map-views.t b/src/couch_mrview/test/couch_mrview_map_views_tests.erl
index 7e1ca0c3d..b2f857588 100644
--- a/src/couch_mrview/test/02-map-views.t
+++ b/src/couch_mrview/test/couch_mrview_map_views_tests.erl
@@ -1,6 +1,3 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
@@ -13,36 +10,59 @@
% License for the specific language governing permissions and limitations under
% the License.
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(6),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- timer:sleep(300),
- ok.
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
-
- {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map),
-
- test_basic(Db),
- test_range(Db),
- test_rev_range(Db),
- test_limit_and_skip(Db),
- test_include_docs(Db),
- test_empty_view(Db),
-
- ok.
-
-
-test_basic(Db) ->
+-module(couch_mrview_map_views_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(TIMEOUT, 1000).
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+
+
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Pid.
+
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
+
+setup() ->
+ {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
+ Db.
+
+teardown(Db) ->
+ ok = couch_db:close(Db).
+
+
+map_views_test_() ->
+ {
+ "Map views",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_map/1,
+ fun should_map_with_range/1,
+ fun should_map_with_limit_and_skip/1,
+ fun should_map_with_include_docs/1,
+ fun should_map_empty_views/1
+ ]
+ }
+ }
+ }.
+
+
+should_map(Db) ->
Result = run_query(Db, []),
Expect = {ok, [
{meta, [{total, 10}, {offset, 0}]},
@@ -57,21 +77,9 @@ test_basic(Db) ->
{row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
{row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
]},
- etap:is(Result, Expect, "Simple view query worked.").
-
-
-test_range(Db) ->
- Result = run_query(Db, [{start_key, 3}, {end_key, 5}]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- etap:is(Result, Expect, "Query with range works.").
+ ?_assertEqual(Expect, Result).
-
-test_rev_range(Db) ->
+should_map_with_range(Db) ->
Result = run_query(Db, [
{direction, rev},
{start_key, 5}, {end_key, 3},
@@ -83,10 +91,9 @@ test_rev_range(Db) ->
{row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
{row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
]},
- etap:is(Result, Expect, "Query with reversed range works.").
-
+ ?_assertEqual(Expect, Result).
-test_limit_and_skip(Db) ->
+should_map_with_limit_and_skip(Db) ->
Result = run_query(Db, [
{start_key, 2},
{limit, 3},
@@ -98,10 +105,9 @@ test_limit_and_skip(Db) ->
{row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
{row, [{id, <<"7">>}, {key, 7}, {value, 7}]}
]},
- etap:is(Result, Expect, "Query with limit and skip works.").
+ ?_assertEqual(Expect, Result).
-
-test_include_docs(Db) ->
+should_map_with_include_docs(Db) ->
Result = run_query(Db, [
{start_key, 8},
{end_key, 8},
@@ -116,15 +122,14 @@ test_include_docs(Db) ->
{meta, [{total, 10}, {offset, 7}]},
{row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
]},
- etap:is(Result, Expect, "Query with include docs works.").
-
+ ?_assertEqual(Expect, Result).
-test_empty_view(Db) ->
+should_map_empty_views(Db) ->
Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
Expect = {ok, [
{meta, [{total, 0}, {offset, 0}]}
]},
- etap:is(Result, Expect, "Empty views are correct.").
+ ?_assertEqual(Expect, Result).
run_query(Db, Opts) ->
diff --git a/src/couch_mrview/test/01-load.t b/src/couch_mrview/test/couch_mrview_modules_load_tests.erl
index a57c1a775..bfab646dd 100644
--- a/src/couch_mrview/test/01-load.t
+++ b/src/couch_mrview/test/couch_mrview_modules_load_tests.erl
@@ -1,6 +1,3 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
@@ -13,10 +10,19 @@
% License for the specific language governing permissions and limitations under
% the License.
-% Test that we can load each module.
+-module(couch_mrview_modules_load_tests).
+
+-include("couch_eunit.hrl").
+
+
+modules_load_test_() ->
+ {
+ "Verify that all modules loads",
+ should_load_modules()
+ }.
+
-main(_) ->
- test_util:init_code_path(),
+should_load_modules() ->
Modules = [
couch_mrview,
couch_mrview_compactor,
@@ -25,10 +31,7 @@ main(_) ->
couch_mrview_updater,
couch_mrview_util
],
+ [should_load_module(Mod) || Mod <- Modules].
- etap:plan(length(Modules)),
- lists:foreach(
- fun(Module) ->
- etap:loaded_ok(Module, lists:concat(["Loaded: ", Module]))
- end, Modules),
- etap:end_tests().
+should_load_module(Mod) ->
+ {atom_to_list(Mod), ?_assertMatch({module, _}, code:load_file(Mod))}.
diff --git a/src/couch_mrview/test/03-red-views.t b/src/couch_mrview/test/couch_mrview_red_views_tests.erl
index 6ad341bd8..430d3e259 100644
--- a/src/couch_mrview/test/03-red-views.t
+++ b/src/couch_mrview/test/couch_mrview_red_views_tests.erl
@@ -1,6 +1,3 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
@@ -13,50 +10,83 @@
% License for the specific language governing permissions and limitations under
% the License.
-main(_) ->
- test_util:run(4, fun() -> test() end).
+-module(couch_mrview_red_views_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(TIMEOUT, 1000).
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Pid.
- {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, red),
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
- test_basic(Db),
- test_key_range(Db),
- test_group_level(Db),
- test_group_exact(Db),
+setup() ->
+ {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), red),
+ Db.
- ok.
+teardown(Db) ->
+ ok = couch_db:close(Db).
-test_basic(Db) ->
+reduce_views_test_() ->
+ {
+ "Reduce views",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_reduce_basic/1,
+ fun should_reduce_key_range/1,
+ fun should_reduce_with_group_level/1,
+ fun should_reduce_with_group_exact/1
+ ]
+ }
+ }
+ }.
+
+
+should_reduce_basic(Db) ->
Result = run_query(Db, []),
Expect = {ok, [
{meta, []},
{row, [{key, null}, {value, 55}]}
]},
- etap:is(Result, Expect, "Simple reduce view works.").
+ ?_assertEqual(Expect, Result).
-
-test_key_range(Db) ->
+should_reduce_key_range(Db) ->
Result = run_query(Db, [{start_key, [0, 2]}, {end_key, [0, 4]}]),
Expect = {ok, [
{meta, []},
{row, [{key, null}, {value, 6}]}
]},
- etap:is(Result, Expect, "Reduce with key range works.").
-
+ ?_assertEqual(Expect, Result).
-test_group_level(Db) ->
+should_reduce_with_group_level(Db) ->
Result = run_query(Db, [{group_level, 1}]),
Expect = {ok, [
{meta, []},
{row, [{key, [0]}, {value, 30}]},
{row, [{key, [1]}, {value, 25}]}
]},
- etap:is(Result, Expect, "Group level works.").
+ ?_assertEqual(Expect, Result).
-test_group_exact(Db) ->
+should_reduce_with_group_exact(Db) ->
Result = run_query(Db, [{group_level, exact}]),
Expect = {ok, [
{meta, []},
@@ -71,7 +101,7 @@ test_group_exact(Db) ->
{row, [{key, [1, 7]}, {value, 7}]},
{row, [{key, [1, 9]}, {value, 9}]}
]},
- etap:is(Result, Expect, "Group exact works.").
+ ?_assertEqual(Expect, Result).
run_query(Db, Opts) ->
diff --git a/src/couch_replicator/Makefile.am b/src/couch_replicator/Makefile.am
index 2dcd47dfd..67c987237 100644
--- a/src/couch_replicator/Makefile.am
+++ b/src/couch_replicator/Makefile.am
@@ -36,13 +36,13 @@ source_files = \
src/couch_replicator.erl
test_files = \
- test/01-load.t \
- test/02-httpc-pool.t \
- test/03-replication-compact.t \
- test/04-replication-large-atts.t \
- test/05-replication-many-leaves.t \
- test/06-doc-missing-stubs.t \
- test/07-use-checkpoints.t
+ test/couch_replicator_compact_tests.erl \
+ test/couch_replicator_httpc_pool_tests.erl \
+ test/couch_replicator_large_atts_tests.erl \
+ test/couch_replicator_many_leaves_tests.erl \
+ test/couch_replicator_missing_stubs_tests.erl \
+ test/couch_replicator_modules_load_tests.erl \
+ test/couch_replicator_use_checkpoints_tests.erl
compiled_files = \
ebin/couch_replicator_api_wrap.beam \
@@ -62,7 +62,7 @@ CLEANFILES = $(compiled_files)
check:
if TESTS
- $(abs_top_builddir)/test/etap/run $(abs_top_srcdir)/src/couch_replicator/test
+ $(abs_top_builddir)/test/couchdb/run -v $(abs_top_srcdir)/src/couch_replicator/test
endif
ebin/%.app: src/%.app.src
diff --git a/src/couch_replicator/test/02-httpc-pool.t b/src/couch_replicator/test/02-httpc-pool.t
deleted file mode 100755
index a7bde6c88..000000000
--- a/src/couch_replicator/test/02-httpc-pool.t
+++ /dev/null
@@ -1,250 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(55),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- ibrowse:start(),
-
- test_pool_full(),
- test_worker_dead_pool_non_full(),
- test_worker_dead_pool_full(),
-
- couch_server_sup:stop(),
- ok.
-
-
-test_pool_full() ->
- Pool = spawn_pool(),
- Client1 = spawn_client(Pool),
- Client2 = spawn_client(Pool),
- Client3 = spawn_client(Pool),
-
- etap:diag("Check that we can spawn the max number of connections."),
- etap:is(ping_client(Client1), ok, "Client 1 started ok."),
- etap:is(ping_client(Client2), ok, "Client 2 started ok."),
- etap:is(ping_client(Client3), ok, "Client 3 started ok."),
-
- Worker1 = get_client_worker(Client1, "1"),
- Worker2 = get_client_worker(Client2, "2"),
- Worker3 = get_client_worker(Client3, "3"),
- etap:is(is_process_alive(Worker1), true, "Client's 1 worker is alive."),
- etap:is(is_process_alive(Worker2), true, "Client's 2 worker is alive."),
- etap:is(is_process_alive(Worker3), true, "Client's 3 worker is alive."),
-
- etap:isnt(Worker1, Worker2, "Clients 1 and 2 got different workers."),
- etap:isnt(Worker2, Worker3, "Clients 2 and 3 got different workers."),
- etap:isnt(Worker1, Worker3, "Clients 1 and 3 got different workers."),
-
- etap:diag("Check that client 4 blocks waiting for a worker."),
- Client4 = spawn_client(Pool),
- etap:is(ping_client(Client4), timeout, "Client 4 blocked while waiting."),
-
- etap:diag("Check that stopping a client gives up its worker."),
- etap:is(stop_client(Client1), ok, "First client stopped."),
-
- etap:diag("And check that our blocked client has been unblocked."),
- etap:is(ping_client(Client4), ok, "Client 4 was unblocked."),
-
- Worker4 = get_client_worker(Client4, "4"),
- etap:is(is_process_alive(Worker4), true, "Client's 4 worker is alive."),
- etap:is(Worker4, Worker1, "Client 4 got worker that client 1 got before."),
-
- lists:foreach(fun(C) -> ok = stop_client(C) end, [Client2, Client3, Client4]),
- stop_pool(Pool).
-
-
-test_worker_dead_pool_non_full() ->
- Pool = spawn_pool(),
- Client1 = spawn_client(Pool),
-
- etap:is(ping_client(Client1), ok, "Client 1 started ok."),
- Worker1 = get_client_worker(Client1, "1"),
- etap:is(is_process_alive(Worker1), true, "Client's 1 worker is alive."),
-
- etap:diag("Kill client's 1 worker."),
- etap:is(kill_client_worker(Client1), ok, "Killed client's 1 worker."),
- etap:is(is_process_alive(Worker1), false, "Client's 1 worker process is dead."),
-
- etap:is(stop_client(Client1), ok, "First client stopped and released its worker."),
-
- Client2 = spawn_client(Pool),
- etap:is(ping_client(Client2), ok, "Client 2 started ok."),
- Worker2 = get_client_worker(Client2, "2"),
- etap:isnt(Worker2, Worker1, "Client 2 got a different worker from client 1"),
- etap:is(is_process_alive(Worker2), true, "Client's 2 worker is alive."),
-
- etap:is(stop_client(Client2), ok, "Second client stopped."),
- stop_pool(Pool).
-
-
-test_worker_dead_pool_full() ->
- Pool = spawn_pool(),
- Client1 = spawn_client(Pool),
- Client2 = spawn_client(Pool),
- Client3 = spawn_client(Pool),
-
- etap:diag("Check that we can spawn the max number of connections."),
- etap:is(ping_client(Client1), ok, "Client 1 started ok."),
- etap:is(ping_client(Client2), ok, "Client 2 started ok."),
- etap:is(ping_client(Client3), ok, "Client 3 started ok."),
-
- Worker1 = get_client_worker(Client1, "1"),
- Worker2 = get_client_worker(Client2, "2"),
- Worker3 = get_client_worker(Client3, "3"),
- etap:is(is_process_alive(Worker1), true, "Client's 1 worker is alive."),
- etap:is(is_process_alive(Worker2), true, "Client's 2 worker is alive."),
- etap:is(is_process_alive(Worker3), true, "Client's 3 worker is alive."),
-
- etap:isnt(Worker1, Worker2, "Clients 1 and 2 got different workers."),
- etap:isnt(Worker2, Worker3, "Clients 2 and 3 got different workers."),
- etap:isnt(Worker1, Worker3, "Clients 1 and 3 got different workers."),
-
- etap:diag("Check that client 4 blocks waiting for a worker."),
- Client4 = spawn_client(Pool),
- etap:is(ping_client(Client4), timeout, "Client 4 blocked while waiting."),
-
- etap:diag("Kill client's 1 worker."),
- etap:is(kill_client_worker(Client1), ok, "Killed client's 1 worker."),
- etap:is(is_process_alive(Worker1), false, "Client's 1 worker process is dead."),
-
- etap:diag("Check client 4 got unblocked after first worker's death"),
- etap:is(ping_client(Client4), ok, "Client 4 not blocked anymore."),
-
- Worker4 = get_client_worker(Client4, "4"),
- etap:is(is_process_alive(Worker4), true, "Client's 4 worker is alive."),
- etap:isnt(Worker4, Worker1, "Client 4 got a worker different from client 1."),
- etap:isnt(Worker4, Worker2, "Client 4 got a worker different from client 2."),
- etap:isnt(Worker4, Worker3, "Client 4 got a worker different from client 3."),
-
- etap:diag("Check that stopping client 1 is a noop."),
- etap:is(stop_client(Client1), ok, "First client stopped."),
-
- etap:is(is_process_alive(Worker2), true, "Client's 2 worker still alive."),
- etap:is(is_process_alive(Worker3), true, "Client's 3 worker still alive."),
- etap:is(is_process_alive(Worker4), true, "Client's 4 worker still alive."),
-
- etap:diag("Check that client 5 blocks waiting for a worker."),
- Client5 = spawn_client(Pool),
- etap:is(ping_client(Client5), timeout, "Client 5 blocked while waiting."),
-
- etap:diag("Check that stopping client 2 gives up its worker."),
- etap:is(stop_client(Client2), ok, "Second client stopped."),
-
- etap:diag("Now check that client 5 has been unblocked."),
- etap:is(ping_client(Client5), ok, "Client 5 was unblocked."),
-
- Worker5 = get_client_worker(Client5, "5"),
- etap:is(is_process_alive(Worker5), true, "Client's 5 worker is alive."),
- etap:isnt(Worker5, Worker1, "Client 5 got a worker different from client 1."),
- etap:is(Worker5, Worker2, "Client 5 got same worker as client 2."),
- etap:isnt(Worker5, Worker3, "Client 5 got a worker different from client 3."),
- etap:isnt(Worker5, Worker4, "Client 5 got a worker different from client 4."),
-
- etap:is(is_process_alive(Worker3), true, "Client's 3 worker still alive."),
- etap:is(is_process_alive(Worker4), true, "Client's 4 worker still alive."),
- etap:is(is_process_alive(Worker5), true, "Client's 5 worker still alive."),
-
- lists:foreach(fun(C) -> ok = stop_client(C) end, [Client3, Client4, Client5]),
- stop_pool(Pool).
-
-
-spawn_client(Pool) ->
- Parent = self(),
- Ref = make_ref(),
- Pid = spawn(fun() ->
- {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool),
- loop(Parent, Ref, Worker, Pool)
- end),
- {Pid, Ref}.
-
-
-ping_client({Pid, Ref}) ->
- Pid ! ping,
- receive
- {pong, Ref} ->
- ok
- after 3000 ->
- timeout
- end.
-
-
-get_client_worker({Pid, Ref}, ClientName) ->
- Pid ! get_worker,
- receive
- {worker, Ref, Worker} ->
- Worker
- after 3000 ->
- etap:bail("Timeout getting client " ++ ClientName ++ " worker.")
- end.
-
-
-stop_client({Pid, Ref}) ->
- Pid ! stop,
- receive
- {stop, Ref} ->
- ok
- after 3000 ->
- timeout
- end.
-
-
-kill_client_worker({Pid, Ref}) ->
- Pid ! get_worker,
- receive
- {worker, Ref, Worker} ->
- exit(Worker, kill),
- ok
- after 3000 ->
- timeout
- end.
-
-
-loop(Parent, Ref, Worker, Pool) ->
- receive
- ping ->
- Parent ! {pong, Ref},
- loop(Parent, Ref, Worker, Pool);
- get_worker ->
- Parent ! {worker, Ref, Worker},
- loop(Parent, Ref, Worker, Pool);
- stop ->
- couch_replicator_httpc_pool:release_worker(Pool, Worker),
- Parent ! {stop, Ref}
- end.
-
-
-spawn_pool() ->
- Host = couch_config:get("httpd", "bind_address", "127.0.0.1"),
- Port = couch_config:get("httpd", "port", "5984"),
- {ok, Pool} = couch_replicator_httpc_pool:start_link(
- "http://" ++ Host ++ ":5984", [{max_connections, 3}]),
- Pool.
-
-
-stop_pool(Pool) ->
- ok = couch_replicator_httpc_pool:stop(Pool).
diff --git a/src/couch_replicator/test/03-replication-compact.t b/src/couch_replicator/test/03-replication-compact.t
deleted file mode 100755
index 7c4d38c93..000000000
--- a/src/couch_replicator/test/03-replication-compact.t
+++ /dev/null
@@ -1,488 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Verify that compacting databases that are being used as the source or
-% target of a replication doesn't affect the replication and that the
-% replication doesn't hold their reference counters forever.
-
--define(b2l(B), binary_to_list(B)).
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--record(db, {
- main_pid = nil,
- update_pid = nil,
- compactor_pid = nil,
- instance_start_time, % number of microsecs since jan 1 1970 as a binary string
- fd,
- updater_fd,
- fd_ref_counter,
- header = nil,
- committed_update_seq,
- fulldocinfo_by_id_btree,
- docinfo_by_seq_btree,
- local_docs_btree,
- update_seq,
- name,
- filepath,
- validate_doc_funs = [],
- security = [],
- security_ptr = nil,
- user_ctx = #user_ctx{},
- waiting_delayed_commit = nil,
- revs_limit = 1000,
- fsync_options = [],
- options = [],
- compression,
- before_doc_update,
- after_doc_read
-}).
-
--record(rep, {
- id,
- source,
- target,
- options,
- user_ctx,
- doc_id
-}).
-
-
-source_db_name() -> <<"couch_test_rep_db_a">>.
-target_db_name() -> <<"couch_test_rep_db_b">>.
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(376),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- ibrowse:start(),
-
- Pairs = [
- {source_db_name(), target_db_name()},
- {{remote, source_db_name()}, target_db_name()},
- {source_db_name(), {remote, target_db_name()}},
- {{remote, source_db_name()}, {remote, (target_db_name())}}
- ],
-
- lists:foreach(
- fun({Source, Target}) ->
- {ok, SourceDb} = create_db(source_db_name()),
- etap:is(couch_db:is_idle(SourceDb), true,
- "Source database is idle before starting replication"),
-
- {ok, TargetDb} = create_db(target_db_name()),
- etap:is(couch_db:is_idle(TargetDb), true,
- "Target database is idle before starting replication"),
-
- {ok, RepPid, RepId} = replicate(Source, Target),
- check_active_tasks(RepPid, RepId, Source, Target),
- {ok, DocsWritten} = populate_and_compact_test(
- RepPid, SourceDb, TargetDb),
-
- wait_target_in_sync(DocsWritten, TargetDb),
- check_active_tasks(RepPid, RepId, Source, Target),
- cancel_replication(RepId, RepPid),
- compare_dbs(SourceDb, TargetDb),
-
- delete_db(SourceDb),
- delete_db(TargetDb),
- couch_server_sup:stop(),
- ok = timer:sleep(1000),
- couch_server_sup:start_link(test_util:config_files())
- end,
- Pairs),
-
- couch_server_sup:stop(),
- ok.
-
-
-populate_and_compact_test(RepPid, SourceDb0, TargetDb0) ->
- etap:is(is_process_alive(RepPid), true, "Replication process is alive"),
- check_db_alive("source", SourceDb0),
- check_db_alive("target", TargetDb0),
-
- Writer = spawn_writer(SourceDb0),
-
- lists:foldl(
- fun(_, {SourceDb, TargetDb, DocCount}) ->
- pause_writer(Writer),
-
- compact_db("source", SourceDb),
- etap:is(is_process_alive(RepPid), true,
- "Replication process is alive after source database compaction"),
- check_db_alive("source", SourceDb),
- check_ref_counter("source", SourceDb),
-
- compact_db("target", TargetDb),
- etap:is(is_process_alive(RepPid), true,
- "Replication process is alive after target database compaction"),
- check_db_alive("target", TargetDb),
- check_ref_counter("target", TargetDb),
-
- {ok, SourceDb2} = reopen_db(SourceDb),
- {ok, TargetDb2} = reopen_db(TargetDb),
-
- resume_writer(Writer),
- wait_writer(Writer, DocCount),
-
- compact_db("source", SourceDb2),
- etap:is(is_process_alive(RepPid), true,
- "Replication process is alive after source database compaction"),
- check_db_alive("source", SourceDb2),
- pause_writer(Writer),
- check_ref_counter("source", SourceDb2),
- resume_writer(Writer),
-
- compact_db("target", TargetDb2),
- etap:is(is_process_alive(RepPid), true,
- "Replication process is alive after target database compaction"),
- check_db_alive("target", TargetDb2),
- pause_writer(Writer),
- check_ref_counter("target", TargetDb2),
- resume_writer(Writer),
-
- {ok, SourceDb3} = reopen_db(SourceDb2),
- {ok, TargetDb3} = reopen_db(TargetDb2),
- {SourceDb3, TargetDb3, DocCount + 50}
- end,
- {SourceDb0, TargetDb0, 50}, lists:seq(1, 5)),
-
- DocsWritten = stop_writer(Writer),
- {ok, DocsWritten}.
-
-
-check_db_alive(Type, #db{main_pid = Pid}) ->
- etap:is(is_process_alive(Pid), true,
- "Local " ++ Type ++ " database main pid is alive").
-
-
-compact_db(Type, #db{name = Name}) ->
- {ok, Db} = couch_db:open_int(Name, []),
- {ok, CompactPid} = couch_db:start_compact(Db),
- MonRef = erlang:monitor(process, CompactPid),
- receive
- {'DOWN', MonRef, process, CompactPid, normal} ->
- ok;
- {'DOWN', MonRef, process, CompactPid, Reason} ->
- etap:bail("Error compacting " ++ Type ++ " database " ++ ?b2l(Name) ++
- ": " ++ couch_util:to_list(Reason))
- after 30000 ->
- etap:bail("Compaction for " ++ Type ++ " database " ++ ?b2l(Name) ++
- " didn't finish")
- end,
- ok = couch_db:close(Db).
-
-
-check_ref_counter(Type, #db{name = Name, fd_ref_counter = OldRefCounter}) ->
- MonRef = erlang:monitor(process, OldRefCounter),
- receive
- {'DOWN', MonRef, process, OldRefCounter, _} ->
- etap:diag("Old " ++ Type ++ " database ref counter terminated")
- after 30000 ->
- etap:bail("Old " ++ Type ++ " database ref counter didn't terminate")
- end,
- {ok, #db{fd_ref_counter = NewRefCounter} = Db} = couch_db:open_int(Name, []),
- ok = couch_db:close(Db),
- etap:isnt(
- NewRefCounter, OldRefCounter, Type ++ " database has new ref counter").
-
-
-reopen_db(#db{name = Name}) ->
- {ok, Db} = couch_db:open_int(Name, []),
- ok = couch_db:close(Db),
- {ok, Db}.
-
-
-wait_target_in_sync(DocCount, #db{name = TargetName}) ->
- wait_target_in_sync_loop(DocCount, TargetName, 300).
-
-
-wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
- etap:bail("Could not get source and target databases in sync");
-wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
- {ok, Target} = couch_db:open_int(TargetName, []),
- {ok, TargetInfo} = couch_db:get_db_info(Target),
- ok = couch_db:close(Target),
- TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
- case TargetDocCount == DocCount of
- true ->
- etap:diag("Source and target databases are in sync");
- false ->
- ok = timer:sleep(100),
- wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
- end.
-
-
-compare_dbs(#db{name = SourceName}, #db{name = TargetName}) ->
- {ok, SourceDb} = couch_db:open_int(SourceName, []),
- {ok, TargetDb} = couch_db:open_int(TargetName, []),
- Fun = fun(FullDocInfo, _, Acc) ->
- {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
- {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
- DocId = couch_util:get_value(<<"_id">>, Props),
- DocTarget = case couch_db:open_doc(TargetDb, DocId) of
- {ok, DocT} ->
- DocT;
- Error ->
- etap:bail("Error opening document '" ++ ?b2l(DocId) ++
- "' from target: " ++ couch_util:to_list(Error))
- end,
- DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
- case DocTargetJson of
- DocJson ->
- ok;
- _ ->
- etap:bail("Content from document '" ++ ?b2l(DocId) ++
- "' differs in target database")
- end,
- {ok, Acc}
- end,
- {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
- etap:diag("Target database has the same documents as the source database"),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb).
-
-
-check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
- Source = case Src of
- {remote, NameSrc} ->
- <<(db_url(NameSrc))/binary, $/>>;
- _ ->
- Src
- end,
- Target = case Tgt of
- {remote, NameTgt} ->
- <<(db_url(NameTgt))/binary, $/>>;
- _ ->
- Tgt
- end,
- FullRepId = list_to_binary(BaseId ++ Ext),
- Pid = list_to_binary(pid_to_list(RepPid)),
- [RepTask] = couch_task_status:all(),
- etap:is(couch_util:get_value(pid, RepTask), Pid,
- "_active_tasks entry has correct pid property"),
- etap:is(couch_util:get_value(replication_id, RepTask), FullRepId,
- "_active_tasks entry has right replication id"),
- etap:is(couch_util:get_value(continuous, RepTask), true,
- "_active_tasks entry has continuous property set to true"),
- etap:is(couch_util:get_value(source, RepTask), Source,
- "_active_tasks entry has correct source property"),
- etap:is(couch_util:get_value(target, RepTask), Target,
- "_active_tasks entry has correct target property"),
- etap:is(is_integer(couch_util:get_value(docs_read, RepTask)), true,
- "_active_tasks entry has integer docs_read property"),
- etap:is(is_integer(couch_util:get_value(docs_written, RepTask)), true,
- "_active_tasks entry has integer docs_written property"),
- etap:is(is_integer(couch_util:get_value(doc_write_failures, RepTask)), true,
- "_active_tasks entry has integer doc_write_failures property"),
- etap:is(is_integer(couch_util:get_value(revisions_checked, RepTask)), true,
- "_active_tasks entry has integer revisions_checked property"),
- etap:is(is_integer(couch_util:get_value(missing_revisions_found, RepTask)), true,
- "_active_tasks entry has integer missing_revisions_found property"),
- etap:is(is_integer(couch_util:get_value(checkpointed_source_seq, RepTask)), true,
- "_active_tasks entry has integer checkpointed_source_seq property"),
- etap:is(is_integer(couch_util:get_value(source_seq, RepTask)), true,
- "_active_tasks entry has integer source_seq property"),
- Progress = couch_util:get_value(progress, RepTask),
- etap:is(is_integer(Progress), true,
- "_active_tasks entry has an integer progress property"),
- etap:is(Progress =< 100, true, "Progress is not greater than 100%").
-
-
-wait_writer(Pid, NumDocs) ->
- case get_writer_num_docs_written(Pid) of
- N when N >= NumDocs ->
- ok;
- _ ->
- wait_writer(Pid, NumDocs)
- end.
-
-
-spawn_writer(Db) ->
- Parent = self(),
- Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end),
- etap:diag("Started source database writer"),
- Pid.
-
-
-pause_writer(Pid) ->
- Ref = make_ref(),
- Pid ! {pause, Ref},
- receive
- {paused, Ref} ->
- ok
- after 30000 ->
- etap:bail("Failed to pause source database writer")
- end.
-
-
-resume_writer(Pid) ->
- Ref = make_ref(),
- Pid ! {continue, Ref},
- receive
- {ok, Ref} ->
- ok
- after 30000 ->
- etap:bail("Failed to unpause source database writer")
- end.
-
-
-get_writer_num_docs_written(Pid) ->
- Ref = make_ref(),
- Pid ! {get_count, Ref},
- receive
- {count, Ref, Count} ->
- Count
- after 30000 ->
- etap:bail("Timeout getting number of documents written from "
- "source database writer")
- end.
-
-
-stop_writer(Pid) ->
- Ref = make_ref(),
- Pid ! {stop, Ref},
- receive
- {stopped, Ref, DocsWritten} ->
- MonRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MonRef, process, Pid, _Reason} ->
- etap:diag("Stopped source database writer"),
- DocsWritten
- after 30000 ->
- etap:bail("Timeout stopping source database writer")
- end
- after 30000 ->
- etap:bail("Timeout stopping source database writer")
- end.
-
-
-writer_loop(#db{name = DbName}, Parent, Counter) ->
- maybe_pause(Parent, Counter),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary(integer_to_list(Counter + 1))},
- {<<"value">>, Counter + 1},
- {<<"_attachments">>, {[
- {<<"icon1.png">>, {[
- {<<"data">>, base64:encode(att_data())},
- {<<"content_type">>, <<"image/png">>}
- ]}},
- {<<"icon2.png">>, {[
- {<<"data">>, base64:encode(iolist_to_binary(
- [att_data(), att_data()]))},
- {<<"content_type">>, <<"image/png">>}
- ]}}
- ]}}
- ]}),
- maybe_pause(Parent, Counter),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = couch_db:update_doc(Db, Doc, []),
- ok = couch_db:close(Db),
- receive
- {get_count, Ref} ->
- Parent ! {count, Ref, Counter + 1},
- writer_loop(Db, Parent, Counter + 1);
- {stop, Ref} ->
- Parent ! {stopped, Ref, Counter + 1}
- after 0 ->
- ok = timer:sleep(500),
- writer_loop(Db, Parent, Counter + 1)
- end.
-
-
-maybe_pause(Parent, Counter) ->
- receive
- {get_count, Ref} ->
- Parent ! {count, Ref, Counter};
- {pause, Ref} ->
- Parent ! {paused, Ref},
- receive {continue, Ref2} -> Parent ! {ok, Ref2} end
- after 0 ->
- ok
- end.
-
-
-db_url(DbName) ->
- iolist_to_binary([
- "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
- ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "/", DbName
- ]).
-
-
-create_db(DbName) ->
- {ok, Db} = couch_db:create(
- DbName,
- [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]),
- couch_db:close(Db),
- {ok, Db}.
-
-
-delete_db(#db{name = DbName, main_pid = Pid}) ->
- ok = couch_server:delete(
- DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
- MonRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MonRef, process, Pid, _Reason} ->
- ok
- after 30000 ->
- etap:bail("Timeout deleting database")
- end.
-
-
-replicate({remote, Db}, Target) ->
- replicate(db_url(Db), Target);
-
-replicate(Source, {remote, Db}) ->
- replicate(Source, db_url(Db));
-
-replicate(Source, Target) ->
- RepObject = {[
- {<<"source">>, Source},
- {<<"target">>, Target},
- {<<"continuous">>, true}
- ]},
- {ok, Rep} = couch_replicator_utils:parse_rep_doc(
- RepObject, #user_ctx{roles = [<<"_admin">>]}),
- {ok, Pid} = couch_replicator:async_replicate(Rep),
- {ok, Pid, Rep#rep.id}.
-
-
-cancel_replication(RepId, RepPid) ->
- {ok, _} = couch_replicator:cancel_replication(RepId),
- etap:is(is_process_alive(RepPid), false,
- "Replication process is no longer alive after cancel").
-
-
-att_data() ->
- {ok, Data} = file:read_file(
- test_util:source_file("share/www/image/logo.png")),
- Data.
diff --git a/src/couch_replicator/test/04-replication-large-atts.t b/src/couch_replicator/test/04-replication-large-atts.t
deleted file mode 100755
index a7063c7a4..000000000
--- a/src/couch_replicator/test/04-replication-large-atts.t
+++ /dev/null
@@ -1,267 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Test replication of large attachments. Verify that both source and
-% target have the same attachment data and metadata.
-
--define(b2l(Bin), binary_to_list(Bin)).
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--record(doc, {
- id = <<"">>,
- revs = {0, []},
- body = {[]},
- atts = [],
- deleted = false,
- meta = []
-}).
-
--record(att, {
- name,
- type,
- att_len,
- disk_len,
- md5= <<>>,
- revpos=0,
- data,
- encoding=identity
-}).
-
-
-source_db_name() -> <<"couch_test_rep_db_a">>.
-target_db_name() -> <<"couch_test_rep_db_b">>.
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(1192),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- application:start(ibrowse),
- application:start(crypto),
- couch_config:set("attachments", "compressible_types", "text/*", false),
-
- Pairs = [
- {source_db_name(), target_db_name()},
- {{remote, source_db_name()}, target_db_name()},
- {source_db_name(), {remote, target_db_name()}},
- {{remote, source_db_name()}, {remote, (target_db_name())}}
- ],
-
- {ok, SourceDb} = create_db(source_db_name()),
- etap:diag("Populating source database"),
- populate_db(SourceDb, 11),
- ok = couch_db:close(SourceDb),
-
- lists:foreach(
- fun({Source, Target}) ->
- etap:diag("Creating target database"),
- {ok, TargetDb} = create_db(target_db_name()),
-
- ok = couch_db:close(TargetDb),
- etap:diag("Triggering replication"),
- replicate(Source, Target),
- etap:diag("Replication finished, comparing source and target databases"),
- compare_dbs(SourceDb, TargetDb),
-
- etap:diag("Deleting target database"),
- delete_db(TargetDb),
- ok = timer:sleep(1000)
- end,
- Pairs),
-
- delete_db(SourceDb),
- couch_server_sup:stop(),
- ok.
-
-
-populate_db(Db, DocCount) ->
- Docs = lists:foldl(
- fun(DocIdCounter, Acc) ->
- Doc = #doc{
- id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
- body = {[]},
- atts = [
- att(<<"att1">>, 2 * 1024 * 1024, <<"text/plain">>),
- att(<<"att2">>, round(6.6 * 1024 * 1024), <<"app/binary">>)
- ]
- },
- [Doc | Acc]
- end,
- [], lists:seq(1, DocCount)),
- {ok, _} = couch_db:update_docs(Db, Docs, []).
-
-
-att(Name, Size, Type) ->
- #att{
- name = Name,
- type = Type,
- att_len = Size,
- data = fun(Count) -> crypto:rand_bytes(Count) end
- }.
-
-
-compare_dbs(Source, Target) ->
- {ok, SourceDb} = couch_db:open_int(couch_db:name(Source), []),
- {ok, TargetDb} = couch_db:open_int(couch_db:name(Target), []),
-
- Fun = fun(FullDocInfo, _, Acc) ->
- {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo),
- Id = DocSource#doc.id,
-
- etap:diag("Verifying document " ++ ?b2l(Id)),
-
- {ok, DocTarget} = couch_db:open_doc(TargetDb, Id),
- etap:is(DocTarget#doc.body, DocSource#doc.body,
- "Same body in source and target databases"),
-
- #doc{atts = SourceAtts} = DocSource,
- #doc{atts = TargetAtts} = DocTarget,
- etap:is(
- lists:sort([N || #att{name = N} <- SourceAtts]),
- lists:sort([N || #att{name = N} <- TargetAtts]),
- "Document has same number (and names) of attachments in "
- "source and target databases"),
-
- lists:foreach(
- fun(#att{name = AttName} = Att) ->
- etap:diag("Verifying attachment " ++ ?b2l(AttName)),
-
- {ok, AttTarget} = find_att(TargetAtts, AttName),
- SourceMd5 = att_md5(Att),
- TargetMd5 = att_md5(AttTarget),
- case AttName of
- <<"att1">> ->
- etap:is(Att#att.encoding, gzip,
- "Attachment is gzip encoded in source database"),
- etap:is(AttTarget#att.encoding, gzip,
- "Attachment is gzip encoded in target database"),
- DecSourceMd5 = att_decoded_md5(Att),
- DecTargetMd5 = att_decoded_md5(AttTarget),
- etap:is(DecTargetMd5, DecSourceMd5,
- "Same identity content in source and target databases");
- _ ->
- etap:is(Att#att.encoding, identity,
- "Attachment is not encoded in source database"),
- etap:is(AttTarget#att.encoding, identity,
- "Attachment is not encoded in target database")
- end,
- etap:is(TargetMd5, SourceMd5,
- "Same content in source and target databases"),
- etap:is(is_integer(Att#att.disk_len), true,
- "#att.disk_len is an integer in source database"),
- etap:is(is_integer(Att#att.att_len), true,
- "#att.att_len is an integer in source database"),
- etap:is(is_integer(AttTarget#att.disk_len), true,
- "#att.disk_len is an integer in target database"),
- etap:is(is_integer(AttTarget#att.att_len), true,
- "#att.att_len is an integer in target database"),
- etap:is(Att#att.disk_len, AttTarget#att.disk_len,
- "Same identity length in source and target databases"),
- etap:is(Att#att.att_len, AttTarget#att.att_len,
- "Same encoded length in source and target databases"),
- etap:is(Att#att.type, AttTarget#att.type,
- "Same type in source and target databases"),
- etap:is(Att#att.md5, SourceMd5, "Correct MD5 in source database"),
- etap:is(AttTarget#att.md5, SourceMd5, "Correct MD5 in target database")
- end,
- SourceAtts),
-
- {ok, Acc}
- end,
-
- {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb).
-
-
-find_att([], _Name) ->
- nil;
-find_att([#att{name = Name} = Att | _], Name) ->
- {ok, Att};
-find_att([_ | Rest], Name) ->
- find_att(Rest, Name).
-
-
-att_md5(Att) ->
- Md50 = couch_doc:att_foldl(
- Att,
- fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
- couch_util:md5_init()),
- couch_util:md5_final(Md50).
-
-att_decoded_md5(Att) ->
- Md50 = couch_doc:att_foldl_decode(
- Att,
- fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
- couch_util:md5_init()),
- couch_util:md5_final(Md50).
-
-
-db_url(DbName) ->
- iolist_to_binary([
- "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
- ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "/", DbName
- ]).
-
-
-create_db(DbName) ->
- couch_db:create(
- DbName,
- [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
-
-
-delete_db(Db) ->
- ok = couch_server:delete(
- couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
-
-
-replicate({remote, Db}, Target) ->
- replicate(db_url(Db), Target);
-
-replicate(Source, {remote, Db}) ->
- replicate(Source, db_url(Db));
-
-replicate(Source, Target) ->
- RepObject = {[
- {<<"source">>, Source},
- {<<"target">>, Target}
- ]},
- {ok, Rep} = couch_replicator_utils:parse_rep_doc(
- RepObject, #user_ctx{roles = [<<"_admin">>]}),
- {ok, Pid} = couch_replicator:async_replicate(Rep),
- MonRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MonRef, process, Pid, Reason} ->
- etap:is(Reason, normal, "Replication finished successfully")
- after 300000 ->
- etap:bail("Timeout waiting for replication to finish")
- end.
diff --git a/src/couch_replicator/test/05-replication-many-leaves.t b/src/couch_replicator/test/05-replication-many-leaves.t
deleted file mode 100755
index 212ee9924..000000000
--- a/src/couch_replicator/test/05-replication-many-leaves.t
+++ /dev/null
@@ -1,294 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Test replication of documents with many leaf revisions.
-% Motivated by COUCHDB-1340 and other similar issues where a document
-% GET with a too long ?open_revs revision list doesn't work due to
-% maximum web server limits for the HTTP request path.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--record(doc, {
- id = <<"">>,
- revs = {0, []},
- body = {[]},
- atts = [],
- deleted = false,
- meta = []
-}).
-
--record(att, {
- name,
- type,
- att_len,
- disk_len,
- md5= <<>>,
- revpos=0,
- data,
- encoding=identity
-}).
-
--define(b2l(B), binary_to_list(B)).
--define(l2b(L), list_to_binary(L)).
--define(i2l(I), integer_to_list(I)).
-
-
-source_db_name() -> <<"couch_test_rep_db_a">>.
-target_db_name() -> <<"couch_test_rep_db_b">>.
-
-doc_ids() ->
- [<<"doc1">>, <<"doc2">>, <<"doc3">>].
-
-doc_num_conflicts(<<"doc1">>) -> 10;
-doc_num_conflicts(<<"doc2">>) -> 100;
-% a number > MaxURLlength (7000) / length(DocRevisionString)
-doc_num_conflicts(<<"doc3">>) -> 210.
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(56),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- ibrowse:start(),
- crypto:start(),
- couch_config:set("replicator", "connection_timeout", "90000", false),
-
- Pairs = [
- {source_db_name(), target_db_name()},
- {{remote, source_db_name()}, target_db_name()},
- {source_db_name(), {remote, target_db_name()}},
- {{remote, source_db_name()}, {remote, (target_db_name())}}
- ],
-
- lists:foreach(
- fun({Source, Target}) ->
- {ok, SourceDb} = create_db(source_db_name()),
- etap:diag("Populating source database"),
- {ok, DocRevs} = populate_db(SourceDb),
- ok = couch_db:close(SourceDb),
- etap:diag("Creating target database"),
- {ok, TargetDb} = create_db(target_db_name()),
-
- ok = couch_db:close(TargetDb),
- etap:diag("Triggering replication"),
- replicate(Source, Target),
- etap:diag("Replication finished, comparing source and target databases"),
- {ok, SourceDb2} = couch_db:open_int(source_db_name(), []),
- {ok, TargetDb2} = couch_db:open_int(target_db_name(), []),
- verify_target(SourceDb2, TargetDb2, DocRevs),
- ok = couch_db:close(SourceDb2),
- ok = couch_db:close(TargetDb2),
-
- {ok, SourceDb3} = couch_db:open_int(source_db_name(), []),
- {ok, DocRevs2} = add_attachments(SourceDb3, DocRevs, 2),
- ok = couch_db:close(SourceDb3),
- etap:diag("Triggering replication again"),
- replicate(Source, Target),
- etap:diag("Replication finished, comparing source and target databases"),
- {ok, SourceDb4} = couch_db:open_int(source_db_name(), []),
- {ok, TargetDb4} = couch_db:open_int(target_db_name(), []),
- verify_target(SourceDb4, TargetDb4, DocRevs2),
- ok = couch_db:close(SourceDb4),
- ok = couch_db:close(TargetDb4),
-
- etap:diag("Deleting source and target databases"),
- delete_db(TargetDb),
- delete_db(SourceDb),
- ok = timer:sleep(1000)
- end,
- Pairs),
-
- couch_server_sup:stop(),
- ok.
-
-
-populate_db(Db) ->
- DocRevsDict = lists:foldl(
- fun(DocId, Acc) ->
- Value = <<"0">>,
- Doc = #doc{
- id = DocId,
- body = {[ {<<"value">>, Value} ]}
- },
- {ok, Rev} = couch_db:update_doc(Db, Doc, []),
- {ok, DocRevs} = add_doc_siblings(Db, DocId, doc_num_conflicts(DocId)),
- dict:store(DocId, [Rev | DocRevs], Acc)
- end,
- dict:new(), doc_ids()),
- {ok, dict:to_list(DocRevsDict)}.
-
-
-add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 ->
- add_doc_siblings(Db, DocId, NumLeaves, [], []).
-
-
-add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) ->
- {ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes),
- {ok, AccRevs};
-
-add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) ->
- Value = list_to_binary(integer_to_list(NumLeaves)),
- Rev = couch_util:md5(Value),
- Doc = #doc{
- id = DocId,
- revs = {1, [Rev]},
- body = {[ {<<"value">>, Value} ]}
- },
- add_doc_siblings(Db, DocId, NumLeaves - 1, [Doc | AccDocs], [{1, Rev} | AccRevs]).
-
-
-verify_target(_SourceDb, _TargetDb, []) ->
- ok;
-
-verify_target(SourceDb, TargetDb, [{DocId, RevList} | Rest]) ->
- {ok, Lookups} = couch_db:open_doc_revs(
- TargetDb,
- DocId,
- RevList,
- [conflicts, deleted_conflicts]),
- Docs = [Doc || {ok, Doc} <- Lookups],
- {ok, SourceLookups} = couch_db:open_doc_revs(
- SourceDb,
- DocId,
- RevList,
- [conflicts, deleted_conflicts]),
- SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
- Total = doc_num_conflicts(DocId) + 1,
- etap:is(
- length(Docs),
- Total,
- "Target has " ++ ?i2l(Total) ++ " leaf revisions of document " ++ ?b2l(DocId)),
- etap:diag("Verifying all revisions of document " ++ ?b2l(DocId)),
- lists:foreach(
- fun({#doc{id = Id, revs = Revs} = TgtDoc, #doc{id = Id, revs = Revs} = SrcDoc}) ->
- SourceJson = couch_doc:to_json_obj(SrcDoc, [attachments]),
- TargetJson = couch_doc:to_json_obj(TgtDoc, [attachments]),
- case TargetJson of
- SourceJson ->
- ok;
- _ ->
- {Pos, [Rev | _]} = Revs,
- etap:bail("Wrong value for revision " ++
- ?b2l(couch_doc:rev_to_str({Pos, Rev})) ++
- " of document " ++ ?b2l(DocId))
- end
- end,
- lists:zip(Docs, SourceDocs)),
- verify_target(SourceDb, TargetDb, Rest).
-
-
-add_attachments(Source, DocIdRevs, NumAtts) ->
- add_attachments(Source, DocIdRevs, NumAtts, []).
-
-add_attachments(_SourceDb, [], _NumAtts, Acc) ->
- {ok, Acc};
-
-add_attachments(SourceDb, [{DocId, RevList} | Rest], NumAtts, IdRevsAcc) ->
- {ok, SourceLookups} = couch_db:open_doc_revs(
- SourceDb,
- DocId,
- RevList,
- []),
- SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
- Total = doc_num_conflicts(DocId) + 1,
- etap:is(
- length(SourceDocs),
- Total,
- "Source still has " ++ ?i2l(Total) ++
- " leaf revisions of document " ++ ?b2l(DocId)),
- etap:diag("Adding " ++ ?i2l(NumAtts) ++
- " attachments to each revision of the document " ++ ?b2l(DocId)),
- NewDocs = lists:foldl(
- fun(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) ->
- NewAtts = lists:foldl(
- fun(I, AttAcc) ->
- AttData = crypto:rand_bytes(100),
- NewAtt = #att{
- name = iolist_to_binary(
- ["att_", ?i2l(I), "_", couch_doc:rev_to_str({Pos, Rev})]),
- type = <<"application/foobar">>,
- att_len = byte_size(AttData),
- data = AttData
- },
- [NewAtt | AttAcc]
- end,
- [], lists:seq(1, NumAtts)),
- [Doc#doc{atts = Atts ++ NewAtts} | Acc]
- end,
- [], SourceDocs),
- {ok, UpdateResults} = couch_db:update_docs(SourceDb, NewDocs, []),
- NewRevs = [R || {ok, R} <- UpdateResults],
- etap:is(
- length(NewRevs),
- length(NewDocs),
- "Document revisions updated with " ++ ?i2l(NumAtts) ++ " attachments"),
- add_attachments(SourceDb, Rest, NumAtts, [{DocId, NewRevs} | IdRevsAcc]).
-
-
-db_url(DbName) ->
- iolist_to_binary([
- "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
- ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "/", DbName
- ]).
-
-
-create_db(DbName) ->
- couch_db:create(
- DbName,
- [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
-
-
-delete_db(Db) ->
- ok = couch_server:delete(
- couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
-
-
-replicate({remote, Db}, Target) ->
- replicate(db_url(Db), Target);
-
-replicate(Source, {remote, Db}) ->
- replicate(Source, db_url(Db));
-
-replicate(Source, Target) ->
- RepObject = {[
- {<<"source">>, Source},
- {<<"target">>, Target}
- ]},
- {ok, Rep} = couch_replicator_utils:parse_rep_doc(
- RepObject, #user_ctx{roles = [<<"_admin">>]}),
- {ok, Pid} = couch_replicator:async_replicate(Rep),
- MonRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MonRef, process, Pid, Reason} ->
- etap:is(Reason, normal, "Replication finished successfully")
- after 900000 ->
- etap:bail("Timeout waiting for replication to finish")
- end.
diff --git a/src/couch_replicator/test/06-doc-missing-stubs.t b/src/couch_replicator/test/06-doc-missing-stubs.t
deleted file mode 100755
index e17efc900..000000000
--- a/src/couch_replicator/test/06-doc-missing-stubs.t
+++ /dev/null
@@ -1,304 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Test replication of documents with many leaf revisions.
-% Motivated by COUCHDB-1340 and other similar issues where a document
-% GET with a too long ?open_revs revision list doesn't work due to
-% maximum web server limits for the HTTP request path.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--record(doc, {
- id = <<"">>,
- revs = {0, []},
- body = {[]},
- atts = [],
- deleted = false,
- meta = []
-}).
-
--record(att, {
- name,
- type,
- att_len,
- disk_len,
- md5= <<>>,
- revpos=0,
- data,
- encoding=identity
-}).
-
--define(b2l(B), binary_to_list(B)).
-
-source_db_name() -> <<"couch_test_rep_db_a">>.
-target_db_name() -> <<"couch_test_rep_db_b">>.
-
-target_revs_limit() -> 3.
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(128),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-% Test motivated by COUCHDB-1365.
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- ibrowse:start(),
-
- Pairs = [
- {source_db_name(), target_db_name()},
- {{remote, source_db_name()}, target_db_name()},
- {source_db_name(), {remote, target_db_name()}},
- {{remote, source_db_name()}, {remote, (target_db_name())}}
- ],
-
- lists:foreach(
- fun({Source, Target}) ->
- {ok, SourceDb} = create_db(source_db_name()),
- etap:diag("Populating source database"),
- populate_db(SourceDb),
- ok = couch_db:close(SourceDb),
-
- etap:diag("Creating target database"),
- {ok, TargetDb} = create_db(target_db_name()),
- ok = couch_db:set_revs_limit(TargetDb, target_revs_limit()),
- ok = couch_db:close(TargetDb),
-
- etap:diag("Triggering replication"),
- replicate(Source, Target),
- etap:diag("Replication finished, comparing source and target databases"),
- compare_dbs(SourceDb, TargetDb),
-
- etap:diag("Updating source database docs"),
- update_db_docs(couch_db:name(SourceDb), target_revs_limit() + 2),
-
- etap:diag("Triggering replication again"),
- replicate(Source, Target),
- etap:diag("Replication finished, comparing source and target databases"),
- compare_dbs(SourceDb, TargetDb),
-
- etap:diag("Deleting databases"),
- delete_db(TargetDb),
- delete_db(SourceDb),
- ok = timer:sleep(1000)
- end,
- Pairs),
-
- couch_server_sup:stop(),
- ok.
-
-
-populate_db(Db) ->
- AttData = crypto:rand_bytes(6000),
- Doc1 = #doc{
- id = <<"doc1">>,
- atts = [
- #att{
- name = <<"doc1_att1">>,
- type = <<"application/foobar">>,
- att_len = byte_size(AttData),
- data = AttData
- }
- ]
- },
- {ok, _} = couch_db:update_doc(Db, Doc1, []).
-
-
-update_db_docs(DbName, Times) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _, _} = couch_db:enum_docs(
- Db,
- fun(FDI, _, Acc) -> db_fold_fun(FDI, Acc) end,
- {DbName, Times},
- []),
- ok = couch_db:close(Db).
-
-
-db_fold_fun(FullDocInfo, {DbName, Times}) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Doc} = couch_db:open_doc(Db, FullDocInfo),
- lists:foldl(
- fun(_, {Pos, RevId}) ->
- {ok, Db2} = couch_db:reopen(Db),
- NewDocVersion = Doc#doc{
- revs = {Pos, [RevId]},
- body = {[{<<"value">>, base64:encode(crypto:rand_bytes(100))}]}
- },
- {ok, NewRev} = couch_db:update_doc(Db2, NewDocVersion, []),
- NewRev
- end,
- {element(1, Doc#doc.revs), hd(element(2, Doc#doc.revs))},
- lists:seq(1, Times)),
- ok = couch_db:close(Db),
- {ok, {DbName, Times}}.
-
-
-compare_dbs(Source, Target) ->
- {ok, SourceDb} = couch_db:open_int(couch_db:name(Source), []),
- {ok, TargetDb} = couch_db:open_int(couch_db:name(Target), []),
-
- Fun = fun(FullDocInfo, _, Acc) ->
- {ok, DocSource} = couch_db:open_doc(
- SourceDb, FullDocInfo, [conflicts, deleted_conflicts]),
- Id = DocSource#doc.id,
-
- etap:diag("Verifying document " ++ ?b2l(Id)),
-
- {ok, DocTarget} = couch_db:open_doc(
- TargetDb, Id, [conflicts, deleted_conflicts]),
- etap:is(DocTarget#doc.body, DocSource#doc.body,
- "Same body in source and target databases"),
-
- etap:is(
- couch_doc:to_json_obj(DocTarget, []),
- couch_doc:to_json_obj(DocSource, []),
- "Same doc body in source and target databases"),
-
- #doc{atts = SourceAtts} = DocSource,
- #doc{atts = TargetAtts} = DocTarget,
- etap:is(
- lists:sort([N || #att{name = N} <- SourceAtts]),
- lists:sort([N || #att{name = N} <- TargetAtts]),
- "Document has same number (and names) of attachments in "
- "source and target databases"),
-
- lists:foreach(
- fun(#att{name = AttName} = Att) ->
- etap:diag("Verifying attachment " ++ ?b2l(AttName)),
-
- {ok, AttTarget} = find_att(TargetAtts, AttName),
- SourceMd5 = att_md5(Att),
- TargetMd5 = att_md5(AttTarget),
- case AttName of
- <<"att1">> ->
- etap:is(Att#att.encoding, gzip,
- "Attachment is gzip encoded in source database"),
- etap:is(AttTarget#att.encoding, gzip,
- "Attachment is gzip encoded in target database"),
- DecSourceMd5 = att_decoded_md5(Att),
- DecTargetMd5 = att_decoded_md5(AttTarget),
- etap:is(DecTargetMd5, DecSourceMd5,
- "Same identity content in source and target databases");
- _ ->
- etap:is(Att#att.encoding, identity,
- "Attachment is not encoded in source database"),
- etap:is(AttTarget#att.encoding, identity,
- "Attachment is not encoded in target database")
- end,
- etap:is(TargetMd5, SourceMd5,
- "Same content in source and target databases"),
- etap:is(is_integer(Att#att.disk_len), true,
- "#att.disk_len is an integer in source database"),
- etap:is(is_integer(Att#att.att_len), true,
- "#att.att_len is an integer in source database"),
- etap:is(is_integer(AttTarget#att.disk_len), true,
- "#att.disk_len is an integer in target database"),
- etap:is(is_integer(AttTarget#att.att_len), true,
- "#att.att_len is an integer in target database"),
- etap:is(Att#att.disk_len, AttTarget#att.disk_len,
- "Same identity length in source and target databases"),
- etap:is(Att#att.att_len, AttTarget#att.att_len,
- "Same encoded length in source and target databases"),
- etap:is(Att#att.type, AttTarget#att.type,
- "Same type in source and target databases"),
- etap:is(Att#att.md5, SourceMd5, "Correct MD5 in source database"),
- etap:is(AttTarget#att.md5, SourceMd5, "Correct MD5 in target database")
- end,
- SourceAtts),
-
- {ok, Acc}
- end,
-
- {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb).
-
-
-find_att([], _Name) ->
- nil;
-find_att([#att{name = Name} = Att | _], Name) ->
- {ok, Att};
-find_att([_ | Rest], Name) ->
- find_att(Rest, Name).
-
-
-att_md5(Att) ->
- Md50 = couch_doc:att_foldl(
- Att,
- fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
- couch_util:md5_init()),
- couch_util:md5_final(Md50).
-
-att_decoded_md5(Att) ->
- Md50 = couch_doc:att_foldl_decode(
- Att,
- fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
- couch_util:md5_init()),
- couch_util:md5_final(Md50).
-
-
-db_url(DbName) ->
- iolist_to_binary([
- "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
- ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "/", DbName
- ]).
-
-
-create_db(DbName) ->
- couch_db:create(
- DbName,
- [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
-
-
-delete_db(Db) ->
- ok = couch_server:delete(
- couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
-
-
-replicate({remote, Db}, Target) ->
- replicate(db_url(Db), Target);
-
-replicate(Source, {remote, Db}) ->
- replicate(Source, db_url(Db));
-
-replicate(Source, Target) ->
- RepObject = {[
- {<<"source">>, Source},
- {<<"target">>, Target}
- ]},
- {ok, Rep} = couch_replicator_utils:parse_rep_doc(
- RepObject, #user_ctx{roles = [<<"_admin">>]}),
- {ok, Pid} = couch_replicator:async_replicate(Rep),
- MonRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MonRef, process, Pid, Reason} ->
- etap:is(Reason, normal, "Replication finished successfully")
- after 300000 ->
- etap:bail("Timeout waiting for replication to finish")
- end.
diff --git a/src/couch_replicator/test/07-use-checkpoints.t b/src/couch_replicator/test/07-use-checkpoints.t
deleted file mode 100755
index a3295c7a1..000000000
--- a/src/couch_replicator/test/07-use-checkpoints.t
+++ /dev/null
@@ -1,273 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Verify that compacting databases that are being used as the source or
-% target of a replication doesn't affect the replication and that the
-% replication doesn't hold their reference counters forever.
-
--define(b2l(B), binary_to_list(B)).
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--record(doc, {
- id = <<"">>,
- revs = {0, []},
- body = {[]},
- atts = [],
- deleted = false,
- meta = []
-}).
-
--record(db, {
- main_pid = nil,
- update_pid = nil,
- compactor_pid = nil,
- instance_start_time, % number of microsecs since jan 1 1970 as a binary string
- fd,
- updater_fd,
- fd_ref_counter,
- header = nil,
- committed_update_seq,
- fulldocinfo_by_id_btree,
- docinfo_by_seq_btree,
- local_docs_btree,
- update_seq,
- name,
- filepath,
- validate_doc_funs = [],
- security = [],
- security_ptr = nil,
- user_ctx = #user_ctx{},
- waiting_delayed_commit = nil,
- revs_limit = 1000,
- fsync_options = [],
- options = [],
- compression,
- before_doc_update,
- after_doc_read
-}).
-
--record(rep, {
- id,
- source,
- target,
- options,
- user_ctx,
- doc_id
-}).
-
-
-source_db_name() -> <<"couch_test_rep_db_a">>.
-target_db_name() -> <<"couch_test_rep_db_b">>.
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(16),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- ibrowse:start(),
-
- % order matters
- test_use_checkpoints(false),
- test_use_checkpoints(true),
-
- couch_server_sup:stop(),
- ok.
-
-
-test_use_checkpoints(UseCheckpoints) ->
- Pairs = [
- {source_db_name(), target_db_name()},
- {{remote, source_db_name()}, target_db_name()},
- {source_db_name(), {remote, target_db_name()}},
- {{remote, source_db_name()}, {remote, (target_db_name())}}
- ],
-
- ListenerFun = case UseCheckpoints of
- false ->
- fun({finished, _, {CheckpointHistory}}) ->
- etap:is(CheckpointHistory,
- [{<<"use_checkpoints">>,false}],
- "No checkpoints found");
- (_) ->
- ok
- end;
- true ->
- fun({finished, _, {CheckpointHistory}}) ->
- SessionId = lists:keyfind(
- <<"session_id">>, 1, CheckpointHistory),
- case SessionId of
- false ->
- OtpRel = erlang:system_info(otp_release),
- case OtpRel >= "R14B01" orelse OtpRel < "R14B03" of
- false ->
- etap:bail("Checkpoint expected, but not found");
- true ->
- etap:ok(true,
- " Checkpoint expected, but wan't found."
- " Your Erlang " ++ OtpRel ++ " version is"
- " affected to OTP-9167 issue which causes"
- " failure of this test. Try to upgrade Erlang"
- " and if this failure repeats file the bug.")
- end;
- _ ->
- etap:ok(true, "There's a checkpoint")
- end;
- (_) ->
- ok
- end
- end,
- {ok, Listener} = couch_replicator_notifier:start_link(ListenerFun),
-
- lists:foreach(
- fun({Source, Target}) ->
- {ok, SourceDb} = create_db(source_db_name()),
- etap:diag("Populating source database"),
- populate_db(SourceDb, 100),
- ok = couch_db:close(SourceDb),
-
- etap:diag("Creating target database"),
- {ok, TargetDb} = create_db(target_db_name()),
- ok = couch_db:close(TargetDb),
-
- etap:diag("Setup replicator notifier listener"),
-
- etap:diag("Triggering replication"),
- replicate(Source, Target, UseCheckpoints),
-
- etap:diag("Replication finished, comparing source and target databases"),
- compare_dbs(SourceDb, TargetDb),
-
- etap:diag("Deleting databases"),
- delete_db(TargetDb),
- delete_db(SourceDb),
-
- ok = timer:sleep(1000)
- end,
- Pairs),
-
- couch_replicator_notifier:stop(Listener).
-
-
-populate_db(Db, DocCount) ->
- Docs = lists:foldl(
- fun(DocIdCounter, Acc) ->
- Id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
- Value = iolist_to_binary(["val", integer_to_list(DocIdCounter)]),
- Doc = #doc{
- id = Id,
- body = {[ {<<"value">>, Value} ]}
- },
- [Doc | Acc]
- end,
- [], lists:seq(1, DocCount)),
- {ok, _} = couch_db:update_docs(Db, Docs, []).
-
-
-compare_dbs(#db{name = SourceName}, #db{name = TargetName}) ->
- {ok, SourceDb} = couch_db:open_int(SourceName, []),
- {ok, TargetDb} = couch_db:open_int(TargetName, []),
- Fun = fun(FullDocInfo, _, Acc) ->
- {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
- {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
- DocId = couch_util:get_value(<<"_id">>, Props),
- DocTarget = case couch_db:open_doc(TargetDb, DocId) of
- {ok, DocT} ->
- DocT;
- Error ->
- etap:bail("Error opening document '" ++ ?b2l(DocId) ++
- "' from target: " ++ couch_util:to_list(Error))
- end,
- DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
- case DocTargetJson of
- DocJson ->
- ok;
- _ ->
- etap:bail("Content from document '" ++ ?b2l(DocId) ++
- "' differs in target database")
- end,
- {ok, Acc}
- end,
- {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
- etap:diag("Target database has the same documents as the source database"),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb).
-
-
-db_url(DbName) ->
- iolist_to_binary([
- "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
- ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "/", DbName
- ]).
-
-
-create_db(DbName) ->
- {ok, Db} = couch_db:create(
- DbName,
- [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]),
- couch_db:close(Db),
- {ok, Db}.
-
-
-delete_db(#db{name = DbName, main_pid = Pid}) ->
- ok = couch_server:delete(
- DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
- MonRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MonRef, process, Pid, _Reason} ->
- ok
- after 30000 ->
- etap:bail("Timeout deleting database")
- end.
-
-
-replicate({remote, Db}, Target, UseCheckpoints) ->
- replicate(db_url(Db), Target, UseCheckpoints);
-
-replicate(Source, {remote, Db}, UseCheckpoints) ->
- replicate(Source, db_url(Db), UseCheckpoints);
-
-replicate(Source, Target, UseCheckpoints) ->
- RepObject = {[
- {<<"source">>, Source},
- {<<"target">>, Target},
- {<<"use_checkpoints">>, UseCheckpoints}
- ]},
- {ok, Rep} = couch_replicator_utils:parse_rep_doc(
- RepObject, #user_ctx{roles = [<<"_admin">>]}),
- {ok, Pid} = couch_replicator:async_replicate(Rep),
- MonRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MonRef, process, Pid, Reason} ->
- etap:is(Reason, normal, "Replication finished successfully")
- after 300000 ->
- etap:bail("Timeout waiting for replication to finish")
- end.
diff --git a/src/couch_replicator/test/couch_replicator_compact_tests.erl b/src/couch_replicator/test/couch_replicator_compact_tests.erl
new file mode 100644
index 000000000..05b368e38
--- /dev/null
+++ b/src/couch_replicator/test/couch_replicator_compact_tests.erl
@@ -0,0 +1,448 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_compact_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+-include_lib("couch_replicator/src/couch_replicator.hrl").
+
+-define(ADMIN_ROLE, #user_ctx{roles=[<<"_admin">>]}).
+-define(ADMIN_USER, {user_ctx, ?ADMIN_ROLE}).
+-define(ATTFILE, filename:join([?FIXTURESDIR, "logo.png"])).
+-define(DELAY, 100).
+-define(TIMEOUT, 30000).
+-define(TIMEOUT_STOP, 1000).
+-define(TIMEOUT_WRITER, 3000).
+-define(TIMEOUT_EUNIT, ?TIMEOUT div 1000 + 5).
+
+setup() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+ ok = couch_db:close(Db),
+ DbName.
+
+setup(local) ->
+ setup();
+setup(remote) ->
+ {remote, setup()};
+setup({A, B}) ->
+ {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Source = setup(A),
+ Target = setup(B),
+ {Source, Target}.
+
+teardown({remote, DbName}) ->
+ teardown(DbName);
+teardown(DbName) ->
+ ok = couch_server:delete(DbName, [?ADMIN_USER]),
+ ok.
+
+teardown(_, {Source, Target}) ->
+ teardown(Source),
+ teardown(Target),
+
+ Pid = whereis(couch_server_sup),
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT_STOP ->
+ throw({timeout, server_stop})
+ end.
+
+
+compact_test_() ->
+ Pairs = [{local, local}, {local, remote},
+ {remote, local}, {remote, remote}],
+ {
+ "Compaction during replication tests",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{Pair, fun should_populate_replicate_compact/2}
+ || Pair <- Pairs]
+ }
+ }.
+
+
+should_populate_replicate_compact({From, To}, {Source, Target}) ->
+ {ok, RepPid, RepId} = replicate(Source, Target),
+ {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_run_replication(RepPid, RepId, Source, Target),
+ should_all_processes_be_alive(RepPid, Source, Target),
+ should_populate_and_compact(RepPid, Source, Target, 50, 5),
+ should_wait_target_in_sync(Source, Target),
+ should_ensure_replication_still_running(RepPid, RepId, Source, Target),
+ should_cancel_replication(RepId, RepPid),
+ should_compare_databases(Source, Target)
+ ]}}.
+
+should_all_processes_be_alive(RepPid, Source, Target) ->
+ ?_test(begin
+ {ok, SourceDb} = reopen_db(Source),
+ {ok, TargetDb} = reopen_db(Target),
+ ?assert(is_process_alive(RepPid)),
+ ?assert(is_process_alive(SourceDb#db.main_pid)),
+ ?assert(is_process_alive(TargetDb#db.main_pid))
+ end).
+
+should_run_replication(RepPid, RepId, Source, Target) ->
+ ?_test(check_active_tasks(RepPid, RepId, Source, Target)).
+
+should_ensure_replication_still_running(RepPid, RepId, Source, Target) ->
+ ?_test(check_active_tasks(RepPid, RepId, Source, Target)).
+
+check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
+ Source = case Src of
+ {remote, NameSrc} ->
+ <<(db_url(NameSrc))/binary, $/>>;
+ _ ->
+ Src
+ end,
+ Target = case Tgt of
+ {remote, NameTgt} ->
+ <<(db_url(NameTgt))/binary, $/>>;
+ _ ->
+ Tgt
+ end,
+ FullRepId = ?l2b(BaseId ++ Ext),
+ Pid = ?l2b(pid_to_list(RepPid)),
+ [RepTask] = couch_task_status:all(),
+ ?assertEqual(Pid, couch_util:get_value(pid, RepTask)),
+ ?assertEqual(FullRepId, couch_util:get_value(replication_id, RepTask)),
+ ?assertEqual(true, couch_util:get_value(continuous, RepTask)),
+ ?assertEqual(Source, couch_util:get_value(source, RepTask)),
+ ?assertEqual(Target, couch_util:get_value(target, RepTask)),
+ ?assert(is_integer(couch_util:get_value(docs_read, RepTask))),
+ ?assert(is_integer(couch_util:get_value(docs_written, RepTask))),
+ ?assert(is_integer(couch_util:get_value(doc_write_failures, RepTask))),
+ ?assert(is_integer(couch_util:get_value(revisions_checked, RepTask))),
+ ?assert(is_integer(couch_util:get_value(missing_revisions_found, RepTask))),
+ ?assert(is_integer(couch_util:get_value(checkpointed_source_seq, RepTask))),
+ ?assert(is_integer(couch_util:get_value(source_seq, RepTask))),
+ Progress = couch_util:get_value(progress, RepTask),
+ ?assert(is_integer(Progress)),
+ ?assert(Progress =< 100).
+
+should_cancel_replication(RepId, RepPid) ->
+ ?_assertNot(begin
+ {ok, _} = couch_replicator:cancel_replication(RepId),
+ is_process_alive(RepPid)
+ end).
+
+should_populate_and_compact(RepPid, Source, Target, BatchSize, Rounds) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(begin
+ {ok, SourceDb0} = reopen_db(Source),
+ Writer = spawn_writer(SourceDb0),
+ lists:foreach(
+ fun(N) ->
+ {ok, SourceDb} = reopen_db(Source),
+ {ok, TargetDb} = reopen_db(Target),
+ pause_writer(Writer),
+
+ compact_db("source", SourceDb),
+ ?assert(is_process_alive(RepPid)),
+ ?assert(is_process_alive(SourceDb#db.main_pid)),
+ check_ref_counter("source", SourceDb),
+
+ compact_db("target", TargetDb),
+ ?assert(is_process_alive(RepPid)),
+ ?assert(is_process_alive(TargetDb#db.main_pid)),
+ check_ref_counter("target", TargetDb),
+
+ {ok, SourceDb2} = reopen_db(SourceDb),
+ {ok, TargetDb2} = reopen_db(TargetDb),
+
+ resume_writer(Writer),
+ wait_writer(Writer, BatchSize * N),
+
+ compact_db("source", SourceDb2),
+ ?assert(is_process_alive(RepPid)),
+ ?assert(is_process_alive(SourceDb2#db.main_pid)),
+ pause_writer(Writer),
+ check_ref_counter("source", SourceDb2),
+ resume_writer(Writer),
+
+ compact_db("target", TargetDb2),
+ ?assert(is_process_alive(RepPid)),
+ ?assert(is_process_alive(TargetDb2#db.main_pid)),
+ pause_writer(Writer),
+ check_ref_counter("target", TargetDb2),
+ resume_writer(Writer)
+ end, lists:seq(1, Rounds)),
+ stop_writer(Writer)
+ end)}.
+
+should_wait_target_in_sync({remote, Source}, Target) ->
+ should_wait_target_in_sync(Source, Target);
+should_wait_target_in_sync(Source, {remote, Target}) ->
+ should_wait_target_in_sync(Source, Target);
+should_wait_target_in_sync(Source, Target) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_assert(begin
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ {ok, SourceInfo} = couch_db:get_db_info(SourceDb),
+ ok = couch_db:close(SourceDb),
+ SourceDocCount = couch_util:get_value(doc_count, SourceInfo),
+ wait_target_in_sync_loop(SourceDocCount, Target, 300)
+ end)}.
+
+wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, "Could not get source and target databases in sync"}]});
+wait_target_in_sync_loop(DocCount, {remote, TargetName}, RetriesLeft) ->
+ wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft);
+wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
+ {ok, Target} = couch_db:open_int(TargetName, []),
+ {ok, TargetInfo} = couch_db:get_db_info(Target),
+ ok = couch_db:close(Target),
+ TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
+ case TargetDocCount == DocCount of
+ true ->
+ true;
+ false ->
+ ok = timer:sleep(?DELAY),
+ wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
+ end.
+
+should_compare_databases({remote, Source}, Target) ->
+ should_compare_databases(Source, Target);
+should_compare_databases(Source, {remote, Target}) ->
+ should_compare_databases(Source, Target);
+should_compare_databases(Source, Target) ->
+ {timeout, 35, ?_test(begin
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ {ok, TargetDb} = couch_db:open_int(Target, []),
+ Fun = fun(FullDocInfo, _, Acc) ->
+ {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
+ {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
+ DocId = couch_util:get_value(<<"_id">>, Props),
+ DocTarget = case couch_db:open_doc(TargetDb, DocId) of
+ {ok, DocT} ->
+ DocT;
+ Error ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, lists:concat(["Error opening document '",
+ ?b2l(DocId), "' from target: ",
+ couch_util:to_list(Error)])}]})
+ end,
+ DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
+ ?assertEqual(DocJson, DocTargetJson),
+ {ok, Acc}
+ end,
+ {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+ ok = couch_db:close(SourceDb),
+ ok = couch_db:close(TargetDb)
+ end)}.
+
+
+reopen_db({remote, Db}) ->
+ reopen_db(Db);
+reopen_db(#db{name=DbName}) ->
+ reopen_db(DbName);
+reopen_db(DbName) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ ok = couch_db:close(Db),
+ {ok, Db}.
+
+compact_db(Type, #db{name = Name}) ->
+ {ok, Db} = couch_db:open_int(Name, []),
+ {ok, CompactPid} = couch_db:start_compact(Db),
+ MonRef = erlang:monitor(process, CompactPid),
+ receive
+ {'DOWN', MonRef, process, CompactPid, normal} ->
+ ok;
+ {'DOWN', MonRef, process, CompactPid, Reason} ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason,
+ lists:concat(["Error compacting ", Type, " database ",
+ ?b2l(Name), ": ",
+ couch_util:to_list(Reason)])}]})
+ after ?TIMEOUT ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, lists:concat(["Compaction for ", Type, " database ",
+ ?b2l(Name), " didn't finish"])}]})
+ end,
+ ok = couch_db:close(Db).
+
+check_ref_counter(Type, #db{name = Name, fd_ref_counter = OldRefCounter}) ->
+ MonRef = erlang:monitor(process, OldRefCounter),
+ receive
+ {'DOWN', MonRef, process, OldRefCounter, _} ->
+ ok
+ after ?TIMEOUT ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, lists:concat(["Old ", Type,
+ " database ref counter didn't"
+ " terminate"])}]})
+ end,
+ {ok, #db{fd_ref_counter = NewRefCounter} = Db} = couch_db:open_int(Name, []),
+ ok = couch_db:close(Db),
+ ?assertNotEqual(OldRefCounter, NewRefCounter).
+
+db_url(DbName) ->
+ iolist_to_binary([
+ "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+ ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ "/", DbName
+ ]).
+
+replicate({remote, Db}, Target) ->
+ replicate(db_url(Db), Target);
+
+replicate(Source, {remote, Db}) ->
+ replicate(Source, db_url(Db));
+
+replicate(Source, Target) ->
+ RepObject = {[
+ {<<"source">>, Source},
+ {<<"target">>, Target},
+ {<<"continuous">>, true}
+ ]},
+ {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_ROLE),
+ {ok, Pid} = couch_replicator:async_replicate(Rep),
+ {ok, Pid, Rep#rep.id}.
+
+
+wait_writer(Pid, NumDocs) ->
+ case get_writer_num_docs_written(Pid) of
+ N when N >= NumDocs ->
+ ok;
+ _ ->
+ wait_writer(Pid, NumDocs)
+ end.
+
+spawn_writer(Db) ->
+ Parent = self(),
+ Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end),
+ Pid.
+
+
+pause_writer(Pid) ->
+ Ref = make_ref(),
+ Pid ! {pause, Ref},
+ receive
+ {paused, Ref} ->
+ ok
+ after ?TIMEOUT_WRITER ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Failed to pause source database writer"}]})
+ end.
+
+resume_writer(Pid) ->
+ Ref = make_ref(),
+ Pid ! {continue, Ref},
+ receive
+ {ok, Ref} ->
+ ok
+ after ?TIMEOUT_WRITER ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+             {reason, "Failed to resume source database writer"}]})
+ end.
+
+get_writer_num_docs_written(Pid) ->
+ Ref = make_ref(),
+ Pid ! {get_count, Ref},
+ receive
+ {count, Ref, Count} ->
+ Count
+ after ?TIMEOUT_WRITER ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout getting number of documents written"
+ " from source database writer"}]})
+ end.
+
+stop_writer(Pid) ->
+ Ref = make_ref(),
+ Pid ! {stop, Ref},
+ receive
+ {stopped, Ref, DocsWritten} ->
+ MonRef = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', MonRef, process, Pid, _Reason} ->
+ DocsWritten
+ after ?TIMEOUT ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout stopping source database writer"}]})
+ end
+ after ?TIMEOUT_WRITER ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout stopping source database writer"}]})
+ end.
+
+writer_loop(#db{name = DbName}, Parent, Counter) ->
+ {ok, Data} = file:read_file(?ATTFILE),
+ maybe_pause(Parent, Counter),
+ Doc = couch_doc:from_json_obj({[
+ {<<"_id">>, ?l2b(integer_to_list(Counter + 1))},
+ {<<"value">>, Counter + 1},
+ {<<"_attachments">>, {[
+ {<<"icon1.png">>, {[
+ {<<"data">>, base64:encode(Data)},
+ {<<"content_type">>, <<"image/png">>}
+ ]}},
+ {<<"icon2.png">>, {[
+ {<<"data">>, base64:encode(iolist_to_binary([Data, Data]))},
+ {<<"content_type">>, <<"image/png">>}
+ ]}}
+ ]}}
+ ]}),
+ maybe_pause(Parent, Counter),
+ {ok, Db} = couch_db:open_int(DbName, []),
+ {ok, _} = couch_db:update_doc(Db, Doc, []),
+ ok = couch_db:close(Db),
+ receive
+ {get_count, Ref} ->
+ Parent ! {count, Ref, Counter + 1},
+ writer_loop(Db, Parent, Counter + 1);
+ {stop, Ref} ->
+ Parent ! {stopped, Ref, Counter + 1}
+ after 0 ->
+ timer:sleep(?DELAY),
+ writer_loop(Db, Parent, Counter + 1)
+ end.
+
+maybe_pause(Parent, Counter) ->
+ receive
+ {get_count, Ref} ->
+ Parent ! {count, Ref, Counter};
+ {pause, Ref} ->
+ Parent ! {paused, Ref},
+ receive
+ {continue, Ref2} ->
+ Parent ! {ok, Ref2}
+ end
+ after 0 ->
+ ok
+ end.
diff --git a/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl b/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl
new file mode 100644
index 000000000..88534edcd
--- /dev/null
+++ b/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl
@@ -0,0 +1,189 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_httpc_pool_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(TIMEOUT, 1000).
+
+
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Pid.
+
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
+
+setup() ->
+ spawn_pool().
+
+teardown(Pool) ->
+ stop_pool(Pool).
+
+
+httpc_pool_test_() ->
+ {
+ "httpc pool tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_block_new_clients_when_full/1,
+ fun should_replace_worker_on_death/1
+ ]
+ }
+ }
+ }.
+
+
+should_block_new_clients_when_full(Pool) ->
+ ?_test(begin
+ Client1 = spawn_client(Pool),
+ Client2 = spawn_client(Pool),
+ Client3 = spawn_client(Pool),
+
+ ?assertEqual(ok, ping_client(Client1)),
+ ?assertEqual(ok, ping_client(Client2)),
+ ?assertEqual(ok, ping_client(Client3)),
+
+ Worker1 = get_client_worker(Client1, "1"),
+ Worker2 = get_client_worker(Client2, "2"),
+ Worker3 = get_client_worker(Client3, "3"),
+
+ ?assert(is_process_alive(Worker1)),
+ ?assert(is_process_alive(Worker2)),
+ ?assert(is_process_alive(Worker3)),
+
+ ?assertNotEqual(Worker1, Worker2),
+ ?assertNotEqual(Worker2, Worker3),
+ ?assertNotEqual(Worker3, Worker1),
+
+ Client4 = spawn_client(Pool),
+ ?assertEqual(timeout, ping_client(Client4)),
+
+ ?assertEqual(ok, stop_client(Client1)),
+ ?assertEqual(ok, ping_client(Client4)),
+
+ Worker4 = get_client_worker(Client4, "4"),
+ ?assertEqual(Worker1, Worker4),
+
+ lists:foreach(
+ fun(C) ->
+ ?assertEqual(ok, stop_client(C))
+ end, [Client2, Client3, Client4])
+ end).
+
+should_replace_worker_on_death(Pool) ->
+ ?_test(begin
+ Client1 = spawn_client(Pool),
+ ?assertEqual(ok, ping_client(Client1)),
+ Worker1 = get_client_worker(Client1, "1"),
+ ?assert(is_process_alive(Worker1)),
+
+ ?assertEqual(ok, kill_client_worker(Client1)),
+ ?assertNot(is_process_alive(Worker1)),
+ ?assertEqual(ok, stop_client(Client1)),
+
+ Client2 = spawn_client(Pool),
+ ?assertEqual(ok, ping_client(Client2)),
+ Worker2 = get_client_worker(Client2, "2"),
+ ?assert(is_process_alive(Worker2)),
+
+ ?assertNotEqual(Worker1, Worker2),
+ ?assertEqual(ok, stop_client(Client2))
+ end).
+
+
+spawn_client(Pool) ->
+ Parent = self(),
+ Ref = make_ref(),
+ Pid = spawn(fun() ->
+ {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool),
+ loop(Parent, Ref, Worker, Pool)
+ end),
+ {Pid, Ref}.
+
+ping_client({Pid, Ref}) ->
+ Pid ! ping,
+ receive
+ {pong, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end.
+
+get_client_worker({Pid, Ref}, ClientName) ->
+ Pid ! get_worker,
+ receive
+ {worker, Ref, Worker} ->
+ Worker
+ after ?TIMEOUT ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, "Timeout getting client " ++ ClientName ++ " worker"}]})
+ end.
+
+stop_client({Pid, Ref}) ->
+ Pid ! stop,
+ receive
+ {stop, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end.
+
+kill_client_worker({Pid, Ref}) ->
+ Pid ! get_worker,
+ receive
+ {worker, Ref, Worker} ->
+ exit(Worker, kill),
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end.
+
+loop(Parent, Ref, Worker, Pool) ->
+ receive
+ ping ->
+ Parent ! {pong, Ref},
+ loop(Parent, Ref, Worker, Pool);
+ get_worker ->
+ Parent ! {worker, Ref, Worker},
+ loop(Parent, Ref, Worker, Pool);
+ stop ->
+ couch_replicator_httpc_pool:release_worker(Pool, Worker),
+ Parent ! {stop, Ref}
+ end.
+
+spawn_pool() ->
+ Host = couch_config:get("httpd", "bind_address", "127.0.0.1"),
+ Port = couch_config:get("httpd", "port", "5984"),
+ {ok, Pool} = couch_replicator_httpc_pool:start_link(
+ "http://" ++ Host ++ ":" ++ Port, [{max_connections, 3}]),
+ Pool.
+
+stop_pool(Pool) ->
+ ok = couch_replicator_httpc_pool:stop(Pool).
diff --git a/src/couch_replicator/test/couch_replicator_large_atts_tests.erl b/src/couch_replicator/test/couch_replicator_large_atts_tests.erl
new file mode 100644
index 000000000..7c4e334be
--- /dev/null
+++ b/src/couch_replicator/test/couch_replicator_large_atts_tests.erl
@@ -0,0 +1,218 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_large_atts_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_ROLE, #user_ctx{roles=[<<"_admin">>]}).
+-define(ADMIN_USER, {user_ctx, ?ADMIN_ROLE}).
+-define(ATT_SIZE_1, 2 * 1024 * 1024).
+-define(ATT_SIZE_2, round(6.6 * 1024 * 1024)).
+-define(DOCS_COUNT, 11).
+-define(TIMEOUT_EUNIT, 30).
+-define(TIMEOUT_STOP, 1000).
+
+
+setup() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+ ok = couch_db:close(Db),
+ DbName.
+
+setup(local) ->
+ setup();
+setup(remote) ->
+ {remote, setup()};
+setup({A, B}) ->
+ {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ couch_config:set("attachments", "compressible_types", "text/*", false),
+ Source = setup(A),
+ Target = setup(B),
+ {Source, Target}.
+
+teardown({remote, DbName}) ->
+ teardown(DbName);
+teardown(DbName) ->
+ ok = couch_server:delete(DbName, [?ADMIN_USER]),
+ ok.
+
+teardown(_, {Source, Target}) ->
+ teardown(Source),
+ teardown(Target),
+
+ Pid = whereis(couch_server_sup),
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT_STOP ->
+ throw({timeout, server_stop})
+ end.
+
+
+large_atts_test_() ->
+ Pairs = [{local, local}, {local, remote},
+ {remote, local}, {remote, remote}],
+ {
+ "Replicate docs with large attachments",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{Pair, fun should_populate_replicate_compact/2}
+ || Pair <- Pairs]
+ }
+ }.
+
+
+should_populate_replicate_compact({From, To}, {Source, Target}) ->
+ {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [should_populate_source(Source),
+ should_replicate(Source, Target),
+ should_compare_databases(Source, Target)]}}.
+
+should_populate_source({remote, Source}) ->
+ should_populate_source(Source);
+should_populate_source(Source) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source, ?DOCS_COUNT))}.
+
+should_replicate({remote, Source}, Target) ->
+ should_replicate(db_url(Source), Target);
+should_replicate(Source, {remote, Target}) ->
+ should_replicate(Source, db_url(Target));
+should_replicate(Source, Target) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
+
+should_compare_databases({remote, Source}, Target) ->
+ should_compare_databases(Source, Target);
+should_compare_databases(Source, {remote, Target}) ->
+ should_compare_databases(Source, Target);
+should_compare_databases(Source, Target) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
+
+
+populate_db(DbName, DocCount) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ Docs = lists:foldl(
+ fun(DocIdCounter, Acc) ->
+ Doc = #doc{
+ id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
+ body = {[]},
+ atts = [
+ att(<<"att1">>, ?ATT_SIZE_1, <<"text/plain">>),
+ att(<<"att2">>, ?ATT_SIZE_2, <<"app/binary">>)
+ ]
+ },
+ [Doc | Acc]
+ end,
+ [], lists:seq(1, DocCount)),
+ {ok, _} = couch_db:update_docs(Db, Docs, []),
+ couch_db:close(Db).
+
+compare_dbs(Source, Target) ->
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ {ok, TargetDb} = couch_db:open_int(Target, []),
+
+ Fun = fun(FullDocInfo, _, Acc) ->
+ {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo),
+ Id = DocSource#doc.id,
+
+ {ok, DocTarget} = couch_db:open_doc(TargetDb, Id),
+ ?assertEqual(DocSource#doc.body, DocTarget#doc.body),
+
+ #doc{atts = SourceAtts} = DocSource,
+ #doc{atts = TargetAtts} = DocTarget,
+ ?assertEqual(lists:sort([N || #att{name = N} <- SourceAtts]),
+ lists:sort([N || #att{name = N} <- TargetAtts])),
+
+ FunCompareAtts = fun(#att{name = AttName} = Att) ->
+ {ok, AttTarget} = find_att(TargetAtts, AttName),
+ SourceMd5 = att_md5(Att),
+ TargetMd5 = att_md5(AttTarget),
+ case AttName of
+ <<"att1">> ->
+ ?assertEqual(gzip, Att#att.encoding),
+ ?assertEqual(gzip, AttTarget#att.encoding),
+ DecSourceMd5 = att_decoded_md5(Att),
+ DecTargetMd5 = att_decoded_md5(AttTarget),
+ ?assertEqual(DecSourceMd5, DecTargetMd5);
+ _ ->
+ ?assertEqual(identity, Att#att.encoding),
+ ?assertEqual(identity, AttTarget#att.encoding)
+ end,
+ ?assertEqual(SourceMd5, TargetMd5),
+ ?assert(is_integer(Att#att.disk_len)),
+ ?assert(is_integer(Att#att.att_len)),
+ ?assert(is_integer(AttTarget#att.disk_len)),
+ ?assert(is_integer(AttTarget#att.att_len)),
+ ?assertEqual(Att#att.disk_len, AttTarget#att.disk_len),
+ ?assertEqual(Att#att.att_len, AttTarget#att.att_len),
+ ?assertEqual(Att#att.type, AttTarget#att.type),
+ ?assertEqual(Att#att.md5, AttTarget#att.md5)
+ end,
+
+ lists:foreach(FunCompareAtts, SourceAtts),
+
+ {ok, Acc}
+ end,
+
+ {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+ ok = couch_db:close(SourceDb),
+ ok = couch_db:close(TargetDb).
+
+att(Name, Size, Type) ->
+ #att{
+ name = Name,
+ type = Type,
+ att_len = Size,
+ data = fun(Count) -> crypto:rand_bytes(Count) end
+ }.
+
+find_att([], _Name) ->
+ nil;
+find_att([#att{name = Name} = Att | _], Name) ->
+ {ok, Att};
+find_att([_ | Rest], Name) ->
+ find_att(Rest, Name).
+
+att_md5(Att) ->
+ Md50 = couch_doc:att_foldl(
+ Att,
+ fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+ couch_util:md5_init()),
+ couch_util:md5_final(Md50).
+
+att_decoded_md5(Att) ->
+ Md50 = couch_doc:att_foldl_decode(
+ Att,
+ fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+ couch_util:md5_init()),
+ couch_util:md5_final(Md50).
+
+db_url(DbName) ->
+ iolist_to_binary([
+ "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+ ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ "/", DbName
+ ]).
+
+replicate(Source, Target) ->
+ RepObject = {[{<<"source">>, Source}, {<<"target">>, Target}]},
+ {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_ROLE),
+ {ok, Pid} = couch_replicator:async_replicate(Rep),
+ MonRef = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', MonRef, process, Pid, _} ->
+ ok
+ end.
diff --git a/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl b/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl
new file mode 100644
index 000000000..27d51db9f
--- /dev/null
+++ b/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl
@@ -0,0 +1,232 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_many_leaves_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_ROLE, #user_ctx{roles=[<<"_admin">>]}).
+-define(ADMIN_USER, {user_ctx, ?ADMIN_ROLE}).
+-define(DOCS_CONFLICTS, [
+ {<<"doc1">>, 10},
+ {<<"doc2">>, 100},
+ % a number > MaxURLlength (7000) / length(DocRevisionString)
+ {<<"doc3">>, 210}
+]).
+-define(NUM_ATTS, 2).
+-define(TIMEOUT_STOP, 1000).
+-define(TIMEOUT_EUNIT, 60).
+-define(i2l(I), integer_to_list(I)).
+-define(io2b(Io), iolist_to_binary(Io)).
+
+setup() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+ ok = couch_db:close(Db),
+ DbName.
+
+setup(local) ->
+ setup();
+setup(remote) ->
+ {remote, setup()};
+setup({A, B}) ->
+ {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Source = setup(A),
+ Target = setup(B),
+ {Source, Target}.
+
+teardown({remote, DbName}) ->
+ teardown(DbName);
+teardown(DbName) ->
+ ok = couch_server:delete(DbName, [?ADMIN_USER]),
+ ok.
+
+teardown(_, {Source, Target}) ->
+ teardown(Source),
+ teardown(Target),
+
+ Pid = whereis(couch_server_sup),
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT_STOP ->
+ throw({timeout, server_stop})
+ end.
+
+
+docs_with_many_leaves_test_() ->
+ Pairs = [{local, local}, {local, remote},
+ {remote, local}, {remote, remote}],
+ {
+ "Replicate documents with many leaves",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{Pair, fun should_populate_replicate_compact/2}
+ || Pair <- Pairs]
+ }
+ }.
+
+
+should_populate_replicate_compact({From, To}, {Source, Target}) ->
+ {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_populate_source(Source),
+ should_replicate(Source, Target),
+ should_verify_target(Source, Target),
+ should_add_attachments_to_source(Source),
+ should_replicate(Source, Target),
+ should_verify_target(Source, Target)
+ ]}}.
+
+should_populate_source({remote, Source}) ->
+ should_populate_source(Source);
+should_populate_source(Source) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source))}.
+
+should_replicate({remote, Source}, Target) ->
+ should_replicate(db_url(Source), Target);
+should_replicate(Source, {remote, Target}) ->
+ should_replicate(Source, db_url(Target));
+should_replicate(Source, Target) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
+
+should_verify_target({remote, Source}, Target) ->
+ should_verify_target(Source, Target);
+should_verify_target(Source, {remote, Target}) ->
+ should_verify_target(Source, Target);
+should_verify_target(Source, Target) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(begin
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ {ok, TargetDb} = couch_db:open_int(Target, []),
+ verify_target(SourceDb, TargetDb, ?DOCS_CONFLICTS),
+ ok = couch_db:close(SourceDb),
+ ok = couch_db:close(TargetDb)
+ end)}.
+
+should_add_attachments_to_source({remote, Source}) ->
+ should_add_attachments_to_source(Source);
+should_add_attachments_to_source(Source) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(begin
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ add_attachments(SourceDb, ?NUM_ATTS, ?DOCS_CONFLICTS),
+ ok = couch_db:close(SourceDb)
+ end)}.
+
+populate_db(DbName) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ lists:foreach(
+ fun({DocId, NumConflicts}) ->
+ Value = <<"0">>,
+ Doc = #doc{
+ id = DocId,
+ body = {[ {<<"value">>, Value} ]}
+ },
+ {ok, _} = couch_db:update_doc(Db, Doc, []),
+ {ok, _} = add_doc_siblings(Db, DocId, NumConflicts)
+ end, ?DOCS_CONFLICTS),
+ couch_db:close(Db).
+
+add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 ->
+ add_doc_siblings(Db, DocId, NumLeaves, [], []).
+
+add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) ->
+ {ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes),
+ {ok, AccRevs};
+
+add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) ->
+ Value = ?l2b(?i2l(NumLeaves)),
+ Rev = couch_util:md5(Value),
+ Doc = #doc{
+ id = DocId,
+ revs = {1, [Rev]},
+ body = {[ {<<"value">>, Value} ]}
+ },
+ add_doc_siblings(Db, DocId, NumLeaves - 1,
+ [Doc | AccDocs], [{1, Rev} | AccRevs]).
+
+verify_target(_SourceDb, _TargetDb, []) ->
+ ok;
+verify_target(SourceDb, TargetDb, [{DocId, NumConflicts} | Rest]) ->
+ {ok, SourceLookups} = couch_db:open_doc_revs(
+ SourceDb,
+ DocId,
+ all,
+ [conflicts, deleted_conflicts]),
+ {ok, TargetLookups} = couch_db:open_doc_revs(
+ TargetDb,
+ DocId,
+ all,
+ [conflicts, deleted_conflicts]),
+ SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
+ TargetDocs = [Doc || {ok, Doc} <- TargetLookups],
+ Total = NumConflicts + 1,
+ ?assertEqual(Total, length(TargetDocs)),
+ lists:foreach(
+ fun({SourceDoc, TargetDoc}) ->
+ SourceJson = couch_doc:to_json_obj(SourceDoc, [attachments]),
+ TargetJson = couch_doc:to_json_obj(TargetDoc, [attachments]),
+ ?assertEqual(SourceJson, TargetJson)
+ end,
+ lists:zip(SourceDocs, TargetDocs)),
+ verify_target(SourceDb, TargetDb, Rest).
+
+add_attachments(_SourceDb, _NumAtts, []) ->
+ ok;
+add_attachments(SourceDb, NumAtts, [{DocId, NumConflicts} | Rest]) ->
+ {ok, SourceLookups} = couch_db:open_doc_revs(SourceDb, DocId, all, []),
+ SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
+ Total = NumConflicts + 1,
+ ?assertEqual(Total, length(SourceDocs)),
+ NewDocs = lists:foldl(
+ fun(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) ->
+ NewAtts = lists:foldl(fun(I, AttAcc) ->
+ AttData = crypto:rand_bytes(100),
+ NewAtt = #att{
+ name = ?io2b(["att_", ?i2l(I), "_",
+ couch_doc:rev_to_str({Pos, Rev})]),
+ type = <<"application/foobar">>,
+ att_len = byte_size(AttData),
+ data = AttData
+ },
+ [NewAtt | AttAcc]
+ end, [], lists:seq(1, NumAtts)),
+ [Doc#doc{atts = Atts ++ NewAtts} | Acc]
+ end,
+ [], SourceDocs),
+ {ok, UpdateResults} = couch_db:update_docs(SourceDb, NewDocs, []),
+ NewRevs = [R || {ok, R} <- UpdateResults],
+ ?assertEqual(length(NewDocs), length(NewRevs)),
+ add_attachments(SourceDb, NumAtts, Rest).
+
+db_url(DbName) ->
+ iolist_to_binary([
+ "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+ ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ "/", DbName
+ ]).
+
+replicate(Source, Target) ->
+ RepObject = {[
+ {<<"source">>, Source},
+ {<<"target">>, Target}
+ ]},
+ {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_ROLE),
+ {ok, Pid} = couch_replicator:async_replicate(Rep),
+ MonRef = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', MonRef, process, Pid, _} ->
+ ok
+ end.
diff --git a/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl b/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl
new file mode 100644
index 000000000..8c6492905
--- /dev/null
+++ b/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl
@@ -0,0 +1,260 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_missing_stubs_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_ROLE, #user_ctx{roles=[<<"_admin">>]}).
+-define(ADMIN_USER, {user_ctx, ?ADMIN_ROLE}).
+-define(REVS_LIMIT, 3).
+-define(TIMEOUT_STOP, 1000).
+-define(TIMEOUT_EUNIT, 30).
+
+
+setup() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+ ok = couch_db:close(Db),
+ DbName.
+
+setup(local) ->
+ setup();
+setup(remote) ->
+ {remote, setup()};
+setup({A, B}) ->
+ {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Source = setup(A),
+ Target = setup(B),
+ {Source, Target}.
+
+teardown({remote, DbName}) ->
+ teardown(DbName);
+teardown(DbName) ->
+ ok = couch_server:delete(DbName, [?ADMIN_USER]),
+ ok.
+
+teardown(_, {Source, Target}) ->
+ teardown(Source),
+ teardown(Target),
+
+ Pid = whereis(couch_server_sup),
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT_STOP ->
+ throw({timeout, server_stop})
+ end.
+
+
+missing_stubs_test_() ->
+ Pairs = [{local, local}, {local, remote},
+ {remote, local}, {remote, remote}],
+ {
+ "Replicate docs with missing stubs (COUCHDB-1365)",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{Pair, fun should_replicate_docs_with_missed_att_stubs/2}
+ || Pair <- Pairs]
+ }
+ }.
+
+
+should_replicate_docs_with_missed_att_stubs({From, To}, {Source, Target}) ->
+ {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_populate_source(Source),
+ should_set_target_revs_limit(Target, ?REVS_LIMIT),
+ should_replicate(Source, Target),
+ should_compare_databases(Source, Target),
+ should_update_source_docs(Source, ?REVS_LIMIT * 2),
+ should_replicate(Source, Target),
+ should_compare_databases(Source, Target)
+ ]}}.
+
+should_populate_source({remote, Source}) ->
+ should_populate_source(Source);
+should_populate_source(Source) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source))}.
+
+should_replicate({remote, Source}, Target) ->
+ should_replicate(db_url(Source), Target);
+should_replicate(Source, {remote, Target}) ->
+ should_replicate(Source, db_url(Target));
+should_replicate(Source, Target) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
+
+should_set_target_revs_limit({remote, Target}, RevsLimit) ->
+ should_set_target_revs_limit(Target, RevsLimit);
+should_set_target_revs_limit(Target, RevsLimit) ->
+ ?_test(begin
+ {ok, Db} = couch_db:open_int(Target, [?ADMIN_USER]),
+ ?assertEqual(ok, couch_db:set_revs_limit(Db, RevsLimit)),
+ ok = couch_db:close(Db)
+ end).
+
+should_compare_databases({remote, Source}, Target) ->
+ should_compare_databases(Source, Target);
+should_compare_databases(Source, {remote, Target}) ->
+ should_compare_databases(Source, Target);
+should_compare_databases(Source, Target) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
+
+should_update_source_docs({remote, Source}, Times) ->
+ should_update_source_docs(Source, Times);
+should_update_source_docs(Source, Times) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(update_db_docs(Source, Times))}.
+
+
+populate_db(DbName) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ AttData = crypto:rand_bytes(6000),
+ Doc = #doc{
+ id = <<"doc1">>,
+ atts = [
+ #att{
+ name = <<"doc1_att1">>,
+ type = <<"application/foobar">>,
+ att_len = byte_size(AttData),
+ data = AttData
+ }
+ ]
+ },
+ {ok, _} = couch_db:update_doc(Db, Doc, []),
+ couch_db:close(Db).
+
+update_db_docs(DbName, Times) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ {ok, _, _} = couch_db:enum_docs(
+ Db,
+ fun(FDI, _, Acc) -> db_fold_fun(FDI, Acc) end,
+ {DbName, Times},
+ []),
+ ok = couch_db:close(Db).
+
+db_fold_fun(FullDocInfo, {DbName, Times}) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ {ok, Doc} = couch_db:open_doc(Db, FullDocInfo),
+ lists:foldl(
+ fun(_, {Pos, RevId}) ->
+ {ok, Db2} = couch_db:reopen(Db),
+ NewDocVersion = Doc#doc{
+ revs = {Pos, [RevId]},
+ body = {[{<<"value">>, base64:encode(crypto:rand_bytes(100))}]}
+ },
+ {ok, NewRev} = couch_db:update_doc(Db2, NewDocVersion, []),
+ NewRev
+ end,
+ {element(1, Doc#doc.revs), hd(element(2, Doc#doc.revs))},
+ lists:seq(1, Times)),
+ ok = couch_db:close(Db),
+ {ok, {DbName, Times}}.
+
+compare_dbs(Source, Target) ->
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ {ok, TargetDb} = couch_db:open_int(Target, []),
+
+ Fun = fun(FullDocInfo, _, Acc) ->
+ {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo,
+ [conflicts, deleted_conflicts]),
+ Id = DocSource#doc.id,
+
+ {ok, DocTarget} = couch_db:open_doc(TargetDb, Id,
+ [conflicts, deleted_conflicts]),
+ ?assertEqual(DocSource#doc.body, DocTarget#doc.body),
+
+ ?assertEqual(couch_doc:to_json_obj(DocSource, []),
+ couch_doc:to_json_obj(DocTarget, [])),
+
+ #doc{atts = SourceAtts} = DocSource,
+ #doc{atts = TargetAtts} = DocTarget,
+ ?assertEqual(lists:sort([N || #att{name = N} <- SourceAtts]),
+ lists:sort([N || #att{name = N} <- TargetAtts])),
+
+ lists:foreach(
+ fun(#att{name = AttName} = Att) ->
+ {ok, AttTarget} = find_att(TargetAtts, AttName),
+ SourceMd5 = att_md5(Att),
+ TargetMd5 = att_md5(AttTarget),
+ case AttName of
+ <<"att1">> ->
+ ?assertEqual(gzip, Att#att.encoding),
+ ?assertEqual(gzip, AttTarget#att.encoding),
+ DecSourceMd5 = att_decoded_md5(Att),
+ DecTargetMd5 = att_decoded_md5(AttTarget),
+ ?assertEqual(DecSourceMd5, DecTargetMd5);
+ _ ->
+ ?assertEqual(identity, Att#att.encoding),
+ ?assertEqual(identity, AttTarget#att.encoding)
+ end,
+ ?assertEqual(SourceMd5, TargetMd5),
+ ?assert(is_integer(Att#att.disk_len)),
+ ?assert(is_integer(Att#att.att_len)),
+ ?assert(is_integer(AttTarget#att.disk_len)),
+ ?assert(is_integer(AttTarget#att.att_len)),
+ ?assertEqual(Att#att.disk_len, AttTarget#att.disk_len),
+ ?assertEqual(Att#att.att_len, AttTarget#att.att_len),
+ ?assertEqual(Att#att.type, AttTarget#att.type),
+ ?assertEqual(Att#att.md5, AttTarget#att.md5)
+ end,
+ SourceAtts),
+ {ok, Acc}
+ end,
+
+ {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+ ok = couch_db:close(SourceDb),
+ ok = couch_db:close(TargetDb).
+
+find_att([], _Name) ->
+ nil;
+find_att([#att{name = Name} = Att | _], Name) ->
+ {ok, Att};
+find_att([_ | Rest], Name) ->
+ find_att(Rest, Name).
+
+att_md5(Att) ->
+ Md50 = couch_doc:att_foldl(
+ Att,
+ fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+ couch_util:md5_init()),
+ couch_util:md5_final(Md50).
+
+att_decoded_md5(Att) ->
+ Md50 = couch_doc:att_foldl_decode(
+ Att,
+ fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+ couch_util:md5_init()),
+ couch_util:md5_final(Md50).
+
+db_url(DbName) ->
+ iolist_to_binary([
+ "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+ ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ "/", DbName
+ ]).
+
+replicate(Source, Target) ->
+ RepObject = {[
+ {<<"source">>, Source},
+ {<<"target">>, Target}
+ ]},
+ {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_ROLE),
+ {ok, Pid} = couch_replicator:async_replicate(Rep),
+ MonRef = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', MonRef, process, Pid, _} ->
+ ok
+ end.
diff --git a/src/couch_replicator/test/01-load.t b/src/couch_replicator/test/couch_replicator_modules_load_tests.erl
index 8bd82ddc7..7107b9e49 100644
--- a/src/couch_replicator/test/01-load.t
+++ b/src/couch_replicator/test/couch_replicator_modules_load_tests.erl
@@ -1,6 +1,3 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
@@ -13,10 +10,19 @@
% License for the specific language governing permissions and limitations under
% the License.
-% Test that we can load each module.
+-module(couch_replicator_modules_load_tests).
+
+-include("couch_eunit.hrl").
+
+
+modules_load_test_() ->
+ {
+        "Verify that all modules load",
+ should_load_modules()
+ }.
+
-main(_) ->
- test_util:init_code_path(),
+should_load_modules() ->
Modules = [
couch_replicator_api_wrap,
couch_replicator_httpc,
@@ -28,10 +34,7 @@ main(_) ->
couch_replicator_utils,
couch_replicator_job_sup
],
+ [should_load_module(Mod) || Mod <- Modules].
- etap:plan(length(Modules)),
- lists:foreach(
- fun(Module) ->
- etap:loaded_ok(Module, lists:concat(["Loaded: ", Module]))
- end, Modules),
- etap:end_tests().
+should_load_module(Mod) ->
+ {atom_to_list(Mod), ?_assertMatch({module, _}, code:load_file(Mod))}.
diff --git a/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl b/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl
new file mode 100644
index 000000000..5356a37c2
--- /dev/null
+++ b/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl
@@ -0,0 +1,200 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_use_checkpoints_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_ROLE, #user_ctx{roles=[<<"_admin">>]}).
+-define(ADMIN_USER, {user_ctx, ?ADMIN_ROLE}).
+-define(DOCS_COUNT, 100).
+-define(TIMEOUT_STOP, 1000).
+-define(TIMEOUT_EUNIT, 30).
+-define(i2l(I), integer_to_list(I)).
+-define(io2b(Io), iolist_to_binary(Io)).
+
+
+start(false) ->
+ fun
+ ({finished, _, {CheckpointHistory}}) ->
+ ?assertEqual([{<<"use_checkpoints">>,false}], CheckpointHistory);
+ (_) ->
+ ok
+ end;
+start(true) ->
+ fun
+ ({finished, _, {CheckpointHistory}}) ->
+ ?assertNotEqual(false, lists:keyfind(<<"session_id">>,
+ 1, CheckpointHistory));
+ (_) ->
+ ok
+ end.
+
+stop(_, _) ->
+ ok.
+
+setup() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+ ok = couch_db:close(Db),
+ DbName.
+
+setup(local) ->
+ setup();
+setup(remote) ->
+ {remote, setup()};
+setup({_, Fun, {A, B}}) ->
+ {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ {ok, Listener} = couch_replicator_notifier:start_link(Fun),
+ Source = setup(A),
+ Target = setup(B),
+ {Source, Target, Listener}.
+
+teardown({remote, DbName}) ->
+ teardown(DbName);
+teardown(DbName) ->
+ ok = couch_server:delete(DbName, [?ADMIN_USER]),
+ ok.
+
+teardown(_, {Source, Target, Listener}) ->
+ teardown(Source),
+ teardown(Target),
+
+ couch_replicator_notifier:stop(Listener),
+ Pid = whereis(couch_server_sup),
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT_STOP ->
+ throw({timeout, server_stop})
+ end.
+
+
+use_checkpoints_test_() ->
+ {
+ "Replication use_checkpoints feature tests",
+ {
+ foreachx,
+ fun start/1, fun stop/2,
+ [{UseCheckpoints, fun use_checkpoints_tests/2}
+ || UseCheckpoints <- [false, true]]
+ }
+ }.
+
+use_checkpoints_tests(UseCheckpoints, Fun) ->
+ Pairs = [{local, local}, {local, remote},
+ {remote, local}, {remote, remote}],
+ {
+ "use_checkpoints: " ++ atom_to_list(UseCheckpoints),
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{{UseCheckpoints, Fun, Pair}, fun should_test_checkpoints/2}
+ || Pair <- Pairs]
+ }
+ }.
+
+should_test_checkpoints({UseCheckpoints, _, {From, To}}, {Source, Target, _}) ->
+ should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}).
+should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}) ->
+ {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_populate_source(Source, ?DOCS_COUNT),
+ should_replicate(Source, Target, UseCheckpoints),
+ should_compare_databases(Source, Target)
+ ]}}.
+
+should_populate_source({remote, Source}, DocCount) ->
+ should_populate_source(Source, DocCount);
+should_populate_source(Source, DocCount) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source, DocCount))}.
+
+should_replicate({remote, Source}, Target, UseCheckpoints) ->
+ should_replicate(db_url(Source), Target, UseCheckpoints);
+should_replicate(Source, {remote, Target}, UseCheckpoints) ->
+ should_replicate(Source, db_url(Target), UseCheckpoints);
+should_replicate(Source, Target, UseCheckpoints) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target, UseCheckpoints))}.
+
+should_compare_databases({remote, Source}, Target) ->
+ should_compare_databases(Source, Target);
+should_compare_databases(Source, {remote, Target}) ->
+ should_compare_databases(Source, Target);
+should_compare_databases(Source, Target) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
+
+
+populate_db(DbName, DocCount) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ Docs = lists:foldl(
+ fun(DocIdCounter, Acc) ->
+ Id = ?io2b(["doc", ?i2l(DocIdCounter)]),
+ Value = ?io2b(["val", ?i2l(DocIdCounter)]),
+ Doc = #doc{
+ id = Id,
+ body = {[ {<<"value">>, Value} ]}
+ },
+ [Doc | Acc]
+ end,
+ [], lists:seq(1, DocCount)),
+ {ok, _} = couch_db:update_docs(Db, Docs, []),
+ ok = couch_db:close(Db).
+
+compare_dbs(Source, Target) ->
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ {ok, TargetDb} = couch_db:open_int(Target, []),
+ Fun = fun(FullDocInfo, _, Acc) ->
+ {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
+ {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
+ DocId = couch_util:get_value(<<"_id">>, Props),
+ DocTarget = case couch_db:open_doc(TargetDb, DocId) of
+ {ok, DocT} ->
+ DocT;
+ Error ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, lists:concat(["Error opening document '",
+ ?b2l(DocId), "' from target: ",
+ couch_util:to_list(Error)])}]})
+ end,
+ DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
+ ?assertEqual(DocJson, DocTargetJson),
+ {ok, Acc}
+ end,
+ {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+ ok = couch_db:close(SourceDb),
+ ok = couch_db:close(TargetDb).
+
+db_url(DbName) ->
+ iolist_to_binary([
+ "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+ ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ "/", DbName
+ ]).
+
+replicate(Source, Target, UseCheckpoints) ->
+ RepObject = {[
+ {<<"source">>, Source},
+ {<<"target">>, Target},
+ {<<"use_checkpoints">>, UseCheckpoints}
+ ]},
+ {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_ROLE),
+ {ok, Pid} = couch_replicator:async_replicate(Rep),
+ MonRef = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', MonRef, process, Pid, _} ->
+ ok
+ end.
diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl
index ce45ab81b..58204e2cf 100644
--- a/src/couchdb/couch_key_tree.erl
+++ b/src/couchdb/couch_key_tree.erl
@@ -418,5 +418,5 @@ value_pref(Last, _) ->
Last.
-% Tests moved to test/etap/06?-*.t
+% Tests moved to test/couchdb/couch_key_tree_tests.erl
diff --git a/src/etap/etap.erl b/src/etap/etap.erl
deleted file mode 100644
index ae3896c01..000000000
--- a/src/etap/etap.erl
+++ /dev/null
@@ -1,614 +0,0 @@
-%% Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
-%%
-%% Permission is hereby granted, free of charge, to any person
-%% obtaining a copy of this software and associated documentation
-%% files (the "Software"), to deal in the Software without
-%% restriction, including without limitation the rights to use,
-%% copy, modify, merge, publish, distribute, sublicense, and/or sell
-%% copies of the Software, and to permit persons to whom the
-%% Software is furnished to do so, subject to the following
-%% conditions:
-%%
-%% The above copyright notice and this permission notice shall be
-%% included in all copies or substantial portions of the Software.
-%%
-%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-%% OTHER DEALINGS IN THE SOFTWARE.
-%%
-%% @author Nick Gerakines <nick@gerakines.net> [http://socklabs.com/]
-%% @author Jeremy Wall <jeremy@marzhillstudios.com>
-%% @version 0.3.4
-%% @copyright 2007-2008 Jeremy Wall, 2008-2009 Nick Gerakines
-%% @reference http://testanything.org/wiki/index.php/Main_Page
-%% @reference http://en.wikipedia.org/wiki/Test_Anything_Protocol
-%% @todo Finish implementing the skip directive.
-%% @todo Document the messages handled by this receive loop.
-%% @todo Explain in documentation why we use a process to handle test input.
-%% @doc etap is a TAP testing module for Erlang components and applications.
-%% This module allows developers to test their software using the TAP method.
-%%
-%% <blockquote cite="http://en.wikipedia.org/wiki/Test_Anything_Protocol"><p>
-%% TAP, the Test Anything Protocol, is a simple text-based interface between
-%% testing modules in a test harness. TAP started life as part of the test
-%% harness for Perl but now has implementations in C/C++, Python, PHP, Perl
-%% and probably others by the time you read this.
-%% </p></blockquote>
-%%
-%% The testing process begins by defining a plan using etap:plan/1, running
-%% a number of etap tests and then calling eta:end_tests/0. Please refer to
-%% the Erlang modules in the t directory of this project for example tests.
--module(etap).
--vsn("0.3.4").
-
--export([
- ensure_test_server/0,
- start_etap_server/0,
- test_server/1,
- msg/1, msg/2,
- diag/1, diag/2,
- expectation_mismatch_message/3,
- plan/1,
- end_tests/0,
- not_ok/2, ok/2, is_ok/2, is/3, isnt/3, any/3, none/3,
- fun_is/3, expect_fun/3, expect_fun/4,
- is_greater/3,
- skip/1, skip/2,
- datetime/1,
- skip/3,
- bail/0, bail/1,
- test_state/0, failure_count/0
-]).
-
--export([
- contains_ok/3,
- is_before/4
-]).
-
--export([
- is_pid/2,
- is_alive/2,
- is_mfa/3
-]).
-
--export([
- loaded_ok/2,
- can_ok/2, can_ok/3,
- has_attrib/2, is_attrib/3,
- is_behaviour/2
-]).
-
--export([
- dies_ok/2,
- lives_ok/2,
- throws_ok/3
-]).
-
-
--record(test_state, {
- planned = 0,
- count = 0,
- pass = 0,
- fail = 0,
- skip = 0,
- skip_reason = ""
-}).
-
-%% @spec plan(N) -> Result
-%% N = unknown | skip | {skip, string()} | integer()
-%% Result = ok
-%% @doc Create a test plan and boot strap the test server.
-plan(unknown) ->
- ensure_test_server(),
- etap_server ! {self(), plan, unknown},
- ok;
-plan(skip) ->
- io:format("1..0 # skip~n");
-plan({skip, Reason}) ->
- io:format("1..0 # skip ~s~n", [Reason]);
-plan(N) when is_integer(N), N > 0 ->
- ensure_test_server(),
- etap_server ! {self(), plan, N},
- ok.
-
-%% @spec end_tests() -> ok
-%% @doc End the current test plan and output test results.
-%% @todo This should probably be done in the test_server process.
-end_tests() ->
- Ref = make_ref(),
- case whereis(etap_server) of
- undefined -> self() ! {Ref, true};
- _ -> etap_server ! {self(), state, Ref}
- end,
- State = receive {Ref, X} -> X end,
- if
- is_record(State, test_state) andalso State#test_state.planned == -1 ->
- io:format("1..~p~n", [State#test_state.count]);
- true ->
- ok
- end,
- case whereis(etap_server) of
- undefined -> ok;
- _ -> etap_server ! done, ok
- end.
-
-bail() ->
- bail("").
-
-bail(Reason) ->
- etap_server ! {self(), diag, "Bail out! " ++ Reason},
- etap_server ! done, ok,
- ok.
-
-%% @spec test_state() -> Return
-%% Return = test_state_record() | {error, string()}
-%% @doc Return the current test state
-test_state() ->
- etap_server ! {self(), state},
- receive
- X when is_record(X, test_state) -> X
- after
- 1000 -> {error, "Timed out waiting for etap server reply.~n"}
- end.
-
-%% @spec failure_count() -> Return
-%% Return = integer() | {error, string()}
-%% @doc Return the current failure count
-failure_count() ->
- case test_state() of
- #test_state{fail=FailureCount} -> FailureCount;
- X -> X
- end.
-
-%% @spec msg(S) -> ok
-%% S = string()
-%% @doc Print a message in the test output.
-msg(S) -> etap_server ! {self(), diag, S}, ok.
-
-%% @spec msg(Format, Data) -> ok
-%% Format = atom() | string() | binary()
-%% Data = [term()]
-%% UnicodeList = [Unicode]
-%% Unicode = int()
-%% @doc Print a message in the test output.
-%% Function arguments are passed through io_lib:format/2.
-msg(Format, Data) -> msg(io_lib:format(Format, Data)).
-
-%% @spec diag(S) -> ok
-%% S = string()
-%% @doc Print a debug/status message related to the test suite.
-diag(S) -> msg("# " ++ S).
-
-%% @spec diag(Format, Data) -> ok
-%% Format = atom() | string() | binary()
-%% Data = [term()]
-%% UnicodeList = [Unicode]
-%% Unicode = int()
-%% @doc Print a debug/status message related to the test suite.
-%% Function arguments are passed through io_lib:format/2.
-diag(Format, Data) -> diag(io_lib:format(Format, Data)).
-
-%% @spec expectation_mismatch_message(Got, Expected, Desc) -> ok
-%% Got = any()
-%% Expected = any()
-%% Desc = string()
-%% @doc Print an expectation mismatch message in the test output.
-expectation_mismatch_message(Got, Expected, Desc) ->
- msg(" ---"),
- msg(" description: ~p", [Desc]),
- msg(" found: ~p", [Got]),
- msg(" wanted: ~p", [Expected]),
- msg(" ..."),
- ok.
-
-% @spec evaluate(Pass, Got, Expected, Desc) -> Result
-%% Pass = true | false
-%% Got = any()
-%% Expected = any()
-%% Desc = string()
-%% Result = true | false
-%% @doc Evaluate a test statement, printing an expectation mismatch message
-%% if the test failed.
-evaluate(Pass, Got, Expected, Desc) ->
- case mk_tap(Pass, Desc) of
- false ->
- expectation_mismatch_message(Got, Expected, Desc),
- false;
- true ->
- true
- end.
-
-%% @spec ok(Expr, Desc) -> Result
-%% Expr = true | false
-%% Desc = string()
-%% Result = true | false
-%% @doc Assert that a statement is true.
-ok(Expr, Desc) -> evaluate(Expr == true, Expr, true, Desc).
-
-%% @spec not_ok(Expr, Desc) -> Result
-%% Expr = true | false
-%% Desc = string()
-%% Result = true | false
-%% @doc Assert that a statement is false.
-not_ok(Expr, Desc) -> evaluate(Expr == false, Expr, false, Desc).
-
-%% @spec is_ok(Expr, Desc) -> Result
-%% Expr = any()
-%% Desc = string()
-%% Result = true | false
-%% @doc Assert that two values are the same.
-is_ok(Expr, Desc) -> evaluate(Expr == ok, Expr, ok, Desc).
-
-%% @spec is(Got, Expected, Desc) -> Result
-%% Got = any()
-%% Expected = any()
-%% Desc = string()
-%% Result = true | false
-%% @doc Assert that two values are the same.
-is(Got, Expected, Desc) -> evaluate(Got == Expected, Got, Expected, Desc).
-
-%% @spec isnt(Got, Expected, Desc) -> Result
-%% Got = any()
-%% Expected = any()
-%% Desc = string()
-%% Result = true | false
-%% @doc Assert that two values are not the same.
-isnt(Got, Expected, Desc) -> evaluate(Got /= Expected, Got, Expected, Desc).
-
-%% @spec is_greater(ValueA, ValueB, Desc) -> Result
-%% ValueA = number()
-%% ValueB = number()
-%% Desc = string()
-%% Result = true | false
-%% @doc Assert that an integer is greater than another.
-is_greater(ValueA, ValueB, Desc) when is_integer(ValueA), is_integer(ValueB) ->
- mk_tap(ValueA > ValueB, Desc).
-
-%% @spec any(Got, Items, Desc) -> Result
-%% Got = any()
-%% Items = [any()]
-%% Desc = string()
-%% Result = true | false
-%% @doc Assert that an item is in a list.
-any(Got, Items, Desc) when is_function(Got) ->
- is(lists:any(Got, Items), true, Desc);
-any(Got, Items, Desc) ->
- is(lists:member(Got, Items), true, Desc).
-
-%% @spec none(Got, Items, Desc) -> Result
-%% Got = any()
-%% Items = [any()]
-%% Desc = string()
-%% Result = true | false
-%% @doc Assert that an item is not in a list.
-none(Got, Items, Desc) when is_function(Got) ->
- is(lists:any(Got, Items), false, Desc);
-none(Got, Items, Desc) ->
- is(lists:member(Got, Items), false, Desc).
-
-%% @spec fun_is(Fun, Expected, Desc) -> Result
-%% Fun = function()
-%% Expected = any()
-%% Desc = string()
-%% Result = true | false
-%% @doc Use an anonymous function to assert a pattern match.
-fun_is(Fun, Expected, Desc) when is_function(Fun) ->
- is(Fun(Expected), true, Desc).
-
-%% @spec expect_fun(ExpectFun, Got, Desc) -> Result
-%% ExpectFun = function()
-%% Got = any()
-%% Desc = string()
-%% Result = true | false
-%% @doc Use an anonymous function to assert a pattern match, using actual
-%% value as the argument to the function.
-expect_fun(ExpectFun, Got, Desc) ->
- evaluate(ExpectFun(Got), Got, ExpectFun, Desc).
-
-%% @spec expect_fun(ExpectFun, Got, Desc, ExpectStr) -> Result
-%% ExpectFun = function()
-%% Got = any()
-%% Desc = string()
-%% ExpectStr = string()
-%% Result = true | false
-%% @doc Use an anonymous function to assert a pattern match, using actual
-%% value as the argument to the function.
-expect_fun(ExpectFun, Got, Desc, ExpectStr) ->
- evaluate(ExpectFun(Got), Got, ExpectStr, Desc).
-
-%% @equiv skip(TestFun, "")
-skip(TestFun) when is_function(TestFun) ->
- skip(TestFun, "").
-
-%% @spec skip(TestFun, Reason) -> ok
-%% TestFun = function()
-%% Reason = string()
-%% @doc Skip a test.
-skip(TestFun, Reason) when is_function(TestFun), is_list(Reason) ->
- begin_skip(Reason),
- catch TestFun(),
- end_skip(),
- ok.
-
-%% @spec skip(Q, TestFun, Reason) -> ok
-%% Q = true | false | function()
-%% TestFun = function()
-%% Reason = string()
-%% @doc Skips a test conditionally. The first argument to this function can
-%% either be the 'true' or 'false' atoms or a function that returns 'true' or
-%% 'false'.
-skip(QFun, TestFun, Reason) when is_function(QFun), is_function(TestFun), is_list(Reason) ->
- case QFun() of
- true -> begin_skip(Reason), TestFun(), end_skip();
- _ -> TestFun()
- end,
- ok;
-
-skip(Q, TestFun, Reason) when is_function(TestFun), is_list(Reason), Q == true ->
- begin_skip(Reason),
- TestFun(),
- end_skip(),
- ok;
-
-skip(_, TestFun, Reason) when is_function(TestFun), is_list(Reason) ->
- TestFun(),
- ok.
-
-%% @private
-begin_skip(Reason) ->
- etap_server ! {self(), begin_skip, Reason}.
-
-%% @private
-end_skip() ->
- etap_server ! {self(), end_skip}.
-
-%% @spec contains_ok(string(), string(), string()) -> true | false
-%% @doc Assert that a string is contained in another string.
-contains_ok(Source, String, Desc) ->
- etap:isnt(
- string:str(Source, String),
- 0,
- Desc
- ).
-
-%% @spec is_before(string(), string(), string(), string()) -> true | false
-%% @doc Assert that a string comes before another string within a larger body.
-is_before(Source, StringA, StringB, Desc) ->
- etap:is_greater(
- string:str(Source, StringB),
- string:str(Source, StringA),
- Desc
- ).
-
-%% @doc Assert that a given variable is a pid.
-is_pid(Pid, Desc) when is_pid(Pid) -> etap:ok(true, Desc);
-is_pid(_, Desc) -> etap:ok(false, Desc).
-
-%% @doc Assert that a given process/pid is alive.
-is_alive(Pid, Desc) ->
- etap:ok(erlang:is_process_alive(Pid), Desc).
-
-%% @doc Assert that the current function of a pid is a given {M, F, A} tuple.
-is_mfa(Pid, MFA, Desc) ->
- etap:is({current_function, MFA}, erlang:process_info(Pid, current_function), Desc).
-
-%% @spec loaded_ok(atom(), string()) -> true | false
-%% @doc Assert that a module has been loaded successfully.
-loaded_ok(M, Desc) when is_atom(M) ->
- etap:fun_is(fun({module, _}) -> true; (_) -> false end, code:load_file(M), Desc).
-
-%% @spec can_ok(atom(), atom()) -> true | false
-%% @doc Assert that a module exports a given function.
-can_ok(M, F) when is_atom(M), is_atom(F) ->
- Matches = [X || {X, _} <- M:module_info(exports), X == F],
- etap:ok(Matches > 0, lists:concat([M, " can ", F])).
-
-%% @spec can_ok(atom(), atom(), integer()) -> true | false
-%% @doc Assert that a module exports a given function with a given arity.
-can_ok(M, F, A) when is_atom(M); is_atom(F), is_number(A) ->
- Matches = [X || X <- M:module_info(exports), X == {F, A}],
- etap:ok(Matches > 0, lists:concat([M, " can ", F, "/", A])).
-
-%% @spec has_attrib(M, A) -> true | false
-%% M = atom()
-%% A = atom()
-%% @doc Asserts that a module has a given attribute.
-has_attrib(M, A) when is_atom(M), is_atom(A) ->
- etap:isnt(
- proplists:get_value(A, M:module_info(attributes), 'asdlkjasdlkads'),
- 'asdlkjasdlkads',
- lists:concat([M, " has attribute ", A])
- ).
-
-%% @spec has_attrib(M, A. V) -> true | false
-%% M = atom()
-%% A = atom()
-%% V = any()
-%% @doc Asserts that a module has a given attribute with a given value.
-is_attrib(M, A, V) when is_atom(M) andalso is_atom(A) ->
- etap:is(
- proplists:get_value(A, M:module_info(attributes)),
- [V],
- lists:concat([M, "'s ", A, " is ", V])
- ).
-
-%% @spec is_behavior(M, B) -> true | false
-%% M = atom()
-%% B = atom()
-%% @doc Asserts that a given module has a specific behavior.
-is_behaviour(M, B) when is_atom(M) andalso is_atom(B) ->
- is_attrib(M, behaviour, B).
-
-%% @doc Assert that an exception is raised when running a given function.
-dies_ok(F, Desc) ->
- case (catch F()) of
- {'EXIT', _} -> etap:ok(true, Desc);
- _ -> etap:ok(false, Desc)
- end.
-
-%% @doc Assert that an exception is not raised when running a given function.
-lives_ok(F, Desc) ->
- etap:is(try_this(F), success, Desc).
-
-%% @doc Assert that the exception thrown by a function matches the given exception.
-throws_ok(F, Exception, Desc) ->
- try F() of
- _ -> etap:ok(nok, Desc)
- catch
- _:E ->
- etap:is(E, Exception, Desc)
- end.
-
-%% @private
-%% @doc Run a function and catch any exceptions.
-try_this(F) when is_function(F, 0) ->
- try F() of
- _ -> success
- catch
- throw:E -> {throw, E};
- error:E -> {error, E};
- exit:E -> {exit, E}
- end.
-
-%% @private
-%% @doc Start the etap_server process if it is not running already.
-ensure_test_server() ->
- case whereis(etap_server) of
- undefined ->
- proc_lib:start(?MODULE, start_etap_server,[]);
- _ ->
- diag("The test server is already running.")
- end.
-
-%% @private
-%% @doc Start the etap_server loop and register itself as the etap_server
-%% process.
-start_etap_server() ->
- catch register(etap_server, self()),
- proc_lib:init_ack(ok),
- etap:test_server(#test_state{
- planned = 0,
- count = 0,
- pass = 0,
- fail = 0,
- skip = 0,
- skip_reason = ""
- }).
-
-
-%% @private
-%% @doc The main etap_server receive/run loop. The etap_server receive loop
-%% responds to seven messages apperatining to failure or passing of tests.
-%% It is also used to initiate the testing process with the {_, plan, _}
-%% message that clears the current test state.
-test_server(State) ->
- NewState = receive
- {_From, plan, unknown} ->
- io:format("# Current time local ~s~n", [datetime(erlang:localtime())]),
- io:format("# Using etap version ~p~n", [ proplists:get_value(vsn, proplists:get_value(attributes, etap:module_info())) ]),
- State#test_state{
- planned = -1,
- count = 0,
- pass = 0,
- fail = 0,
- skip = 0,
- skip_reason = ""
- };
- {_From, plan, N} ->
- io:format("# Current time local ~s~n", [datetime(erlang:localtime())]),
- io:format("# Using etap version ~p~n", [ proplists:get_value(vsn, proplists:get_value(attributes, etap:module_info())) ]),
- io:format("1..~p~n", [N]),
- State#test_state{
- planned = N,
- count = 0,
- pass = 0,
- fail = 0,
- skip = 0,
- skip_reason = ""
- };
- {_From, begin_skip, Reason} ->
- State#test_state{
- skip = 1,
- skip_reason = Reason
- };
- {_From, end_skip} ->
- State#test_state{
- skip = 0,
- skip_reason = ""
- };
- {_From, pass, Desc} ->
- FullMessage = skip_diag(
- " - " ++ Desc,
- State#test_state.skip,
- State#test_state.skip_reason
- ),
- io:format("ok ~p ~s~n", [State#test_state.count + 1, FullMessage]),
- State#test_state{
- count = State#test_state.count + 1,
- pass = State#test_state.pass + 1
- };
-
- {_From, fail, Desc} ->
- FullMessage = skip_diag(
- " - " ++ Desc,
- State#test_state.skip,
- State#test_state.skip_reason
- ),
- io:format("not ok ~p ~s~n", [State#test_state.count + 1, FullMessage]),
- State#test_state{
- count = State#test_state.count + 1,
- fail = State#test_state.fail + 1
- };
- {From, state, Ref} ->
- From ! {Ref, State},
- State;
- {_From, diag, Message} ->
- io:format("~s~n", [Message]),
- State;
- {From, count} ->
- From ! State#test_state.count,
- State;
- {From, is_skip, Ref} ->
- From ! {Ref, State#test_state.skip},
- State;
- done ->
- exit(normal)
- end,
- test_server(NewState).
-
-%% @private
-%% @doc Process the result of a test and send it to the etap_server process.
-mk_tap(Result, Desc) ->
- etap_server ! {self(), is_skip, Ref = make_ref()} ,
- receive {Ref, IsSkip} -> ok end,
- case [IsSkip, Result] of
- [_, true] ->
- etap_server ! {self(), pass, Desc},
- true;
- [1, _] ->
- etap_server ! {self(), pass, Desc},
- true;
- _ ->
- etap_server ! {self(), fail, Desc},
- false
- end.
-
-%% @private
-%% @doc Format a date/time string.
-datetime(DateTime) ->
- {{Year, Month, Day}, {Hour, Min, Sec}} = DateTime,
- io_lib:format("~4.10.0B-~2.10.0B-~2.10.0B ~2.10.0B:~2.10.0B:~2.10.0B", [Year, Month, Day, Hour, Min, Sec]).
-
-%% @private
-%% @doc Craft an output message taking skip/todo into consideration.
-skip_diag(Message, 0, _) ->
- Message;
-skip_diag(_Message, 1, "") ->
- " # SKIP";
-skip_diag(_Message, 1, Reason) ->
- " # SKIP : " ++ Reason.
diff --git a/test/Makefile.am b/test/Makefile.am
index 88c317bee..ce36f494c 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -10,6 +10,6 @@
## License for the specific language governing permissions and limitations under
## the License.
-SUBDIRS = etap javascript view_server
+SUBDIRS = couchdb javascript view_server
EXTRA_DIST = random_port.ini
diff --git a/test/couchdb/Makefile.am b/test/couchdb/Makefile.am
new file mode 100644
index 000000000..eaac42f7a
--- /dev/null
+++ b/test/couchdb/Makefile.am
@@ -0,0 +1,83 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+SUBDIRS = fixtures
+
+noinst_SCRIPTS = run
+
+all:
+ mkdir -p ebin/
+ mkdir -p temp/
+ $(ERLC) -Wall -I$(top_srcdir)/src -I$(top_srcdir)/test/couchdb/include \
+ -o $(top_builddir)/test/couchdb/ebin/ $(ERLC_FLAGS) ${TEST} \
+ $(top_srcdir)/test/couchdb/test_request.erl \
+ $(top_srcdir)/test/couchdb/test_web.erl
+ chmod +x run
+ chmod +x $(top_builddir)/test/couchdb/fixtures/os_daemon_configer.escript
+
+eunit_files = \
+ couch_auth_cache_tests.erl \
+ couch_btree_tests.erl \
+ couch_changes_tests.erl \
+ couch_config_tests.erl \
+ couch_db_tests.erl \
+ couch_doc_json_tests.erl \
+ couch_file_tests.erl \
+ couch_key_tree_tests.erl \
+ couch_passwords_tests.erl \
+ couch_ref_counter_tests.erl \
+ couch_stream_tests.erl \
+ couch_stats_tests.erl \
+ couch_task_status_tests.erl \
+ couch_util_tests.erl \
+ couch_uuids_tests.erl \
+ couch_work_queue_tests.erl \
+ couchdb_attachments_tests.erl \
+ couchdb_compaction_daemon.erl \
+ couchdb_cors_tests.erl \
+ couchdb_file_compression_tests.erl \
+ couchdb_http_proxy_tests.erl \
+ couchdb_modules_load_tests.erl \
+ couchdb_os_daemons_tests.erl \
+ couchdb_os_proc_pool.erl \
+ couchdb_update_conflicts_tests.erl \
+ couchdb_vhosts_tests.erl \
+ couchdb_views_tests.erl \
+ json_stream_parse_tests.erl \
+ test_request.erl \
+ test_web.erl \
+ include/couch_eunit.hrl
+
+fixture_files = \
+ fixtures/couch_config_tests_1.ini \
+ fixtures/couch_config_tests_2.ini \
+ fixtures/couch_stats_aggregates.cfg \
+ fixtures/couch_stats_aggregates.ini \
+ fixtures/os_daemon_looper.escript \
+ fixtures/os_daemon_configer.escript \
+ fixtures/os_daemon_bad_perm.sh \
+ fixtures/os_daemon_can_reboot.sh \
+ fixtures/os_daemon_die_on_boot.sh \
+ fixtures/os_daemon_die_quickly.sh \
+ fixtures/logo.png \
+ fixtures/3b835456c235b1827e012e25666152f3.view \
+ fixtures/test.couch
+
+EXTRA_DIST = \
+ run.in \
+ eunit.ini \
+ $(eunit_files) \
+ $(fixture_files)
+
+clean-local:
+ rm -rf ebin/
+ rm -rf temp/
diff --git a/test/couchdb/couch_auth_cache_tests.erl b/test/couchdb/couch_auth_cache_tests.erl
new file mode 100644
index 000000000..3b2321caa
--- /dev/null
+++ b/test/couchdb/couch_auth_cache_tests.erl
@@ -0,0 +1,238 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_auth_cache_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(SALT, <<"SALT">>).
+-define(TIMEOUT, 1000).
+
+
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Pid.
+
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
+
+setup() ->
+ DbName = ?tempdb(),
+ couch_config:set("couch_httpd_auth", "authentication_db",
+ ?b2l(DbName), false),
+ DbName.
+
+teardown(DbName) ->
+ ok = couch_server:delete(DbName, [?ADMIN_USER]),
+ ok.
+
+
+couch_auth_cache_test_() ->
+ {
+ "CouchDB auth cache tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_get_nil_on_missed_cache/1,
+ fun should_get_right_password_hash/1,
+ fun should_ensure_doc_hash_equals_cached_one/1,
+ fun should_update_password/1,
+ fun should_cleanup_cache_after_userdoc_deletion/1,
+ fun should_restore_cache_after_userdoc_recreation/1,
+ fun should_drop_cache_on_auth_db_change/1,
+ fun should_restore_cache_on_auth_db_change/1,
+ fun should_recover_cache_after_shutdown/1
+ ]
+ }
+ }
+ }.
+
+
+should_get_nil_on_missed_cache(_) ->
+ ?_assertEqual(nil, couch_auth_cache:get_user_creds("joe")).
+
+should_get_right_password_hash(DbName) ->
+ ?_test(begin
+ PasswordHash = hash_password("pass1"),
+ {ok, _} = update_user_doc(DbName, "joe", "pass1"),
+ Creds = couch_auth_cache:get_user_creds("joe"),
+ ?assertEqual(PasswordHash,
+ couch_util:get_value(<<"password_sha">>, Creds))
+ end).
+
+should_ensure_doc_hash_equals_cached_one(DbName) ->
+ ?_test(begin
+ {ok, _} = update_user_doc(DbName, "joe", "pass1"),
+ Creds = couch_auth_cache:get_user_creds("joe"),
+
+ CachedHash = couch_util:get_value(<<"password_sha">>, Creds),
+ StoredHash = get_user_doc_password_sha(DbName, "joe"),
+ ?assertEqual(StoredHash, CachedHash)
+ end).
+
+should_update_password(DbName) ->
+ ?_test(begin
+ PasswordHash = hash_password("pass2"),
+ {ok, Rev} = update_user_doc(DbName, "joe", "pass1"),
+ {ok, _} = update_user_doc(DbName, "joe", "pass2", Rev),
+ Creds = couch_auth_cache:get_user_creds("joe"),
+ ?assertEqual(PasswordHash,
+ couch_util:get_value(<<"password_sha">>, Creds))
+ end).
+
+should_cleanup_cache_after_userdoc_deletion(DbName) ->
+ ?_test(begin
+ {ok, _} = update_user_doc(DbName, "joe", "pass1"),
+ delete_user_doc(DbName, "joe"),
+ ?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
+ end).
+
+should_restore_cache_after_userdoc_recreation(DbName) ->
+ ?_test(begin
+ PasswordHash = hash_password("pass5"),
+ {ok, _} = update_user_doc(DbName, "joe", "pass1"),
+ delete_user_doc(DbName, "joe"),
+ ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")),
+
+ {ok, _} = update_user_doc(DbName, "joe", "pass5"),
+ Creds = couch_auth_cache:get_user_creds("joe"),
+
+ ?assertEqual(PasswordHash,
+ couch_util:get_value(<<"password_sha">>, Creds))
+ end).
+
+should_drop_cache_on_auth_db_change(DbName) ->
+ ?_test(begin
+ {ok, _} = update_user_doc(DbName, "joe", "pass1"),
+ full_commit(DbName),
+ couch_config:set("couch_httpd_auth", "authentication_db",
+ ?b2l(?tempdb()), false),
+ ?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
+ end).
+
+should_restore_cache_on_auth_db_change(DbName) ->
+ ?_test(begin
+ PasswordHash = hash_password("pass1"),
+ {ok, _} = update_user_doc(DbName, "joe", "pass1"),
+ Creds = couch_auth_cache:get_user_creds("joe"),
+ full_commit(DbName),
+
+ DbName1 = ?tempdb(),
+ couch_config:set("couch_httpd_auth", "authentication_db",
+ ?b2l(DbName1), false),
+
+ {ok, _} = update_user_doc(DbName1, "joe", "pass5"),
+ full_commit(DbName1),
+
+ couch_config:set("couch_httpd_auth", "authentication_db",
+ ?b2l(DbName), false),
+
+ Creds = couch_auth_cache:get_user_creds("joe"),
+ ?assertEqual(PasswordHash,
+ couch_util:get_value(<<"password_sha">>, Creds))
+ end).
+
+should_recover_cache_after_shutdown(DbName) ->
+ ?_test(begin
+ PasswordHash = hash_password("pass2"),
+ {ok, Rev0} = update_user_doc(DbName, "joe", "pass1"),
+ {ok, Rev1} = update_user_doc(DbName, "joe", "pass2", Rev0),
+ full_commit(DbName),
+ shutdown_db(DbName),
+ {ok, Rev1} = get_doc_rev(DbName, "joe"),
+ ?assertEqual(PasswordHash, get_user_doc_password_sha(DbName, "joe"))
+ end).
+
+
+update_user_doc(DbName, UserName, Password) ->
+ update_user_doc(DbName, UserName, Password, nil).
+
+update_user_doc(DbName, UserName, Password, Rev) ->
+ User = iolist_to_binary(UserName),
+ Doc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"org.couchdb.user:", User/binary>>},
+ {<<"name">>, User},
+ {<<"type">>, <<"user">>},
+ {<<"salt">>, ?SALT},
+ {<<"password_sha">>, hash_password(Password)},
+ {<<"roles">>, []}
+ ] ++ case Rev of
+ nil -> [];
+ _ -> [{<<"_rev">>, Rev}]
+ end
+ }),
+ {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
+ {ok, NewRev} = couch_db:update_doc(AuthDb, Doc, []),
+ ok = couch_db:close(AuthDb),
+ {ok, couch_doc:rev_to_str(NewRev)}.
+
+hash_password(Password) ->
+ ?l2b(couch_util:to_hex(crypto:sha(iolist_to_binary([Password, ?SALT])))).
+
+shutdown_db(DbName) ->
+ {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
+ ok = couch_db:close(AuthDb),
+ couch_util:shutdown_sync(AuthDb#db.main_pid),
+ ok = timer:sleep(1000).
+
+get_doc_rev(DbName, UserName) ->
+ DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
+ {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
+ UpdateRev =
+ case couch_db:open_doc(AuthDb, DocId, []) of
+ {ok, Doc} ->
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ couch_util:get_value(<<"_rev">>, Props);
+ {not_found, missing} ->
+ nil
+ end,
+ ok = couch_db:close(AuthDb),
+ {ok, UpdateRev}.
+
+get_user_doc_password_sha(DbName, UserName) ->
+ DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
+ {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
+ {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
+ ok = couch_db:close(AuthDb),
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ couch_util:get_value(<<"password_sha">>, Props).
+
+delete_user_doc(DbName, UserName) ->
+ DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
+ {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
+ {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ DeletedDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, DocId},
+ {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)},
+ {<<"_deleted">>, true}
+ ]}),
+ {ok, _} = couch_db:update_doc(AuthDb, DeletedDoc, []),
+ ok = couch_db:close(AuthDb).
+
+full_commit(DbName) ->
+ {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
+ {ok, _} = couch_db:ensure_full_commit(AuthDb),
+ ok = couch_db:close(AuthDb).
diff --git a/test/couchdb/couch_btree_tests.erl b/test/couchdb/couch_btree_tests.erl
new file mode 100644
index 000000000..911640f2d
--- /dev/null
+++ b/test/couchdb/couch_btree_tests.erl
@@ -0,0 +1,551 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_btree_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ROWS, 1000).
+
+
+setup() ->
+ {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
+ {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none},
+ {reduce, fun reduce_fun/2}]),
+ {Fd, Btree}.
+
+setup_kvs(_) ->
+ setup().
+
+setup_red() ->
+ {_, EvenOddKVs} = lists:foldl(
+ fun(Idx, {Key, Acc}) ->
+ case Key of
+ "even" -> {"odd", [{{Key, Idx}, 1} | Acc]};
+ _ -> {"even", [{{Key, Idx}, 1} | Acc]}
+ end
+ end, {"odd", []}, lists:seq(1, ?ROWS)),
+ {Fd, Btree} = setup(),
+ {ok, Btree1} = couch_btree:add_remove(Btree, EvenOddKVs, []),
+ {Fd, Btree1}.
+setup_red(_) ->
+ setup_red().
+
+teardown(Fd) when is_pid(Fd) ->
+ ok = couch_file:close(Fd);
+teardown({Fd, _}) ->
+ teardown(Fd).
+teardown(_, {Fd, _}) ->
+ teardown(Fd).
+
+
+kvs_test_funs() ->
+ [
+ fun should_set_fd_correctly/2,
+ fun should_set_root_correctly/2,
+ fun should_create_zero_sized_btree/2,
+ fun should_set_reduce_option/2,
+ fun should_fold_over_empty_btree/2,
+ fun should_add_all_keys/2,
+ fun should_continuously_add_new_kv/2,
+ fun should_continuously_remove_keys/2,
+ fun should_insert_keys_in_reversed_order/2,
+ fun should_add_every_odd_key_remove_every_even/2,
+ fun should_add_every_even_key_remove_every_old/2
+ ].
+
+red_test_funs() ->
+ [
+ fun should_reduce_whole_range/2,
+ fun should_reduce_first_half/2,
+ fun should_reduce_second_half/2
+ ].
+
+
+btree_open_test_() ->
+ {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
+ {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}]),
+ {
+ "Ensure that created btree is really a btree record",
+ ?_assert(is_record(Btree, btree))
+ }.
+
+sorted_kvs_test_() ->
+ Funs = kvs_test_funs(),
+ Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+ {
+ "BTree with sorted keys",
+ {
+ foreachx,
+ fun setup_kvs/1, fun teardown/2,
+ [{Sorted, Fun} || Fun <- Funs]
+ }
+ }.
+
+rsorted_kvs_test_() ->
+ Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+ Funs = kvs_test_funs(),
+ Reversed = Sorted,
+ {
+ "BTree with backward sorted keys",
+ {
+ foreachx,
+ fun setup_kvs/1, fun teardown/2,
+ [{Reversed, Fun} || Fun <- Funs]
+ }
+ }.
+
+shuffled_kvs_test_() ->
+ Funs = kvs_test_funs(),
+ Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+ Shuffled = shuffle(Sorted),
+ {
+ "BTree with shuffled keys",
+ {
+ foreachx,
+ fun setup_kvs/1, fun teardown/2,
+ [{Shuffled, Fun} || Fun <- Funs]
+ }
+ }.
+
+reductions_test_() ->
+ {
+ "BTree reductions",
+ [
+ {
+ "Common tests",
+ {
+ foreach,
+ fun setup_red/0, fun teardown/1,
+ [
+ fun should_reduce_without_specified_direction/1,
+ fun should_reduce_forward/1,
+ fun should_reduce_backward/1
+ ]
+ }
+ },
+ {
+ "Range requests",
+ [
+ {
+ "Forward direction",
+ {
+ foreachx,
+ fun setup_red/1, fun teardown/2,
+ [{fwd, F} || F <- red_test_funs()]
+ }
+ },
+ {
+ "Backward direction",
+ {
+ foreachx,
+ fun setup_red/1, fun teardown/2,
+ [{rev, F} || F <- red_test_funs()]
+ }
+ }
+ ]
+ }
+ ]
+ }.
+
+
+should_set_fd_correctly(_, {Fd, Btree}) ->
+ ?_assertMatch(Fd, Btree#btree.fd).
+
+should_set_root_correctly(_, {_, Btree}) ->
+ ?_assertMatch(nil, Btree#btree.root).
+
+should_create_zero_sized_btree(_, {_, Btree}) ->
+ ?_assertMatch(0, couch_btree:size(Btree)).
+
+should_set_reduce_option(_, {_, Btree}) ->
+ ReduceFun = fun reduce_fun/2,
+ Btree1 = couch_btree:set_options(Btree, [{reduce, ReduceFun}]),
+ ?_assertMatch(ReduceFun, Btree1#btree.reduce).
+
+should_fold_over_empty_btree(_, {_, Btree}) ->
+ {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X+1} end, 0),
+ ?_assertEqual(EmptyRes, 0).
+
+should_add_all_keys(KeyValues, {Fd, Btree}) ->
+ {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
+ [
+ should_return_complete_btree_on_adding_all_keys(KeyValues, Btree1),
+ should_have_non_zero_size(Btree1),
+ should_have_lesser_size_than_file(Fd, Btree1),
+ should_keep_root_pointer_to_kp_node(Fd, Btree1),
+ should_remove_all_keys(KeyValues, Btree1)
+ ].
+
+should_return_complete_btree_on_adding_all_keys(KeyValues, Btree) ->
+ ?_assert(test_btree(Btree, KeyValues)).
+
+should_have_non_zero_size(Btree) ->
+ ?_assert(couch_btree:size(Btree) > 0).
+
+should_have_lesser_size_than_file(Fd, Btree) ->
+ ?_assert((couch_btree:size(Btree) =< couch_file:bytes(Fd))).
+
+should_keep_root_pointer_to_kp_node(Fd, Btree) ->
+ ?_assertMatch({ok, {kp_node, _}},
+ couch_file:pread_term(Fd, element(1, Btree#btree.root))).
+
+should_remove_all_keys(KeyValues, Btree) ->
+ Keys = keys(KeyValues),
+ {ok, Btree1} = couch_btree:add_remove(Btree, [], Keys),
+ {
+ "Should remove all the keys",
+ [
+ should_produce_valid_btree(Btree1, []),
+ should_be_empty(Btree1)
+ ]
+ }.
+
+should_continuously_add_new_kv(KeyValues, {_, Btree}) ->
+ {Btree1, _} = lists:foldl(
+ fun(KV, {BtAcc, PrevSize}) ->
+ {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
+ ?assert(couch_btree:size(BtAcc2) > PrevSize),
+ {BtAcc2, couch_btree:size(BtAcc2)}
+ end, {Btree, couch_btree:size(Btree)}, KeyValues),
+ {
+ "Should continuously add key-values to btree",
+ [
+ should_produce_valid_btree(Btree1, KeyValues),
+ should_not_be_empty(Btree1)
+ ]
+ }.
+
+should_continuously_remove_keys(KeyValues, {_, Btree}) ->
+ {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
+ {Btree2, _} = lists:foldl(
+ fun({K, _}, {BtAcc, PrevSize}) ->
+ {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
+ ?assert(couch_btree:size(BtAcc2) < PrevSize),
+ {BtAcc2, couch_btree:size(BtAcc2)}
+ end, {Btree1, couch_btree:size(Btree1)}, KeyValues),
+ {
+ "Should continuously remove keys from btree",
+ [
+ should_produce_valid_btree(Btree2, []),
+ should_be_empty(Btree2)
+ ]
+ }.
+
+should_insert_keys_in_reversed_order(KeyValues, {_, Btree}) ->
+ KeyValuesRev = lists:reverse(KeyValues),
+ {Btree1, _} = lists:foldl(
+ fun(KV, {BtAcc, PrevSize}) ->
+ {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
+ ?assert(couch_btree:size(BtAcc2) > PrevSize),
+ {BtAcc2, couch_btree:size(BtAcc2)}
+ end, {Btree, couch_btree:size(Btree)}, KeyValuesRev),
+ should_produce_valid_btree(Btree1, KeyValues).
+
+should_add_every_odd_key_remove_every_even(KeyValues, {_, Btree}) ->
+ {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
+ {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
+ case Count rem 2 == 0 of
+ true -> {Count + 1, [X | Left], Right};
+ false -> {Count + 1, Left, [X | Right]}
+ end
+ end, {0, [], []}, KeyValues),
+ ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1)).
+
+should_add_every_even_key_remove_every_old(KeyValues, {_, Btree}) ->
+ {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
+ {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
+ case Count rem 2 == 0 of
+ true -> {Count + 1, [X | Left], Right};
+ false -> {Count + 1, Left, [X | Right]}
+ end
+ end, {0, [], []}, KeyValues),
+ ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0)).
+
+
+should_reduce_without_specified_direction({_, Btree}) ->
+ ?_assertMatch(
+ {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
+ fold_reduce(Btree, [])).
+
+should_reduce_forward({_, Btree}) ->
+ ?_assertMatch(
+ {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
+ fold_reduce(Btree, [{dir, fwd}])).
+
+should_reduce_backward({_, Btree}) ->
+ ?_assertMatch(
+ {ok, [{{"even", _}, ?ROWS div 2}, {{"odd", _}, ?ROWS div 2}]},
+ fold_reduce(Btree, [{dir, rev}])).
+
+should_reduce_whole_range(fwd, {_, Btree}) ->
+ {SK, EK} = {{"even", 0}, {"odd", ?ROWS - 1}},
+ [
+ {
+ "include endkey",
+ ?_assertMatch(
+ {ok, [{{"odd", 1}, ?ROWS div 2},
+ {{"even", 2}, ?ROWS div 2}]},
+ fold_reduce(Btree, [{dir, fwd},
+ {start_key, SK},
+ {end_key, EK}]))
+ },
+ {
+ "exclude endkey",
+ ?_assertMatch(
+ {ok, [{{"odd", 1}, (?ROWS div 2) - 1},
+ {{"even", 2}, ?ROWS div 2}]},
+ fold_reduce(Btree, [{dir, fwd},
+ {start_key, SK},
+ {end_key_gt, EK}]))
+ }
+ ];
+should_reduce_whole_range(rev, {_, Btree}) ->
+ {SK, EK} = {{"odd", ?ROWS - 1}, {"even", 2}},
+ [
+ {
+ "include endkey",
+ ?_assertMatch(
+ {ok, [{{"even", ?ROWS}, ?ROWS div 2},
+ {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
+ fold_reduce(Btree, [{dir, rev},
+ {start_key, SK},
+ {end_key, EK}]))
+ },
+ {
+ "exclude endkey",
+ ?_assertMatch(
+ {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1},
+ {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
+ fold_reduce(Btree, [{dir, rev},
+ {start_key, SK},
+ {end_key_gt, EK}]))
+ }
+ ].
+
+should_reduce_first_half(fwd, {_, Btree}) ->
+ {SK, EK} = {{"even", 0}, {"odd", (?ROWS div 2) - 1}},
+ [
+ {
+ "include endkey",
+ ?_assertMatch(
+ {ok, [{{"odd", 1}, ?ROWS div 4},
+ {{"even", 2}, ?ROWS div 2}]},
+ fold_reduce(Btree, [{dir, fwd},
+ {start_key, SK}, {end_key, EK}]))
+ },
+ {
+ "exclude endkey",
+ ?_assertMatch(
+ {ok, [{{"odd", 1}, (?ROWS div 4) - 1},
+ {{"even", 2}, ?ROWS div 2}]},
+ fold_reduce(Btree, [{dir, fwd},
+ {start_key, SK},
+ {end_key_gt, EK}]))
+ }
+ ];
+should_reduce_first_half(rev, {_, Btree}) ->
+ {SK, EK} = {{"odd", ?ROWS - 1}, {"even", ?ROWS div 2}},
+ [
+ {
+ "include endkey",
+ ?_assertMatch(
+ {ok, [{{"even", ?ROWS}, (?ROWS div 4) + 1},
+ {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
+ fold_reduce(Btree, [{dir, rev},
+ {start_key, SK},
+ {end_key, EK}]))
+ },
+ {
+ "exclude endkey",
+ ?_assertMatch(
+ {ok, [{{"even", ?ROWS}, ?ROWS div 4},
+ {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
+ fold_reduce(Btree, [{dir, rev},
+ {start_key, SK},
+ {end_key_gt, EK}]))
+ }
+ ].
+
+should_reduce_second_half(fwd, {_, Btree}) ->
+ {SK, EK} = {{"even", ?ROWS div 2}, {"odd", ?ROWS - 1}},
+ [
+ {
+ "include endkey",
+ ?_assertMatch(
+ {ok, [{{"odd", 1}, ?ROWS div 2},
+ {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]},
+ fold_reduce(Btree, [{dir, fwd},
+ {start_key, SK},
+ {end_key, EK}]))
+ },
+ {
+ "exclude endkey",
+ ?_assertMatch(
+ {ok, [{{"odd", 1}, (?ROWS div 2) - 1},
+ {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]},
+ fold_reduce(Btree, [{dir, fwd},
+ {start_key, SK},
+ {end_key_gt, EK}]))
+ }
+ ];
+should_reduce_second_half(rev, {_, Btree}) ->
+ {SK, EK} = {{"odd", (?ROWS div 2) + 1}, {"even", 2}},
+ [
+ {
+ "include endkey",
+ ?_assertMatch(
+ {ok, [{{"even", ?ROWS}, ?ROWS div 2},
+ {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]},
+ fold_reduce(Btree, [{dir, rev},
+ {start_key, SK},
+ {end_key, EK}]))
+ },
+ {
+ "exclude endkey",
+ ?_assertMatch(
+ {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1},
+ {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]},
+ fold_reduce(Btree, [{dir, rev},
+ {start_key, SK},
+ {end_key_gt, EK}]))
+ }
+ ].
+
+should_produce_valid_btree(Btree, KeyValues) ->
+ ?_assert(test_btree(Btree, KeyValues)).
+
+should_be_empty(Btree) ->
+ ?_assertEqual(couch_btree:size(Btree), 0).
+
+should_not_be_empty(Btree) ->
+ ?_assert(couch_btree:size(Btree) > 0).
+
+fold_reduce(Btree, Opts) ->
+ GroupFun = fun({K1, _}, {K2, _}) ->
+ K1 == K2
+ end,
+ FoldFun = fun(GroupedKey, Unreduced, Acc) ->
+ {ok, [{GroupedKey, couch_btree:final_reduce(Btree, Unreduced)} | Acc]}
+ end,
+ couch_btree:fold_reduce(Btree, FoldFun, [],
+ [{key_group_fun, GroupFun}] ++ Opts).
+
+
+keys(KVs) ->
+ [K || {K, _} <- KVs].
+
+reduce_fun(reduce, KVs) ->
+ length(KVs);
+reduce_fun(rereduce, Reds) ->
+ lists:sum(Reds).
+
+
+shuffle(List) ->
+ randomize(round(math:log(length(List)) + 0.5), List).
+
+randomize(1, List) ->
+ randomize(List);
+randomize(T, List) ->
+ lists:foldl(
+ fun(_E, Acc) ->
+ randomize(Acc)
+ end, randomize(List), lists:seq(1, (T - 1))).
+
+randomize(List) ->
+ D = lists:map(fun(A) -> {random:uniform(), A} end, List),
+ {_, D1} = lists:unzip(lists:keysort(1, D)),
+ D1.
+
+test_btree(Btree, KeyValues) ->
+ ok = test_key_access(Btree, KeyValues),
+ ok = test_lookup_access(Btree, KeyValues),
+ ok = test_final_reductions(Btree, KeyValues),
+ ok = test_traversal_callbacks(Btree, KeyValues),
+ true.
+
+test_add_remove(Btree, OutKeyValues, RemainingKeyValues) ->
+ Btree2 = lists:foldl(
+ fun({K, _}, BtAcc) ->
+ {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
+ BtAcc2
+ end, Btree, OutKeyValues),
+ true = test_btree(Btree2, RemainingKeyValues),
+
+ Btree3 = lists:foldl(
+ fun(KV, BtAcc) ->
+ {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
+ BtAcc2
+ end, Btree2, OutKeyValues),
+ true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues).
+
+test_key_access(Btree, List) ->
+ FoldFun = fun(Element, {[HAcc|TAcc], Count}) ->
+ case Element == HAcc of
+ true -> {ok, {TAcc, Count + 1}};
+ _ -> {ok, {TAcc, Count + 1}}
+ end
+ end,
+ Length = length(List),
+ Sorted = lists:sort(List),
+ {ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}),
+ {ok, _, {[], Length}} = couch_btree:fold(Btree, FoldFun,
+ {Sorted, 0}, [{dir, rev}]),
+ ok.
+
+test_lookup_access(Btree, KeyValues) ->
+ FoldFun = fun({Key, Value}, {Key, Value}) -> {stop, true} end,
+ lists:foreach(
+ fun({Key, Value}) ->
+ [{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]),
+ {ok, _, true} = couch_btree:foldl(Btree, FoldFun,
+ {Key, Value}, [{start_key, Key}])
+ end, KeyValues).
+
+test_final_reductions(Btree, KeyValues) ->
+ KVLen = length(KeyValues),
+ FoldLFun = fun(_X, LeadingReds, Acc) ->
+ CountToStart = KVLen div 3 + Acc,
+ CountToStart = couch_btree:final_reduce(Btree, LeadingReds),
+ {ok, Acc + 1}
+ end,
+ FoldRFun = fun(_X, LeadingReds, Acc) ->
+ CountToEnd = KVLen - KVLen div 3 + Acc,
+ CountToEnd = couch_btree:final_reduce(Btree, LeadingReds),
+ {ok, Acc + 1}
+ end,
+ {LStartKey, _} = case KVLen of
+ 0 -> {nil, nil};
+ _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues))
+ end,
+ {RStartKey, _} = case KVLen of
+ 0 -> {nil, nil};
+ _ -> lists:nth(KVLen div 3, lists:sort(KeyValues))
+ end,
+ {ok, _, FoldLRed} = couch_btree:foldl(Btree, FoldLFun, 0,
+ [{start_key, LStartKey}]),
+ {ok, _, FoldRRed} = couch_btree:fold(Btree, FoldRFun, 0,
+ [{dir, rev}, {start_key, RStartKey}]),
+ KVLen = FoldLRed + FoldRRed,
+ ok.
+
+test_traversal_callbacks(Btree, _KeyValues) ->
+ FoldFun = fun
+ (visit, _GroupedKey, _Unreduced, Acc) ->
+ {ok, Acc andalso false};
+ (traverse, _LK, _Red, Acc) ->
+ {skip, Acc andalso true}
+ end,
+ % With 250 items the root is a kp. Always skipping should reduce to true.
+ {ok, _, true} = couch_btree:fold(Btree, FoldFun, true, [{dir, fwd}]),
+ ok.
diff --git a/test/couchdb/couch_changes_tests.erl b/test/couchdb/couch_changes_tests.erl
new file mode 100644
index 000000000..a129ba2fd
--- /dev/null
+++ b/test/couchdb/couch_changes_tests.erl
@@ -0,0 +1,612 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_changes_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+% Admin user context for operations that need design-doc write access.
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles = [<<"_admin">>]}}).
+-define(TIMEOUT, 3000).
+-define(TEST_TIMEOUT, 10000).
+
+% One row of a changes feed, as captured by the consumer process below.
+-record(row, {
+    id,
+    seq,
+    deleted = false
+}).
+
+
+% Start the couch server supervisor; the pid is handed to stop/1.
+start() ->
+    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+    Pid.
+
+% Stop the server and block until the supervisor process actually exits,
+% so the next fixture starts from a clean slate.
+stop(Pid) ->
+    erlang:monitor(process, Pid),
+    couch_server_sup:stop(),
+    receive
+        {'DOWN', _, _, Pid, _} ->
+            ok
+    after ?TIMEOUT ->
+        throw({timeout, server_stop})
+    end.
+
+% Build a temp db: save doc1..doc5, re-save doc3 (so its change moves to
+% seq 6), then add doc6, a design doc and doc7/doc8. Returns {DbName, Revs}
+% where Revs is a 10-slot tuple of revision strings in save order.
+setup() ->
+    DbName = ?tempdb(),
+    {ok, Db} = create_db(DbName),
+    Revs = [R || {ok, R} <- [
+        save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
+        save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
+        save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
+        save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
+        save_doc(Db, {[{<<"_id">>, <<"doc5">>}]})
+    ]],
+    Rev = lists:nth(3, Revs),
+    {ok, Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev}]}),
+    Revs1 = Revs ++ [Rev1],
+    Revs2 = Revs1 ++ [R || {ok, R} <- [
+        save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
+        save_doc(Db, {[{<<"_id">>, <<"_design/foo">>}]}),
+        save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
+        save_doc(Db, {[{<<"_id">>, <<"doc8">>}]})
+    ]],
+    {DbName, list_to_tuple(Revs2)}.
+
+% Drop the per-test database created by setup/0.
+teardown({DbName, _}) ->
+    delete_db(DbName),
+    ok.
+
+
+% Top-level EUnit generator for the changes-feed suite. One couch server is
+% started for all groups; each group creates its own database via setup/0.
+% Fixed typo in the suite description ("feeed" -> "feed").
+changes_test_() ->
+    {
+        "Changes feed",
+        {
+            setup,
+            fun start/0, fun stop/1,
+            [
+                filter_by_doc_id(),
+                filter_by_design(),
+                continuous_feed(),
+                filter_by_custom_function()
+            ]
+        }
+    }.
+
+% Group: tests for the built-in "_doc_ids" changes filter.
+filter_by_doc_id() ->
+    {
+        "Filter _doc_id",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_filter_by_specific_doc_ids/1,
+                fun should_filter_by_specific_doc_ids_descending/1,
+                fun should_filter_by_specific_doc_ids_with_since/1,
+                fun should_filter_by_specific_doc_ids_no_result/1,
+                fun should_handle_deleted_docs/1
+            ]
+        }
+    }.
+
+% Group: tests for the built-in "_design" changes filter.
+filter_by_design() ->
+    {
+        "Filter _design",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_emit_only_design_documents/1
+            ]
+        }
+    }.
+
+% Group: tests using a JavaScript filter function from a design doc.
+filter_by_custom_function() ->
+    {
+        "Filter function",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_receive_heartbeats/1
+            ]
+        }
+    }.
+
+% Group: continuous (long-lived) feed behaviour.
+continuous_feed() ->
+    {
+        "Continuous Feed",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_filter_continuous_feed_by_specific_doc_ids/1
+            ]
+        }
+    }.
+
+
+% _doc_ids filter: of the requested ids only doc4 (seq 4) and doc3 (seq 6,
+% re-saved in setup) exist; last_seq must equal the db's update seq.
+should_filter_by_specific_doc_ids({DbName, _}) ->
+    ?_test(
+        begin
+            ChangesArgs = #changes_args{
+                filter = "_doc_ids"
+            },
+            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
+            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
+            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
+
+            {Rows, LastSeq} = wait_finished(Consumer),
+            {ok, Db} = couch_db:open_int(DbName, []),
+            UpSeq = couch_db:get_update_seq(Db),
+            couch_db:close(Db),
+            stop_consumer(Consumer),
+
+            ?assertEqual(2, length(Rows)),
+            [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
+            ?assertEqual(<<"doc4">>, Id1),
+            ?assertEqual(4, Seq1),
+            ?assertEqual(<<"doc3">>, Id2),
+            ?assertEqual(6, Seq2),
+            ?assertEqual(UpSeq, LastSeq)
+        end).
+
+% Same as above with dir=rev: rows arrive newest-first and last_seq is the
+% seq of the last row emitted (4), not the db's update seq.
+should_filter_by_specific_doc_ids_descending({DbName, _}) ->
+    ?_test(
+        begin
+            ChangesArgs = #changes_args{
+                filter = "_doc_ids",
+                dir = rev
+            },
+            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
+            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
+            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
+
+            {Rows, LastSeq} = wait_finished(Consumer),
+            {ok, Db} = couch_db:open_int(DbName, []),
+            couch_db:close(Db),
+            stop_consumer(Consumer),
+
+            ?assertEqual(2, length(Rows)),
+            [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
+            ?assertEqual(<<"doc3">>, Id1),
+            ?assertEqual(6, Seq1),
+            ?assertEqual(<<"doc4">>, Id2),
+            ?assertEqual(4, Seq2),
+            ?assertEqual(4, LastSeq)
+        end).
+
+% since=5 skips doc4's change at seq 4; only doc3's change at seq 6 remains.
+should_filter_by_specific_doc_ids_with_since({DbName, _}) ->
+    ?_test(
+        begin
+            ChangesArgs = #changes_args{
+                filter = "_doc_ids",
+                since = 5
+            },
+            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
+            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
+            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
+
+            {Rows, LastSeq} = wait_finished(Consumer),
+            {ok, Db} = couch_db:open_int(DbName, []),
+            UpSeq = couch_db:get_update_seq(Db),
+            couch_db:close(Db),
+            stop_consumer(Consumer),
+
+            ?assertEqual(1, length(Rows)),
+            [#row{seq = Seq1, id = Id1}] = Rows,
+            ?assertEqual(<<"doc3">>, Id1),
+            ?assertEqual(6, Seq1),
+            ?assertEqual(UpSeq, LastSeq)
+        end).
+
+% since=6 is past every matching change, so the feed ends with no rows but
+% still reports the current update seq as last_seq.
+should_filter_by_specific_doc_ids_no_result({DbName, _}) ->
+    ?_test(
+        begin
+            ChangesArgs = #changes_args{
+                filter = "_doc_ids",
+                since = 6
+            },
+            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
+            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
+            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
+
+            {Rows, LastSeq} = wait_finished(Consumer),
+            {ok, Db} = couch_db:open_int(DbName, []),
+            UpSeq = couch_db:get_update_seq(Db),
+            couch_db:close(Db),
+            stop_consumer(Consumer),
+
+            ?assertEqual(0, length(Rows)),
+            ?assertEqual(UpSeq, LastSeq)
+        end).
+
+% Delete doc3 (slot 6 of the rev tuple holds its second revision) and make
+% sure the feed reports it with deleted = true at the deletion seq (11).
+should_handle_deleted_docs({DbName, Revs}) ->
+    ?_test(
+        begin
+            Rev3_2 = element(6, Revs),
+            {ok, Db} = couch_db:open_int(DbName, []),
+            {ok, _} = save_doc(
+                Db,
+                {[{<<"_id">>, <<"doc3">>},
+                  {<<"_deleted">>, true},
+                  {<<"_rev">>, Rev3_2}]}),
+
+            ChangesArgs = #changes_args{
+                filter = "_doc_ids",
+                since = 9
+            },
+            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
+            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
+            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
+
+            {Rows, LastSeq} = wait_finished(Consumer),
+            couch_db:close(Db),
+            stop_consumer(Consumer),
+
+            ?assertEqual(1, length(Rows)),
+            ?assertMatch(
+                [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}],
+                Rows
+            ),
+            ?assertEqual(11, LastSeq)
+        end).
+
+% Continuous feed with a _doc_ids filter. The consumer process is repeatedly
+% paused (quiescing it so get_rows/1 sees a stable snapshot), inspected,
+% cleared, then unpaused while more docs are written. Only changes to doc3
+% and doc4 may ever appear; unrelated docs (doc9..doc12) must be filtered.
+should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) ->
+    ?_test(
+        begin
+            {ok, Db} = couch_db:open_int(DbName, []),
+            ChangesArgs = #changes_args{
+                filter = "_doc_ids",
+                feed = "continuous"
+            },
+            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
+            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
+            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
+            pause(Consumer),
+
+            % Initial state: same two rows as the non-continuous case.
+            Rows = get_rows(Consumer),
+            ?assertEqual(2, length(Rows)),
+            [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
+            ?assertEqual(<<"doc4">>, Id1),
+            ?assertEqual(4, Seq1),
+            ?assertEqual(<<"doc3">>, Id2),
+            ?assertEqual(6, Seq2),
+
+            % doc9/doc10 are not in the filter list -> no new rows.
+            clear_rows(Consumer),
+            {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
+            {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
+            unpause(Consumer),
+            pause(Consumer),
+            ?assertEqual([], get_rows(Consumer)),
+
+            % Interleave updates of doc4 (twice) and doc3 with unrelated
+            % docs; only the latest change per filtered doc is reported.
+            Rev4 = element(4, Revs),
+            Rev3_2 = element(6, Revs),
+            {ok, Rev4_2} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
+                                          {<<"_rev">>, Rev4}]}),
+            {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
+            {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
+                                     {<<"_rev">>, Rev4_2}]}),
+            {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
+            {ok, Rev3_3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>},
+                                          {<<"_rev">>, Rev3_2}]}),
+            unpause(Consumer),
+            pause(Consumer),
+
+            NewRows = get_rows(Consumer),
+            ?assertEqual(2, length(NewRows)),
+            [Row14, Row16] = NewRows,
+            ?assertEqual(<<"doc4">>, Row14#row.id),
+            ?assertEqual(15, Row14#row.seq),
+            ?assertEqual(<<"doc3">>, Row16#row.id),
+            ?assertEqual(17, Row16#row.seq),
+
+            % A further doc3 update shows up on its own at seq 18.
+            clear_rows(Consumer),
+            {ok, _Rev3_4} = save_doc(Db, {[{<<"_id">>, <<"doc3">>},
+                                           {<<"_rev">>, Rev3_3}]}),
+            unpause(Consumer),
+            pause(Consumer),
+
+            FinalRows = get_rows(Consumer),
+
+            unpause(Consumer),
+            stop_consumer(Consumer),
+
+            ?assertMatch([#row{seq = 18, id = <<"doc3">>}], FinalRows)
+        end).
+
+% _design filter: first run reports only _design/foo at seq 8; after the
+% design doc is deleted (needs admin context) a second run reports it once
+% more, now deleted, at the new update seq.
+should_emit_only_design_documents({DbName, Revs}) ->
+    ?_test(
+        begin
+            ChangesArgs = #changes_args{
+                filter = "_design"
+            },
+            Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
+
+            {Rows, LastSeq} = wait_finished(Consumer),
+            {ok, Db} = couch_db:open_int(DbName, []),
+            UpSeq = couch_db:get_update_seq(Db),
+            couch_db:close(Db),
+
+            ?assertEqual(1, length(Rows)),
+            ?assertEqual(UpSeq, LastSeq),
+            ?assertEqual([#row{seq = 8, id = <<"_design/foo">>}], Rows),
+
+            stop_consumer(Consumer),
+
+            % Deleting a design doc requires the admin user context.
+            {ok, Db2} = couch_db:open_int(DbName, [?ADMIN_USER]),
+            {ok, _} = save_doc(Db2, {[{<<"_id">>, <<"_design/foo">>},
+                                      {<<"_rev">>, element(8, Revs)},
+                                      {<<"_deleted">>, true}]}),
+
+            Consumer2 = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
+
+            {Rows2, LastSeq2} = wait_finished(Consumer2),
+            UpSeq2 = UpSeq + 1,
+            couch_db:close(Db2),
+
+            ?assertEqual(1, length(Rows2)),
+            ?assertEqual(UpSeq2, LastSeq2),
+            ?assertEqual([#row{seq = 11,
+                               id = <<"_design/foo">>,
+                               deleted = true}],
+                         Rows2)
+        end).
+
+% Continuous feed with a JS filter and heartbeat = 1000 ms: while saved docs
+% do not match the filter the feed should emit heartbeats, and heartbeats
+% must keep flowing both before and after matching rows (doc10..doc12)
+% arrive. NOTE(review): the timer:sleep calls make this test depend on wall
+% clock timing; the generous ?TEST_TIMEOUT is there to absorb slow machines.
+should_receive_heartbeats(_) ->
+    {timeout, ?TEST_TIMEOUT div 1000,
+     ?_test(
+         begin
+             DbName = ?tempdb(),
+             Timeout = 100,
+             {ok, Db} = create_db(DbName),
+
+             {ok, _} = save_doc(Db, {[
+                 {<<"_id">>, <<"_design/filtered">>},
+                 {<<"language">>, <<"javascript">>},
+                 {<<"filters">>, {[
+                     {<<"foo">>, <<"function(doc) {
+                         return ['doc10', 'doc11', 'doc12'].indexOf(doc._id) != -1;}">>
+                     }]}}
+             ]}),
+
+             ChangesArgs = #changes_args{
+                 filter = "filtered/foo",
+                 feed = "continuous",
+                 timeout = 10000,
+                 heartbeat = 1000
+             },
+             Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
+
+             % doc1..doc9 never match the filter, so only heartbeats arrive.
+             {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
+
+             Heartbeats = get_heartbeats(Consumer),
+             ?assert(Heartbeats > 0),
+
+             % doc10..doc12 match: rows appear but heartbeats continue.
+             {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
+
+             Heartbeats2 = get_heartbeats(Consumer),
+             ?assert(Heartbeats2 > Heartbeats),
+
+             Rows = get_rows(Consumer),
+             ?assertEqual(3, length(Rows)),
+
+             {ok, _Rev13} = save_doc(Db, {[{<<"_id">>, <<"doc13">>}]}),
+             timer:sleep(Timeout),
+             {ok, _Rev14} = save_doc(Db, {[{<<"_id">>, <<"doc14">>}]}),
+             timer:sleep(Timeout),
+
+             Heartbeats3 = get_heartbeats(Consumer),
+             ?assert(Heartbeats3 > Heartbeats2)
+         end)}.
+
+
+% Save a JSON doc and return {ok, RevString}.
+save_doc(Db, Json) ->
+    Doc = couch_doc:from_json_obj(Json),
+    {ok, Rev} = couch_db:update_doc(Db, Doc, []),
+    {ok, couch_doc:rev_to_str(Rev)}.
+
+% The helpers below implement a ref-tagged request/response protocol with
+% the consumer process; a missed reply within ?TIMEOUT becomes an assertion
+% failure via ?assertNotEqual(timeout, ...).
+
+% Ask the consumer for the rows it has accumulated so far.
+get_rows(Consumer) ->
+    Ref = make_ref(),
+    Consumer ! {get_rows, Ref},
+    Resp = receive
+        {rows, Ref, Rows} ->
+            Rows
+    after ?TIMEOUT ->
+        timeout
+    end,
+    ?assertNotEqual(timeout, Resp),
+    Resp.
+
+% Ask the consumer for its heartbeat count.
+% NOTE(review): the reply tag 'hearthbeats' is misspelled, but it matches
+% the tag sent by maybe_pause/2, so it works; renaming must touch both ends.
+get_heartbeats(Consumer) ->
+    Ref = make_ref(),
+    Consumer ! {get_heartbeats, Ref},
+    Resp = receive
+        {hearthbeats, Ref, HeartBeats} ->
+            HeartBeats
+    after ?TIMEOUT ->
+        timeout
+    end,
+    ?assertNotEqual(timeout, Resp),
+    Resp.
+
+% Reset the consumer's accumulated rows to [].
+clear_rows(Consumer) ->
+    Ref = make_ref(),
+    Consumer ! {reset, Ref},
+    Resp = receive
+        {ok, Ref} ->
+            ok
+    after ?TIMEOUT ->
+        timeout
+    end,
+    ?assertNotEqual(timeout, Resp),
+    Resp.
+
+% Tell the consumer to stop; it throws {stop, Acc} out of the feed loop.
+stop_consumer(Consumer) ->
+    Ref = make_ref(),
+    Consumer ! {stop, Ref},
+    Resp = receive
+        {ok, Ref} ->
+            ok
+    after ?TIMEOUT ->
+        timeout
+    end,
+    ?assertNotEqual(timeout, Resp),
+    Resp.
+
+% Park the consumer in pause_loop/2 until unpause/1 is called.
+pause(Consumer) ->
+    Ref = make_ref(),
+    Consumer ! {pause, Ref},
+    Resp = receive
+        {paused, Ref} ->
+            ok
+    after ?TIMEOUT ->
+        timeout
+    end,
+    ?assertNotEqual(timeout, Resp),
+    Resp.
+
+% Release a paused consumer back into the changes feed.
+unpause(Consumer) ->
+    Ref = make_ref(),
+    Consumer ! {continue, Ref},
+    Resp = receive
+        {ok, Ref} ->
+            ok
+    after ?TIMEOUT ->
+        timeout
+    end,
+    ?assertNotEqual(timeout, Resp),
+    Resp.
+
+% Wait for the {consumer_finished, Rows, LastSeq} message the consumer's
+% callback sends on feed completion. The Consumer argument is unused: the
+% message is matched by tag only, which is safe while there is exactly one
+% consumer per test.
+wait_finished(_Consumer) ->
+    Resp = receive
+        {consumer_finished, Rows, LastSeq} ->
+            {Rows, LastSeq}
+    after ?TIMEOUT ->
+        timeout
+    end,
+    ?assertNotEqual(timeout, Resp),
+    Resp.
+
+% Spawn a process that drives couch_changes:handle_changes/3 and records
+% each change as a #row{} in its accumulator. Control messages from the
+% test are drained between callback invocations (maybe_pause/2). Heartbeat
+% ticks ('timeout' callbacks) are counted in the process dictionary.
+% NOTE(review): plain spawn, not spawn_link - the consumer is stopped
+% explicitly via stop_consumer/1; a crash here only surfaces as a timeout.
+spawn_consumer(DbName, ChangesArgs0, Req) ->
+    Parent = self(),
+    spawn(fun() ->
+        put(heartbeat_count, 0),
+        Callback = fun
+            ({change, {Change}, _}, _, Acc) ->
+                Id = couch_util:get_value(<<"id">>, Change),
+                Seq = couch_util:get_value(<<"seq">>, Change),
+                Del = couch_util:get_value(<<"deleted">>, Change, false),
+                [#row{id = Id, seq = Seq, deleted = Del} | Acc];
+            ({stop, LastSeq}, _, Acc) ->
+                % Feed finished: report rows in arrival order, then keep
+                % serving get_rows/stop requests until told to exit.
+                Parent ! {consumer_finished, lists:reverse(Acc), LastSeq},
+                stop_loop(Parent, Acc);
+            (timeout, _, Acc) ->
+                put(heartbeat_count, get(heartbeat_count) + 1),
+                maybe_pause(Parent, Acc);
+            (_, _, Acc) ->
+                maybe_pause(Parent, Acc)
+        end,
+        {ok, Db} = couch_db:open_int(DbName, []),
+        % If the caller set neither timeout nor heartbeat, use tiny values
+        % so non-continuous feeds terminate quickly.
+        ChangesArgs = case (ChangesArgs0#changes_args.timeout =:= undefined)
+            andalso (ChangesArgs0#changes_args.heartbeat =:= undefined) of
+            true ->
+                ChangesArgs0#changes_args{timeout = 10, heartbeat = 10};
+            false ->
+                ChangesArgs0
+        end,
+        FeedFun = couch_changes:handle_changes(ChangesArgs, Req, Db),
+        try
+            FeedFun({Callback, []})
+        catch throw:{stop, _} ->
+            ok
+        end,
+        catch couch_db:close(Db)
+    end).
+
+% Drain pending control messages without blocking ('after 0'); any message
+% outside the protocol is turned into an assertion failure.
+maybe_pause(Parent, Acc) ->
+    receive
+        {get_rows, Ref} ->
+            Parent ! {rows, Ref, lists:reverse(Acc)},
+            maybe_pause(Parent, Acc);
+        {get_heartbeats, Ref} ->
+            Parent ! {hearthbeats, Ref, get(heartbeat_count)},
+            maybe_pause(Parent, Acc);
+        {reset, Ref} ->
+            Parent ! {ok, Ref},
+            maybe_pause(Parent, []);
+        {pause, Ref} ->
+            Parent ! {paused, Ref},
+            pause_loop(Parent, Acc);
+        {stop, Ref} ->
+            Parent ! {ok, Ref},
+            throw({stop, Acc});
+        V ->
+            erlang:error({assertion_failed,
+                          [{module, ?MODULE},
+                           {line, ?LINE},
+                           {value, V},
+                           {reason, "Received unexpected message"}]})
+    after 0 ->
+        Acc
+    end.
+
+% Blocking wait while paused: only stop/reset/continue/get_rows are served,
+% and 'continue' returns the accumulator to the feed callback.
+pause_loop(Parent, Acc) ->
+    receive
+        {stop, Ref} ->
+            Parent ! {ok, Ref},
+            throw({stop, Acc});
+        {reset, Ref} ->
+            Parent ! {ok, Ref},
+            pause_loop(Parent, []);
+        {continue, Ref} ->
+            Parent ! {ok, Ref},
+            Acc;
+        {get_rows, Ref} ->
+            Parent ! {rows, Ref, lists:reverse(Acc)},
+            pause_loop(Parent, Acc)
+    end.
+
+% After the feed ends, keep answering get_rows until a stop request.
+stop_loop(Parent, Acc) ->
+    receive
+        {get_rows, Ref} ->
+            Parent ! {rows, Ref, lists:reverse(Acc)},
+            stop_loop(Parent, Acc);
+        {stop, Ref} ->
+            Parent ! {ok, Ref},
+            Acc
+    end.
+
+% Create a fresh test database as admin (overwriting any leftover).
+create_db(DbName) ->
+    couch_db:create(DbName, [?ADMIN_USER, overwrite]).
+
+% Delete a test database as admin.
+delete_db(DbName) ->
+    ok = couch_server:delete(DbName, [?ADMIN_USER]).
diff --git a/test/couchdb/couch_config_tests.erl b/test/couchdb/couch_config_tests.erl
new file mode 100644
index 000000000..9e9dfe701
--- /dev/null
+++ b/test/couchdb/couch_config_tests.erl
@@ -0,0 +1,463 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_config_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(SHORT_TIMEOUT, 100).
+-define(TIMEOUT, 1000).
+
+-define(CONFIG_DEFAULT,
+        filename:join([?BUILDDIR, "etc", "couchdb", "default_dev.ini"])).
+-define(CONFIG_FIXTURE_1,
+        filename:join([?FIXTURESDIR, "couch_config_tests_1.ini"])).
+-define(CONFIG_FIXTURE_2,
+        filename:join([?FIXTURESDIR, "couch_config_tests_2.ini"])).
+% NOTE: this macro has a side effect - every expansion (re)creates an empty
+% temp ini file and returns its path.
+-define(CONFIG_FIXTURE_TEMP,
+    begin
+        FileName = filename:join([?TEMPDIR, "couch_config_temp.ini"]),
+        {ok, Fd} = file:open(FileName, write),
+        ok = file:truncate(Fd),
+        ok = file:close(Fd),
+        FileName
+    end).
+
+
+% Start couch_config with the default chain, a caller-supplied chain, or -
+% for the persistent tests - the chain plus a writable temp ini at the end.
+setup() ->
+    setup(?CONFIG_CHAIN).
+setup({temporary, Chain}) ->
+    setup(Chain);
+setup({persistent, Chain}) ->
+    setup(lists:append(Chain, [?CONFIG_FIXTURE_TEMP]));
+setup(Chain) ->
+    {ok, Pid} = couch_config:start_link(Chain),
+    Pid.
+
+% Start couch_config with no ini files at all.
+setup_empty() ->
+    setup([]).
+
+% Start couch_config plus a sentinel process that owns the registered
+% change handlers; killing it (or pinging it) lets tests control handler
+% lifetime.
+setup_register() ->
+    ConfigPid = setup(),
+    SentinelFunc = fun() ->
+        % Ping/Pong to make sure we wait for this
+        % process to die
+        receive
+            {ping, From} ->
+                From ! pong
+        end
+    end,
+    SentinelPid = spawn(SentinelFunc),
+    {ConfigPid, SentinelPid}.
+
+% Stop couch_config (and the sentinel, when present) and wait for the
+% config process to actually exit. The /2 clause adapts to foreachx.
+teardown({ConfigPid, SentinelPid}) ->
+    teardown(ConfigPid),
+    case process_info(SentinelPid) of
+        undefined -> ok;
+        _ ->
+            SentinelPid ! {ping, self()},
+            receive
+                pong ->
+                    ok
+            after 100 ->
+                throw({timeout_error, registered_pid})
+            end
+    end;
+teardown(Pid) ->
+    couch_config:stop(),
+    erlang:monitor(process, Pid),
+    receive
+        {'DOWN', _, _, Pid, _} ->
+            ok
+    after ?TIMEOUT ->
+        throw({timeout_error, config_stop})
+    end.
+teardown(_, Pid) ->
+    teardown(Pid).
+
+
+% Top-level EUnit generator aggregating all couch_config test groups.
+couch_config_test_() ->
+    {
+        "CouchDB config tests",
+        [
+            couch_config_get_tests(),
+            couch_config_set_tests(),
+            couch_config_del_tests(),
+            config_override_tests(),
+            config_persistent_changes_tests(),
+            config_register_tests(),
+            config_no_files_tests()
+        ]
+    }.
+
+% Group: reading values (sections, defaults, binary keys).
+couch_config_get_tests() ->
+    {
+        "Config get tests",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                should_load_all_configs(),
+                should_locate_daemons_section(),
+                should_locate_mrview_handler(),
+                should_return_undefined_atom_on_missed_section(),
+                should_return_undefined_atom_on_missed_option(),
+                should_return_custom_default_value_on_missed_option(),
+                should_only_return_default_on_missed_option(),
+                should_get_binary_option()
+            ]
+        }
+    }.
+
+% Group: writing values.
+couch_config_set_tests() ->
+    {
+        "Config set tests",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                should_update_option(),
+                should_create_new_section(),
+                should_set_binary_option()
+            ]
+        }
+    }.
+
+% Group: deleting values.
+couch_config_del_tests() ->
+    {
+        "Config deletion tests",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                should_return_undefined_atom_after_option_deletion(),
+                should_be_ok_on_deleting_unknown_options(),
+                should_delete_binary_option()
+            ]
+        }
+    }.
+
+% Group: later ini files in the chain override earlier ones. Uses foreachx
+% so each test receives its own chain via setup/1 / teardown/2.
+% Fixed typo in the group description ("Configs overide" -> "Config override").
+config_override_tests() ->
+    {
+        "Config override tests",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [
+                {{temporary, [?CONFIG_DEFAULT]},
+                 fun should_ensure_in_defaults/2},
+                {{temporary, [?CONFIG_DEFAULT, ?CONFIG_FIXTURE_1]},
+                 fun should_override_options/2},
+                {{temporary, [?CONFIG_DEFAULT, ?CONFIG_FIXTURE_2]},
+                 fun should_create_new_sections_on_override/2},
+                {{temporary, [?CONFIG_DEFAULT, ?CONFIG_FIXTURE_1,
+                              ?CONFIG_FIXTURE_2]},
+                 fun should_win_last_in_chain/2}
+            ]
+        }
+    }.
+
+% Group: a persistent set/delete must be written to the last file in the
+% chain and must not touch the default ini. The three cases run in order:
+% write, then verify the default, then verify the temp file.
+config_persistent_changes_tests() ->
+    {
+        "Config persistent changes",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [
+                {{persistent, [?CONFIG_DEFAULT]},
+                 fun should_write_changes/2},
+                {{temporary, [?CONFIG_DEFAULT]},
+                 fun should_ensure_that_default_wasnt_modified/2},
+                {{temporary, [?CONFIG_FIXTURE_TEMP]},
+                 fun should_ensure_that_written_to_last_config_in_chain/2}
+            ]
+        }
+    }.
+
+% Group: change-notification handlers registered against a sentinel pid.
+config_register_tests() ->
+    {
+        "Config changes subscriber",
+        {
+            foreach,
+            fun setup_register/0, fun teardown/1,
+            [
+                fun should_handle_port_changes/1,
+                fun should_pass_persistent_flag/1,
+                fun should_not_trigger_handler_on_other_options_changes/1,
+                fun should_not_trigger_handler_after_related_process_death/1
+            ]
+        }
+    }.
+
+% Group: behaviour when couch_config is started with no ini files.
+config_no_files_tests() ->
+    {
+        "Test couch_config with no files",
+        {
+            foreach,
+            fun setup_empty/0, fun teardown/1,
+            [
+                should_ensure_that_no_ini_files_loaded(),
+                should_create_non_persistent_option(),
+                should_create_persistent_option()
+            ]
+        }
+    }.
+
+
+% --- get ---
+
+should_load_all_configs() ->
+    ?_assert(length(couch_config:all()) > 0).
+
+should_locate_daemons_section() ->
+    ?_assert(length(couch_config:get("daemons")) > 0).
+
+should_locate_mrview_handler() ->
+    ?_assertEqual("{couch_mrview_http, handle_view_req}",
+                  couch_config:get("httpd_design_handlers", "_view")).
+
+should_return_undefined_atom_on_missed_section() ->
+    ?_assertEqual(undefined,
+                  couch_config:get("foo", "bar")).
+
+should_return_undefined_atom_on_missed_option() ->
+    ?_assertEqual(undefined,
+                  couch_config:get("httpd", "foo")).
+
+should_return_custom_default_value_on_missed_option() ->
+    ?_assertEqual("bar",
+                  couch_config:get("httpd", "foo", "bar")).
+
+% The default must NOT shadow an existing value ("port" is set in the ini).
+should_only_return_default_on_missed_option() ->
+    ?_assertEqual("0",
+                  couch_config:get("httpd", "port", "bar")).
+
+should_get_binary_option() ->
+    ?_assertEqual(<<"baz">>,
+                  couch_config:get(<<"foo">>, <<"bar">>, <<"baz">>)).
+
+% --- set (all non-persistent: fourth argument 'false') ---
+
+should_update_option() ->
+    ?_assertEqual("severe",
+        begin
+            ok = couch_config:set("log", "level", "severe", false),
+            couch_config:get("log", "level")
+        end).
+
+should_create_new_section() ->
+    ?_assertEqual("bang",
+        begin
+            undefined = couch_config:get("new_section", "bizzle"),
+            ok = couch_config:set("new_section", "bizzle", "bang", false),
+            couch_config:get("new_section", "bizzle")
+        end).
+
+should_set_binary_option() ->
+    ?_assertEqual(<<"baz">>,
+        begin
+            ok = couch_config:set(<<"foo">>, <<"bar">>, <<"baz">>, false),
+            couch_config:get(<<"foo">>, <<"bar">>)
+        end).
+
+% --- delete ---
+
+should_return_undefined_atom_after_option_deletion() ->
+    ?_assertEqual(undefined,
+        begin
+            ok = couch_config:delete("log", "level", false),
+            couch_config:get("log", "level")
+        end).
+
+should_be_ok_on_deleting_unknown_options() ->
+    ?_assertEqual(ok, couch_config:delete("zoo", "boo", false)).
+
+should_delete_binary_option() ->
+    ?_assertEqual(undefined,
+        begin
+            ok = couch_config:set(<<"foo">>, <<"bar">>, <<"baz">>, false),
+            ok = couch_config:delete(<<"foo">>, <<"bar">>, false),
+            couch_config:get(<<"foo">>, <<"bar">>)
+        end).
+
+% --- override chain checks (foreachx: ignore chain spec and pid args) ---
+
+should_ensure_in_defaults(_, _) ->
+    ?_test(begin
+        ?assertEqual("100",
+                     couch_config:get("couchdb", "max_dbs_open")),
+        ?assertEqual("5984",
+                     couch_config:get("httpd", "port")),
+        ?assertEqual(undefined,
+                     couch_config:get("fizbang", "unicode"))
+    end).
+
+should_override_options(_, _) ->
+    ?_test(begin
+        ?assertEqual("10",
+                     couch_config:get("couchdb", "max_dbs_open")),
+        ?assertEqual("4895",
+                     couch_config:get("httpd", "port"))
+    end).
+
+should_create_new_sections_on_override(_, _) ->
+    ?_test(begin
+        ?assertEqual("80",
+                     couch_config:get("httpd", "port")),
+        ?assertEqual("normalized",
+                     couch_config:get("fizbang", "unicode"))
+    end).
+
+should_win_last_in_chain(_, _) ->
+    ?_assertEqual("80", couch_config:get("httpd", "port")).
+
+% Persistent set/delete (no 'false' flag) must be written to disk for the
+% two follow-up tests to observe.
+% NOTE(review): couch_config:delete("httpd", "bind_address", "8080") passes
+% the string "8080" where the sibling calls pass a boolean Persist flag -
+% confirm couch_config:delete/3's contract.
+should_write_changes(_, _) ->
+    ?_test(begin
+        ?assertEqual("5984",
+                     couch_config:get("httpd", "port")),
+        ?assertEqual(ok,
+                     couch_config:set("httpd", "port", "8080")),
+        ?assertEqual("8080",
+                     couch_config:get("httpd", "port")),
+        ?assertEqual(ok,
+                     couch_config:delete("httpd", "bind_address", "8080")),
+        ?assertEqual(undefined,
+                     couch_config:get("httpd", "bind_address"))
+    end).
+
+% The default ini must still carry its original values...
+should_ensure_that_default_wasnt_modified(_, _) ->
+    ?_test(begin
+        ?assertEqual("5984",
+                     couch_config:get("httpd", "port")),
+        ?assertEqual("127.0.0.1",
+                     couch_config:get("httpd", "bind_address"))
+    end).
+
+% ...while the temp file at the end of the chain has the written change.
+should_ensure_that_written_to_last_config_in_chain(_, _) ->
+    ?_test(begin
+        ?assertEqual("8080",
+                     couch_config:get("httpd", "port")),
+        ?assertEqual(undefined,
+                     couch_config:get("httpd", "bind_address"))
+    end).
+
+% A handler registered for "httpd"/"port" must fire with the new value.
+should_handle_port_changes({_, SentinelPid}) ->
+    ?_assert(begin
+        MainProc = self(),
+        Port = "8080",
+
+        couch_config:register(
+            fun("httpd", "port", Value) ->
+                % couch_config catches every error raised from handler
+                % so it's not possible to just assert on wrong value.
+                % We have to return the result as message
+                MainProc ! (Value =:= Port)
+            end,
+            SentinelPid
+        ),
+        ok = couch_config:set("httpd", "port", Port, false),
+
+        receive
+            R ->
+                R
+        after ?TIMEOUT ->
+            erlang:error({assertion_failed,
+                          [{module, ?MODULE},
+                           {line, ?LINE},
+                           {reason, "Timeout"}]})
+        end
+    end).
+
+% A 4-arity handler additionally receives the Persist flag; setting with
+% 'false' must deliver false.
+should_pass_persistent_flag({_, SentinelPid}) ->
+    ?_assert(begin
+        MainProc = self(),
+
+        couch_config:register(
+            fun("httpd", "port", _, Persist) ->
+                % couch_config catches every error raised from handler
+                % so it's not possible to just assert on wrong value.
+                % We have to return the result as message
+                MainProc ! Persist
+            end,
+            SentinelPid
+        ),
+        ok = couch_config:set("httpd", "port", "8080", false),
+
+        receive
+            false ->
+                true
+        after ?SHORT_TIMEOUT ->
+            false
+        end
+    end).
+
+% Changing an unrelated key must NOT invoke the "port" handler: success is
+% the absence of a message within ?SHORT_TIMEOUT.
+should_not_trigger_handler_on_other_options_changes({_, SentinelPid}) ->
+    ?_assert(begin
+        MainProc = self(),
+
+        couch_config:register(
+            fun("httpd", "port", _) ->
+                MainProc ! ok
+            end,
+            SentinelPid
+        ),
+        ok = couch_config:set("httpd", "bind_address", "0.0.0.0", false),
+
+        receive
+            ok ->
+                false
+        after ?SHORT_TIMEOUT ->
+            true
+        end
+    end).
+
+% Once the owning (sentinel) process dies, its handler must be dropped.
+% The ping/pong makes the sentinel's receive complete, after which it exits.
+should_not_trigger_handler_after_related_process_death({_, SentinelPid}) ->
+    ?_assert(begin
+        MainProc = self(),
+
+        couch_config:register(
+            fun("httpd", "port", _) ->
+                MainProc ! ok
+            end,
+            SentinelPid
+        ),
+
+        SentinelPid ! {ping, MainProc},
+        receive
+            pong ->
+                ok
+        after ?SHORT_TIMEOUT ->
+            erlang:error({assertion_failed,
+                          [{module, ?MODULE},
+                           {line, ?LINE},
+                           {reason, "Timeout"}]})
+        end,
+
+        ok = couch_config:set("httpd", "port", "12345", false),
+
+        receive
+            ok ->
+                false
+        after ?SHORT_TIMEOUT ->
+            true
+        end
+    end).
+
+% --- empty-chain behaviour ---
+
+should_ensure_that_no_ini_files_loaded() ->
+    ?_assertEqual(0, length(couch_config:all())).
+
+should_create_non_persistent_option() ->
+    ?_assertEqual("80",
+        begin
+            ok = couch_config:set("httpd", "port", "80", false),
+            couch_config:get("httpd", "port")
+        end).
+
+should_create_persistent_option() ->
+    ?_assertEqual("127.0.0.1",
+        begin
+            ok = couch_config:set("httpd", "bind_address", "127.0.0.1"),
+            couch_config:get("httpd", "bind_address")
+        end).
diff --git a/test/couchdb/couch_db_tests.erl b/test/couchdb/couch_db_tests.erl
new file mode 100644
index 000000000..30897144f
--- /dev/null
+++ b/test/couchdb/couch_db_tests.erl
@@ -0,0 +1,114 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_db_tests).
+
+-include("couch_eunit.hrl").
+
+% EUnit timeout in seconds for the longest create/delete cycle test below.
+-define(TIMEOUT, 120).
+
+
+% Start the server and silence SASL noise in the logs for these tests.
+setup() ->
+    {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
+    couch_config:set("log", "include_sasl", "false", false),
+    ok.
+
+teardown(_) ->
+    couch_server_sup:stop().
+
+
+% Single 'setup' fixture: one server instance for all db lifecycle tests.
+create_delete_db_test_()->
+    {
+        "Database create/delete tests",
+        {
+            setup,
+            fun setup/0, fun teardown/1,
+            fun(_) ->
+                [should_create_db(),
+                 should_delete_db(),
+                 should_create_multiple_dbs(),
+                 should_delete_multiple_dbs(),
+                 should_create_delete_database_continuously()]
+            end
+        }
+    }.
+
+
+should_create_db() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, []),
+    ok = couch_db:close(Db),
+    {ok, AllDbs} = couch_server:all_databases(),
+    ?_assert(lists:member(DbName, AllDbs)).
+
+should_delete_db() ->
+    DbName = ?tempdb(),
+    couch_db:create(DbName, []),
+    couch_server:delete(DbName, []),
+    {ok, AllDbs} = couch_server:all_databases(),
+    ?_assertNot(lists:member(DbName, AllDbs)).
+
+% Lower max_dbs_open to 3 and create 6 dbs - presumably to exercise the
+% server's open-db limit handling; verify all 6 are still listed.
+% NOTE(review): ?_assertEqual(NumCreated, 6) has expected/actual swapped
+% relative to the usual (Expected, Actual) convention; harmless here.
+should_create_multiple_dbs() ->
+    gen_server:call(couch_server, {set_max_dbs_open, 3}),
+
+    DbNames = [?tempdb() || _ <- lists:seq(1, 6)],
+    lists:foreach(fun(DbName) ->
+        {ok, Db} = couch_db:create(DbName, []),
+        ok = couch_db:close(Db)
+    end, DbNames),
+
+    {ok, AllDbs} = couch_server:all_databases(),
+    NumCreated = lists:foldl(fun(DbName, Acc) ->
+        ?assert(lists:member(DbName, AllDbs)),
+        Acc+1
+    end, 0, DbNames),
+
+    ?_assertEqual(NumCreated, 6).
+
+% Create then delete 6 dbs; none may remain in the server's listing.
+should_delete_multiple_dbs() ->
+    DbNames = [?tempdb() || _ <- lists:seq(1, 6)],
+    lists:foreach(fun(DbName) ->
+        {ok, Db} = couch_db:create(DbName, []),
+        ok = couch_db:close(Db)
+    end, DbNames),
+
+    lists:foreach(fun(DbName) ->
+        ok = couch_server:delete(DbName, [])
+    end, DbNames),
+
+    {ok, AllDbs} = couch_server:all_databases(),
+    NumDeleted = lists:foldl(fun(DbName, Acc) ->
+        ?assertNot(lists:member(DbName, AllDbs)),
+        Acc + 1
+    end, 0, DbNames),
+
+    ?_assertEqual(NumDeleted, 6).
+
+% Stress delete/create of the same db name 10, 100 and 1000 times; each
+% repetition count becomes its own timed EUnit case.
+should_create_delete_database_continuously() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, []),
+    couch_db:close(Db),
+    [{timeout, ?TIMEOUT, {integer_to_list(N) ++ " times",
+                          ?_assert(loop(DbName, N))}}
+     || N <- [10, 100, 1000]].
+
+% Run N delete/create cycles; returns true when all succeed.
+loop(_, 0) ->
+    true;
+loop(DbName, N) ->
+    ok = cycle(DbName),
+    loop(DbName, N - 1).
+
+% One delete-then-create cycle of an existing db.
+cycle(DbName) ->
+    ok = couch_server:delete(DbName, []),
+    {ok, Db} = couch_db:create(DbName, []),
+    couch_db:close(Db),
+    ok.
diff --git a/test/couchdb/couch_doc_json_tests.erl b/test/couchdb/couch_doc_json_tests.erl
new file mode 100644
index 000000000..1592b6b5a
--- /dev/null
+++ b/test/couchdb/couch_doc_json_tests.erl
@@ -0,0 +1,391 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_doc_json_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+
+%% Start couch_config and disable attachment compression so attachment
+%% byte lengths in the fixtures below stay predictable.
+setup() ->
+    couch_config:start_link(?CONFIG_CHAIN),
+    couch_config:set("attachments", "compression_level", "0", false),
+    ok.
+
+%% Stop couch_config started by setup/0.
+teardown(_) ->
+    couch_config:stop().
+
+
+%% Top-level EUnit generator: runs the EJSON->#doc{} and #doc{}->EJSON
+%% case tables under a shared config setup/teardown fixture.
+json_doc_test_() ->
+    {
+        setup,
+        fun setup/0, fun teardown/1,
+        [
+            {
+                "Document from JSON",
+                [
+                    from_json_success_cases(),
+                    from_json_error_cases()
+                ]
+            },
+            {
+                "Document to JSON",
+                [
+                    to_json_success_cases()
+                ]
+            }
+        ]
+    }.
+
+%% Table-driven success cases for couch_doc:from_json_obj/1. Each entry
+%% is {InputEJson, ExpectedDocMatch, Description}; the expected #doc{}
+%% is used as a match pattern via ?_assertMatch.
+from_json_success_cases() ->
+    Cases = [
+        {
+            {[]},
+            #doc{},
+            "Return an empty document for an empty JSON object."
+        },
+        {
+            {[{<<"_id">>, <<"zing!">>}]},
+            #doc{id = <<"zing!">>},
+            "Parses document ids."
+        },
+        {
+            {[{<<"_id">>, <<"_design/foo">>}]},
+            #doc{id = <<"_design/foo">>},
+            "_design/document ids."
+        },
+        {
+            {[{<<"_id">>, <<"_local/bam">>}]},
+            #doc{id = <<"_local/bam">>},
+            "_local/document ids."
+        },
+        {
+            {[{<<"_rev">>, <<"4-230234">>}]},
+            #doc{revs = {4, [<<"230234">>]}},
+            "_rev stored in revs."
+        },
+        {
+            {[{<<"soap">>, 35}]},
+            #doc{body = {[{<<"soap">>, 35}]}},
+            "Non underscore prefixed fields stored in body."
+        },
+        {
+            %% One stub attachment (data = stub, revpos = nil) and one
+            %% inline attachment carried as base64 in "data"; the
+            %% expected lengths are of the decoded bytes.
+            {[{<<"_attachments">>, {[
+                {<<"my_attachment.fu">>, {[
+                    {<<"stub">>, true},
+                    {<<"content_type">>, <<"application/awesome">>},
+                    {<<"length">>, 45}
+                ]}},
+                {<<"noahs_private_key.gpg">>, {[
+                    {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>},
+                    {<<"content_type">>, <<"application/pgp-signature">>}
+                ]}}
+            ]}}]},
+            #doc{atts = [
+                #att{
+                    name = <<"my_attachment.fu">>,
+                    data = stub,
+                    type = <<"application/awesome">>,
+                    att_len = 45,
+                    disk_len = 45,
+                    revpos = nil
+                },
+                #att{
+                    name = <<"noahs_private_key.gpg">>,
+                    data = <<"I have a pet fish!">>,
+                    type = <<"application/pgp-signature">>,
+                    att_len = 18,
+                    disk_len = 18,
+                    revpos = 0
+                }
+            ]},
+            "Attachments are parsed correctly."
+        },
+        {
+            {[{<<"_deleted">>, true}]},
+            #doc{deleted = true},
+            "_deleted controls the deleted field."
+        },
+        {
+            {[{<<"_deleted">>, false}]},
+            #doc{},
+            "{\"_deleted\": false} is ok."
+        },
+        {
+            %% When both are present, _revisions wins over _rev.
+            {[
+                {<<"_revisions">>,
+                    {[{<<"start">>, 4},
+                      {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}]}},
+                {<<"_rev">>, <<"6-something">>}
+            ]},
+            #doc{revs = {4, [<<"foo1">>, <<"phi3">>, <<"omega">>]}},
+            "_revisions attribute are preferred to _rev."
+        },
+        %% The following reserved, server-computed members are silently
+        %% dropped on the way in.
+        {
+            {[{<<"_revs_info">>, dropping}]},
+            #doc{},
+            "Drops _revs_info."
+        },
+        {
+            {[{<<"_local_seq">>, dropping}]},
+            #doc{},
+            "Drops _local_seq."
+        },
+        {
+            {[{<<"_conflicts">>, dropping}]},
+            #doc{},
+            "Drops _conflicts."
+        },
+        {
+            {[{<<"_deleted_conflicts">>, dropping}]},
+            #doc{},
+            "Drops _deleted_conflicts."
+        }
+    ],
+    lists:map(
+        fun({EJson, Expect, Msg}) ->
+            {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj(EJson))}
+        end,
+        Cases).
+
+%% Table-driven error cases for couch_doc:from_json_obj/1. 3-tuples
+%% carry the exact exception term to match; 2-tuples only require that
+%% *some* exception is raised (used where the error term is unstable).
+from_json_error_cases() ->
+    Cases = [
+        {
+            [],
+            {bad_request, "Document must be a JSON object"},
+            "arrays are invalid"
+        },
+        {
+            4,
+            {bad_request, "Document must be a JSON object"},
+            "integers are invalid"
+        },
+        {
+            true,
+            {bad_request, "Document must be a JSON object"},
+            "literals are invalid"
+        },
+        {
+            {[{<<"_id">>, {[{<<"foo">>, 5}]}}]},
+            {bad_request, <<"Document id must be a string">>},
+            "Document id must be a string."
+        },
+        {
+            {[{<<"_id">>, <<"_random">>}]},
+            {bad_request,
+             <<"Only reserved document ids may start with underscore.">>},
+            "Disallow arbitrary underscore prefixed docids."
+        },
+        {
+            {[{<<"_rev">>, 5}]},
+            {bad_request, <<"Invalid rev format">>},
+            "_rev must be a string"
+        },
+        {
+            {[{<<"_rev">>, "foobar"}]},
+            {bad_request, <<"Invalid rev format">>},
+            "_rev must be %d-%s"
+        },
+        {
+            %% 2-tuple form: any exception is acceptable here.
+            {[{<<"_rev">>, "foo-bar"}]},
+            "Error if _rev's integer expection is broken."
+        },
+        {
+            {[{<<"_revisions">>, {[{<<"start">>, true}]}}]},
+            {doc_validation, "_revisions.start isn't an integer."},
+            "_revisions.start must be an integer."
+        },
+        {
+            {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, 5}]}}]},
+            {doc_validation, "_revisions.ids isn't a array."},
+            "_revions.ids must be a list."
+        },
+        {
+            {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, [5]}]}}]},
+            {doc_validation, "RevId isn't a string"},
+            "Revision ids must be strings."
+        },
+        {
+            {[{<<"_something">>, 5}]},
+            {doc_validation, <<"Bad special document member: _something">>},
+            "Underscore prefix fields are reserved."
+        }
+    ],
+
+    lists:map(fun
+        ({EJson, Expect, Msg}) ->
+            %% Expected-exception form: catch and match the error term.
+            Error = (catch couch_doc:from_json_obj(EJson)),
+            {Msg, ?_assertMatch(Expect, Error)};
+        ({EJson, Msg}) ->
+            %% Any-exception form: fail only if no exception is raised.
+            try
+                couch_doc:from_json_obj(EJson),
+                {"Conversion failed to raise an exception", ?_assert(false)}
+            catch
+                _:_ -> {Msg, ?_assert(true)}
+            end
+    end, Cases).
+
+%% Table-driven cases for couch_doc:to_json_obj/2. 3-tuples are
+%% {Doc, ExpectedEJson, Description} serialized with no options;
+%% 4-tuples carry an options list ([revs] or [attachments]) first.
+to_json_success_cases() ->
+    Cases = [
+        {
+            #doc{},
+            {[{<<"_id">>, <<"">>}]},
+            "Empty docs are {\"_id\": \"\"}"
+        },
+        {
+            #doc{id = <<"foo">>},
+            {[{<<"_id">>, <<"foo">>}]},
+            "_id is added."
+        },
+        {
+            #doc{revs = {5, ["foo"]}},
+            {[{<<"_id">>, <<>>}, {<<"_rev">>, <<"5-foo">>}]},
+            "_rev is added."
+        },
+        {
+            [revs],
+            #doc{revs = {5, [<<"first">>, <<"second">>]}},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_rev">>, <<"5-first">>},
+                {<<"_revisions">>, {[
+                    {<<"start">>, 5},
+                    {<<"ids">>, [<<"first">>, <<"second">>]}
+                ]}}
+            ]},
+            "_revisions include with revs option"
+        },
+        {
+            #doc{body = {[{<<"foo">>, <<"bar">>}]}},
+            {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}]},
+            "Arbitrary fields are added."
+        },
+        {
+            #doc{deleted = true, body = {[{<<"foo">>, <<"bar">>}]}},
+            {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}, {<<"_deleted">>, true}]},
+            "Deleted docs no longer drop body members."
+        },
+        {
+            %% revs_info positions count down from start: 4-fin, 3-zim.
+            #doc{meta = [
+                {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]}
+            ]},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_revs_info">>, [
+                    {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]},
+                    {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]}
+                ]}
+            ]},
+            "_revs_info field is added correctly."
+        },
+        {
+            #doc{meta = [{local_seq, 5}]},
+            {[{<<"_id">>, <<>>}, {<<"_local_seq">>, 5}]},
+            "_local_seq is added as an integer."
+        },
+        {
+            #doc{meta = [{conflicts, [{3, <<"yep">>}, {1, <<"snow">>}]}]},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_conflicts">>, [<<"3-yep">>, <<"1-snow">>]}
+            ]},
+            "_conflicts is added as an array of strings."
+        },
+        {
+            #doc{meta = [{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]}
+            ]},
+            %% NOTE(review): "conflicsts" typo below is in the test
+            %% description string only; harmless but worth fixing.
+            "_deleted_conflicsts is added as an array of strings."
+        },
+        {
+            %% Without the attachments option, attachments serialize as
+            %% stubs: content_type/revpos/length plus stub=true.
+            #doc{atts = [
+                #att{
+                    name = <<"big.xml">>,
+                    type = <<"xml/sucks">>,
+                    data = fun() -> ok end,
+                    revpos = 1,
+                    att_len = 400,
+                    disk_len = 400
+                },
+                #att{
+                    name = <<"fast.json">>,
+                    type = <<"json/ftw">>,
+                    data = <<"{\"so\": \"there!\"}">>,
+                    revpos = 1,
+                    att_len = 16,
+                    disk_len = 16
+                }
+            ]},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_attachments">>, {[
+                    {<<"big.xml">>, {[
+                        {<<"content_type">>, <<"xml/sucks">>},
+                        {<<"revpos">>, 1},
+                        {<<"length">>, 400},
+                        {<<"stub">>, true}
+                    ]}},
+                    {<<"fast.json">>, {[
+                        {<<"content_type">>, <<"json/ftw">>},
+                        {<<"revpos">>, 1},
+                        {<<"length">>, 16},
+                        {<<"stub">>, true}
+                    ]}}
+                ]}}
+            ]},
+            "Attachments attached as stubs only include a length."
+        },
+        {
+            %% With [attachments], bodies are inlined base64 in "data";
+            %% data may be a fun producing the bytes.
+            [attachments],
+            #doc{atts = [
+                #att{
+                    name = <<"stuff.txt">>,
+                    type = <<"text/plain">>,
+                    data = fun() -> <<"diet pepsi">> end,
+                    revpos = 1,
+                    att_len = 10,
+                    disk_len = 10
+                },
+                #att{
+                    name = <<"food.now">>,
+                    type = <<"application/food">>,
+                    revpos = 1,
+                    data = <<"sammich">>
+                }
+            ]},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_attachments">>, {[
+                    {<<"stuff.txt">>, {[
+                        {<<"content_type">>, <<"text/plain">>},
+                        {<<"revpos">>, 1},
+                        {<<"data">>, <<"ZGlldCBwZXBzaQ==">>}
+                    ]}},
+                    {<<"food.now">>, {[
+                        {<<"content_type">>, <<"application/food">>},
+                        {<<"revpos">>, 1},
+                        {<<"data">>, <<"c2FtbWljaA==">>}
+                    ]}}
+                ]}}
+            ]},
+            "Attachments included inline with attachments option."
+        }
+    ],
+
+    lists:map(fun
+        ({Doc, EJson, Msg}) ->
+            {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, []))};
+        ({Options, Doc, EJson, Msg}) ->
+            {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, Options))}
+    end, Cases).
diff --git a/test/couchdb/couch_file_tests.erl b/test/couchdb/couch_file_tests.erl
new file mode 100644
index 000000000..ad1338391
--- /dev/null
+++ b/test/couchdb/couch_file_tests.erl
@@ -0,0 +1,265 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_file_tests).
+
+-include("couch_eunit.hrl").
+
+-define(BLOCK_SIZE, 4096).
+-define(setup(F), {setup, fun setup/0, fun teardown/1, F}).
+-define(foreach(Fs), {foreach, fun setup/0, fun teardown/1, Fs}).
+
+
+%% Open a fresh temporary couch_file for each test; the fixture value
+%% passed to tests is the file's pid.
+setup() ->
+    {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
+    Fd.
+
+%% Close the couch_file opened by setup/0.
+teardown(Fd) ->
+    ok = couch_file:close(Fd).
+
+
+%% Open/close behavior: missing files, unknown options, and the state
+%% of a freshly created file. Cases needing an open Fd use ?setup.
+open_close_test_() ->
+    {
+        "Test for proper file open and close",
+        [
+            should_return_enoent_if_missed(),
+            should_ignore_invalid_flags_with_open(),
+            ?setup(fun should_return_pid_on_file_open/1),
+            should_close_file_properly(),
+            ?setup(fun should_create_empty_new_files/1)
+        ]
+    }.
+
+should_return_enoent_if_missed() ->
+    ?_assertEqual({error, enoent}, couch_file:open("not a real file")).
+
+%% Unknown open options are ignored rather than rejected.
+should_ignore_invalid_flags_with_open() ->
+    ?_assertMatch({ok, _},
+                  couch_file:open(?tempfile(), [create, invalid_option])).
+
+should_return_pid_on_file_open(Fd) ->
+    ?_assert(is_pid(Fd)).
+
+should_close_file_properly() ->
+    {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
+    ok = couch_file:close(Fd),
+    ?_assert(true).
+
+%% A newly created file reports zero bytes.
+should_create_empty_new_files(Fd) ->
+    ?_assertMatch({ok, 0}, couch_file:bytes(Fd)).
+
+
+%% Round-trip read/write behavior for terms, binaries and iolists;
+%% every case gets a fresh file via the ?foreach fixture.
+read_write_test_() ->
+    {
+        "Common file read/write tests",
+        ?foreach([
+            fun should_increase_file_size_on_write/1,
+            fun should_return_current_file_size_on_write/1,
+            fun should_write_and_read_term/1,
+            fun should_write_and_read_binary/1,
+            fun should_write_and_read_large_binary/1,
+            fun should_return_term_as_binary_for_reading_binary/1,
+            fun should_read_term_written_as_binary/1,
+            fun should_read_iolist/1,
+            fun should_fsync/1,
+            fun should_not_read_beyond_eof/1,
+            fun should_truncate/1
+        ])
+    }.
+
+
+should_increase_file_size_on_write(Fd) ->
+    {ok, 0, _} = couch_file:append_term(Fd, foo),
+    {ok, Size} = couch_file:bytes(Fd),
+    ?_assert(Size > 0).
+
+%% The position returned by append_term equals the file size before
+%% that append.
+should_return_current_file_size_on_write(Fd) ->
+    {ok, 0, _} = couch_file:append_term(Fd, foo),
+    {ok, Size} = couch_file:bytes(Fd),
+    ?_assertMatch({ok, Size, _}, couch_file:append_term(Fd, bar)).
+
+should_write_and_read_term(Fd) ->
+    {ok, Pos, _} = couch_file:append_term(Fd, foo),
+    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
+
+should_write_and_read_binary(Fd) ->
+    {ok, Pos, _} = couch_file:append_binary(Fd, <<"fancy!">>),
+    ?_assertMatch({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Pos)).
+
+%% Terms are stored snappy-compressed, so reading one back as a raw
+%% binary yields the compressed form.
+should_return_term_as_binary_for_reading_binary(Fd) ->
+    {ok, Pos, _} = couch_file:append_term(Fd, foo),
+    Foo = couch_compress:compress(foo, snappy),
+    ?_assertMatch({ok, Foo}, couch_file:pread_binary(Fd, Pos)).
+
+%% <<131,100,0,3,102,111,111>> is term_to_binary(foo), written raw.
+should_read_term_written_as_binary(Fd) ->
+    {ok, Pos, _} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>),
+    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
+
+should_write_and_read_large_binary(Fd) ->
+    BigBin = list_to_binary(lists:duplicate(100000, 0)),
+    {ok, Pos, _} = couch_file:append_binary(Fd, BigBin),
+    ?_assertMatch({ok, BigBin}, couch_file:pread_binary(Fd, Pos)).
+
+should_read_iolist(Fd) ->
+    %% append_binary == append_iolist?
+    %% Possible bug in pread_iolist or iolist() -> append_binary
+    {ok, Pos, _} = couch_file:append_binary(Fd, ["foo", $m, <<"bam">>]),
+    {ok, IoList} = couch_file:pread_iolist(Fd, Pos),
+    ?_assertMatch(<<"foombam">>, iolist_to_binary(IoList)).
+
+%% Only asserts that sync/1 returns ok; whether bytes truly hit the
+%% platter cannot be observed from here, as the description admits.
+should_fsync(Fd) ->
+    {"How does one test fsync?", ?_assertMatch(ok, couch_file:sync(Fd))}.
+
+%% Placeholder: reading past EOF is acknowledged as untested.
+should_not_read_beyond_eof(_) ->
+    {"No idea how to test reading beyond EOF", ?_assert(true)}.
+
+%% Truncating back to the size recorded after the first append must
+%% leave that first term readable at position 0.
+should_truncate(Fd) ->
+    {ok, 0, _} = couch_file:append_term(Fd, foo),
+    {ok, Size} = couch_file:bytes(Fd),
+    BigBin = list_to_binary(lists:duplicate(100000, 0)),
+    {ok, _, _} = couch_file:append_binary(Fd, BigBin),
+    ok = couch_file:truncate(Fd, Size),
+    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, 0)).
+
+
+%% Header write/read/rewrite behavior, plus (below, outside ?foreach)
+%% recovery from deliberately corrupted headers.
+header_test_() ->
+    {
+        "File header read/write tests",
+        [
+            ?foreach([
+                fun should_write_and_read_atom_header/1,
+                fun should_write_and_read_tuple_header/1,
+                fun should_write_and_read_second_header/1,
+                fun should_truncate_second_header/1,
+                fun should_produce_same_file_size_on_rewrite/1,
+                fun should_save_headers_larger_than_block_size/1
+            ]),
+            should_recover_header_marker_corruption(),
+            should_recover_header_size_corruption(),
+            should_recover_header_md5sig_corruption(),
+            should_recover_header_data_corruption()
+        ]
+    }.
+
+
+should_write_and_read_atom_header(Fd) ->
+    ok = couch_file:write_header(Fd, hello),
+    ?_assertMatch({ok, hello}, couch_file:read_header(Fd)).
+
+should_write_and_read_tuple_header(Fd) ->
+    ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
+    ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
+
+%% read_header returns the most recently written header.
+should_write_and_read_second_header(Fd) ->
+    ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
+    ok = couch_file:write_header(Fd, [foo, <<"more">>]),
+    ?_assertMatch({ok, [foo, <<"more">>]}, couch_file:read_header(Fd)).
+
+%% Truncating away the newer header re-exposes the older one.
+should_truncate_second_header(Fd) ->
+    ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
+    {ok, Size} = couch_file:bytes(Fd),
+    ok = couch_file:write_header(Fd, [foo, <<"more">>]),
+    ok = couch_file:truncate(Fd, Size),
+    ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
+
+%% Writing the same header after truncate should reproduce the same
+%% file size as the first time it was written.
+should_produce_same_file_size_on_rewrite(Fd) ->
+    ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
+    {ok, Size1} = couch_file:bytes(Fd),
+    ok = couch_file:write_header(Fd, [foo, <<"more">>]),
+    {ok, Size2} = couch_file:bytes(Fd),
+    ok = couch_file:truncate(Fd, Size1),
+    ok = couch_file:write_header(Fd, [foo, <<"more">>]),
+    ?_assertMatch({ok, Size2}, couch_file:bytes(Fd)).
+
+%% Regression for COUCHDB-1319: headers spanning multiple 4K blocks.
+should_save_headers_larger_than_block_size(Fd) ->
+    Header = erlang:make_tuple(5000, <<"CouchDB">>),
+    couch_file:write_header(Fd, Header),
+    {"COUCHDB-1319", ?_assertMatch({ok, Header}, couch_file:read_header(Fd))}.
+
+
+%% Each recovery test corrupts a specific byte range of the *latest*
+%% header via the raw fd and then expects read_header to fall back to
+%% the previous intact header. See check_header_recovery/1 below.
+
+%% Zero out the 0x1 header marker byte.
+should_recover_header_marker_corruption() ->
+    ?_assertMatch(
+        ok,
+        check_header_recovery(
+            fun(CouchFd, RawFd, Expect, HeaderPos) ->
+                ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
+                file:pwrite(RawFd, HeaderPos, <<0>>),
+                ?assertMatch(Expect, couch_file:read_header(CouchFd))
+            end)
+    ).
+
+%% Overwrite the stored term size.
+should_recover_header_size_corruption() ->
+    ?_assertMatch(
+        ok,
+        check_header_recovery(
+            fun(CouchFd, RawFd, Expect, HeaderPos) ->
+                ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
+                % +1 for 0x1 byte marker
+                file:pwrite(RawFd, HeaderPos + 1, <<10/integer>>),
+                ?assertMatch(Expect, couch_file:read_header(CouchFd))
+            end)
+    ).
+
+%% Overwrite the 16-byte MD5 signature.
+should_recover_header_md5sig_corruption() ->
+    ?_assertMatch(
+        ok,
+        check_header_recovery(
+            fun(CouchFd, RawFd, Expect, HeaderPos) ->
+                ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
+                % +5 = +1 for 0x1 byte and +4 for term size.
+                file:pwrite(RawFd, HeaderPos + 5, <<"F01034F88D320B22">>),
+                ?assertMatch(Expect, couch_file:read_header(CouchFd))
+            end)
+    ).
+
+%% Overwrite the header payload itself.
+should_recover_header_data_corruption() ->
+    ?_assertMatch(
+        ok,
+        check_header_recovery(
+            fun(CouchFd, RawFd, Expect, HeaderPos) ->
+                ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
+                % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig
+                file:pwrite(RawFd, HeaderPos + 21, <<"some data goes here!">>),
+                ?assertMatch(Expect, couch_file:read_header(CouchFd))
+            end)
+    ).
+
+
+%% Shared harness for the corruption tests: writes a known-good header,
+%% then a throwaway one, and hands CheckFun both the couch_file pid and
+%% a raw fd on the same path plus the throwaway header's position so it
+%% can be corrupted in place.
+check_header_recovery(CheckFun) ->
+    Path = ?tempfile(),
+    {ok, Fd} = couch_file:open(Path, [create, overwrite]),
+    {ok, RawFd} = file:open(Path, [read, write, raw, binary]),
+
+    {ok, _} = write_random_data(Fd),
+    ExpectHeader = {some_atom, <<"a binary">>, 756},
+    ok = couch_file:write_header(Fd, ExpectHeader),
+
+    {ok, HeaderPos} = write_random_data(Fd),
+    ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}),
+
+    CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos),
+
+    ok = file:close(RawFd),
+    ok = couch_file:close(Fd),
+    ok.
+
+%% Append 100..1100 random terms to Fd; returns {ok, Pos} where Pos is
+%% the next ?BLOCK_SIZE-aligned offset, i.e. where the following header
+%% will be written.
+write_random_data(Fd) ->
+    write_random_data(Fd, 100 + random:uniform(1000)).
+
+write_random_data(Fd, 0) ->
+    {ok, Bytes} = couch_file:bytes(Fd),
+    {ok, (1 + Bytes div ?BLOCK_SIZE) * ?BLOCK_SIZE};
+write_random_data(Fd, N) ->
+    Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]],
+    %% Was random:uniform(4) + 1, which yields indices 2..5 and never
+    %% picks the first choice; select uniformly over the whole list.
+    Term = lists:nth(random:uniform(length(Choices)), Choices),
+    {ok, _, _} = couch_file:append_term(Fd, Term),
+    write_random_data(Fd, N - 1).
diff --git a/test/couchdb/couch_key_tree_tests.erl b/test/couchdb/couch_key_tree_tests.erl
new file mode 100644
index 000000000..753ecc446
--- /dev/null
+++ b/test/couchdb/couch_key_tree_tests.erl
@@ -0,0 +1,380 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_key_tree_tests).
+
+-include("couch_eunit.hrl").
+
+-define(DEPTH, 10).
+
+
+%% EUnit generators grouping the couch_key_tree cases by operation:
+%% merge, find_missing, remove_leafs, get/get_key_leafs, count_leafs
+%% and stem. All helpers are pure, so no fixtures are needed.
+key_tree_merge_test_()->
+    {
+        "Key tree merge",
+        [
+            should_merge_with_empty_tree(),
+            should_merge_reflexive(),
+            should_merge_prefix_of_a_tree_with_tree(),
+            should_produce_conflict_on_merge_with_unrelated_branch(),
+            should_merge_reflexive_for_child_nodes(),
+            should_merge_tree_to_itself(),
+            should_merge_tree_of_odd_length(),
+            should_merge_tree_with_stem(),
+            should_merge_with_stem_at_deeper_level(),
+            should_merge_with_stem_at_deeper_level_with_deeper_paths(),
+            should_merge_single_tree_with_deeper_stem(),
+            should_merge_tree_with_large_stem(),
+            should_merge_stems(),
+            should_create_conflicts_on_merge(),
+            should_create_no_conflicts_on_merge(),
+            should_ignore_conflicting_branch()
+        ]
+    }.
+
+key_tree_missing_leaves_test_()->
+    {
+        "Missing tree leaves",
+        [
+            should_not_find_missing_leaves(),
+            should_find_missing_leaves()
+        ]
+    }.
+
+key_tree_remove_leaves_test_()->
+    {
+        "Remove tree leaves",
+        [
+            should_have_no_effect_on_removing_no_leaves(),
+            should_have_no_effect_on_removing_non_existant_branch(),
+            should_remove_leaf(),
+            should_produce_empty_tree_on_removing_all_leaves(),
+            should_have_no_effect_on_removing_non_existant_node(),
+            should_produce_empty_tree_on_removing_last_leaf()
+        ]
+    }.
+
+key_tree_get_leaves_test_()->
+    {
+        "Leaves retrieving",
+        [
+            should_extract_subtree(),
+            should_extract_subsubtree(),
+            should_gather_non_existant_leaf(),
+            should_gather_leaf(),
+            %% NOTE(review): "shoul_" typo in this function name; kept
+            %% because renaming requires touching its definition too.
+            shoul_gather_multiple_leaves(),
+            should_retrieve_full_key_path(),
+            should_retrieve_full_key_path_for_node(),
+            should_retrieve_leaves_with_parent_node(),
+            should_retrieve_all_leaves()
+        ]
+    }.
+
+key_tree_leaf_counting_test_()->
+    {
+        "Leaf counting",
+        [
+            should_have_no_leaves_for_empty_tree(),
+            should_have_single_leaf_for_tree_with_single_node(),
+            %% NOTE(review): "chindler" should read "children".
+            should_have_two_leaves_for_tree_with_chindler_siblings(),
+            should_not_affect_on_leaf_counting_for_stemmed_tree()
+        ]
+    }.
+
+key_tree_stemming_test_()->
+    {
+        "Stemming",
+        [
+            should_have_no_effect_for_stemming_more_levels_than_exists(),
+            should_return_one_deepest_node(),
+            should_return_two_deepest_nodes()
+        ]
+    }.
+
+
+%% Merge cases. Trees are [{StartDepth, {Key, Value, Children}}];
+%% merge/3 returns {NewTree, conflicts | no_conflicts}.
+
+should_merge_with_empty_tree()->
+    One = {1, {"1","foo",[]}},
+    ?_assertEqual({[One], no_conflicts},
+                  couch_key_tree:merge([], One, ?DEPTH)).
+
+%% Merging a tree into itself changes nothing.
+should_merge_reflexive()->
+    One = {1, {"1","foo",[]}},
+    ?_assertEqual({[One], no_conflicts},
+                  couch_key_tree:merge([One], One, ?DEPTH)).
+
+should_merge_prefix_of_a_tree_with_tree()->
+    One = {1, {"1","foo",[]}},
+    TwoSibs = [{1, {"1","foo",[]}},
+               {1, {"2","foo",[]}}],
+    ?_assertEqual({TwoSibs, no_conflicts},
+                  couch_key_tree:merge(TwoSibs, One, ?DEPTH)).
+
+%% A path sharing no ancestor with the existing tree is a conflict.
+should_produce_conflict_on_merge_with_unrelated_branch()->
+    TwoSibs = [{1, {"1","foo",[]}},
+               {1, {"2","foo",[]}}],
+    Three = {1, {"3","foo",[]}},
+    ThreeSibs = [{1, {"1","foo",[]}},
+                 {1, {"2","foo",[]}},
+                 {1, {"3","foo",[]}}],
+    ?_assertEqual({ThreeSibs, conflicts},
+                  couch_key_tree:merge(TwoSibs, Three, ?DEPTH)).
+
+should_merge_reflexive_for_child_nodes()->
+    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+    ?_assertEqual({[TwoChild], no_conflicts},
+                  couch_key_tree:merge([TwoChild], TwoChild, ?DEPTH)).
+
+should_merge_tree_to_itself()->
+    TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
+                                    {"1b", "bar", []}]}},
+    ?_assertEqual({[TwoChildSibs], no_conflicts},
+                  couch_key_tree:merge([TwoChildSibs], TwoChildSibs, ?DEPTH)).
+
+%% Branches of different lengths under the same root combine.
+should_merge_tree_of_odd_length()->
+    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+    TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
+                                    {"1b", "bar", []}]}},
+    TwoChildPlusSibs = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]},
+                                        {"1b", "bar", []}]}},
+
+    ?_assertEqual({[TwoChildPlusSibs], no_conflicts},
+                  couch_key_tree:merge([TwoChild], TwoChildSibs, ?DEPTH)).
+
+%% A stemmed path ({2, ...} = path starting at depth 2) that matches an
+%% existing leaf merges without growing the tree.
+should_merge_tree_with_stem()->
+    Stemmed = {2, {"1a", "bar", []}},
+    TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
+                                    {"1b", "bar", []}]}},
+
+    ?_assertEqual({[TwoChildSibs], no_conflicts},
+                  couch_key_tree:merge([TwoChildSibs], Stemmed, ?DEPTH)).
+
+should_merge_with_stem_at_deeper_level()->
+    Stemmed = {3, {"1bb", "boo", []}},
+    TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
+                                    {"1b", "bar", [{"1bb", "boo", []}]}]}},
+    ?_assertEqual({[TwoChildSibs], no_conflicts},
+                  couch_key_tree:merge([TwoChildSibs], Stemmed, ?DEPTH)).
+
+should_merge_with_stem_at_deeper_level_with_deeper_paths()->
+    Stemmed = {3, {"1bb", "boo", []}},
+    StemmedTwoChildSibs = [{2,{"1a", "bar", []}},
+                           {2,{"1b", "bar", [{"1bb", "boo", []}]}}],
+    ?_assertEqual({StemmedTwoChildSibs, no_conflicts},
+                  couch_key_tree:merge(StemmedTwoChildSibs, Stemmed, ?DEPTH)).
+
+should_merge_single_tree_with_deeper_stem()->
+    Stemmed = {3, {"1aa", "bar", []}},
+    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+    ?_assertEqual({[TwoChild], no_conflicts},
+                  couch_key_tree:merge([TwoChild], Stemmed, ?DEPTH)).
+
+should_merge_tree_with_large_stem()->
+    Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
+    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+    ?_assertEqual({[TwoChild], no_conflicts},
+                  couch_key_tree:merge([TwoChild], Stemmed, ?DEPTH)).
+
+should_merge_stems()->
+    StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
+    StemmedB = {3, {"1aa", "bar", []}},
+    ?_assertEqual({[StemmedA], no_conflicts},
+                  couch_key_tree:merge([StemmedA], StemmedB, ?DEPTH)).
+
+%% A stem deeper than anything reachable from the tree conflicts.
+should_create_conflicts_on_merge()->
+    OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
+    Stemmed = {3, {"1aa", "bar", []}},
+    ?_assertEqual({[OneChild, Stemmed], conflicts},
+                  couch_key_tree:merge([OneChild], Stemmed, ?DEPTH)).
+
+%% Merging the full path re-joins previously conflicting entries.
+should_create_no_conflicts_on_merge()->
+    OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
+    Stemmed = {3, {"1aa", "bar", []}},
+    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+    ?_assertEqual({[TwoChild], no_conflicts},
+                  couch_key_tree:merge([OneChild, Stemmed], TwoChild, ?DEPTH)).
+
+should_ignore_conflicting_branch()->
+    %% this test is based on couch-902-test-case2.py
+    %% foo has conflicts from replication at depth two
+    %% foo3 is the current value
+    Foo = {1, {"foo",
+               "val1",
+               [{"foo2","val2",[]},
+                {"foo3", "val3", []}
+               ]}},
+    %% foo now has an attachment added, which leads to foo4 and val4
+    %% off foo3
+    Bar = {1, {"foo",
+               [],
+               [{"foo3",
+                 [],
+                 [{"foo4","val4",[]}
+                 ]}]}},
+    %% this is what the merge returns
+    %% note that it ignore the conflicting branch as there's no match
+    FooBar = {1, {"foo",
+                  "val1",
+                  [{"foo2","val2",[]},
+                   {"foo3", "val3", [{"foo4","val4",[]}]}
+                  ]}},
+    {
+        "COUCHDB-902",
+        ?_assertEqual({[FooBar], no_conflicts},
+                      couch_key_tree:merge([Foo], Bar, ?DEPTH))
+    }.
+
+%% find_missing/2 takes {Pos, Key} pairs and returns those not present
+%% in the tree.
+
+should_not_find_missing_leaves()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual([],
+                  couch_key_tree:find_missing(TwoChildSibs,
+                                              [{0,"1"}, {1,"1a"}])).
+
+%% Keys below a stemmed tree's start depth also count as missing.
+should_find_missing_leaves()->
+    Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+    Stemmed2 = [{2, {"1aa", "bar", []}}],
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    [
+        ?_assertEqual(
+            [{0, "10"}, {100, "x"}],
+            couch_key_tree:find_missing(
+                TwoChildSibs,
+                [{0,"1"}, {0, "10"}, {1,"1a"}, {100, "x"}])),
+        ?_assertEqual(
+            [{0, "1"}, {100, "x"}],
+            couch_key_tree:find_missing(
+                Stemmed1,
+                [{0,"1"}, {1,"1a"}, {100, "x"}])),
+        ?_assertEqual(
+            [{0, "1"}, {1,"1a"}, {100, "x"}],
+            couch_key_tree:find_missing(
+                Stemmed2,
+                [{0,"1"}, {1,"1a"}, {100, "x"}]))
+    ].
+
+%% remove_leafs/2 returns {RemainingTree, ActuallyRemovedKeys}; only
+%% keys that are leaves get removed.
+
+should_have_no_effect_on_removing_no_leaves()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({TwoChildSibs, []},
+                  couch_key_tree:remove_leafs(TwoChildSibs,
+                                              [])).
+
+should_have_no_effect_on_removing_non_existant_branch()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({TwoChildSibs, []},
+                  couch_key_tree:remove_leafs(TwoChildSibs,
+                                              [{0, "1"}])).
+
+should_remove_leaf()->
+    OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}],
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({OneChild, [{1, "1b"}]},
+                  couch_key_tree:remove_leafs(TwoChildSibs,
+                                              [{1, "1b"}])).
+
+%% Removing every leaf leaves an empty tree (interior nodes go too).
+should_produce_empty_tree_on_removing_all_leaves()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[], [{1, "1b"}, {1, "1a"}]},
+                  couch_key_tree:remove_leafs(TwoChildSibs,
+                                              [{1, "1b"}, {1, "1a"}])).
+
+%% "1a" is an interior node of the stemmed tree, so nothing happens.
+should_have_no_effect_on_removing_non_existant_node()->
+    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+    ?_assertEqual({Stemmed, []},
+                  couch_key_tree:remove_leafs(Stemmed,
+                                              [{1, "1a"}])).
+
+should_produce_empty_tree_on_removing_last_leaf()->
+    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+    ?_assertEqual({[], [{2, "1aa"}]},
+                  couch_key_tree:remove_leafs(Stemmed,
+                                              [{2, "1aa"}])).
+
+%% Retrieval: get/2 and get_key_leafs/2 return {Found, NotFound} where
+%% Found entries are {Value, {Pos, ReversedKeyPath}}.
+
+should_extract_subtree()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[{"foo", {0, ["1"]}}],[]},
+                  couch_key_tree:get(TwoChildSibs, [{0, "1"}])).
+
+should_extract_subsubtree()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]},
+                  couch_key_tree:get(TwoChildSibs, [{1, "1a"}])).
+
+should_gather_non_existant_leaf()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[],[{0, "x"}]},
+                  couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}])).
+
+should_gather_leaf()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[{"bar", {1, ["1a","1"]}}],[]},
+                  couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}])).
+
+%% Asking for an interior key gathers every leaf under it.
+%% NOTE(review): "shoul_" typo in this function name; it is referenced
+%% from key_tree_get_leaves_test_/0, so renaming needs both sites.
+shoul_gather_multiple_leaves()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]},
+                  couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}])).
+
+should_retrieve_full_key_path()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[{0,[{"1", "foo"}]}],[]},
+                  couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}])).
+
+should_retrieve_full_key_path_for_node()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[{1,[{"1a", "bar"},{"1", "foo"}]}],[]},
+                  couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}])).
+
+should_retrieve_leaves_with_parent_node()->
+    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    [
+        ?_assertEqual([{2, [{"1aa", "bar"},{"1a", "bar"}]}],
+                      couch_key_tree:get_all_leafs_full(Stemmed)),
+        ?_assertEqual([{1, [{"1a", "bar"},{"1", "foo"}]},
+                       {1, [{"1b", "bar"},{"1", "foo"}]}],
+                      couch_key_tree:get_all_leafs_full(TwoChildSibs))
+    ].
+
+should_retrieve_all_leaves()->
+    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    [
+        ?_assertEqual([{"bar", {2, ["1aa","1a"]}}],
+                      couch_key_tree:get_all_leafs(Stemmed)),
+        ?_assertEqual([{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}],
+                      couch_key_tree:get_all_leafs(TwoChildSibs))
+    ].
+
+%% count_leafs/1 and stem/2 cases. Stemming to N keeps only the deepest
+%% N levels of each path, raising the stored start depth accordingly.
+
+should_have_no_leaves_for_empty_tree()->
+    ?_assertEqual(0, couch_key_tree:count_leafs([])).
+
+should_have_single_leaf_for_tree_with_single_node()->
+    ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1","foo",[]}}])).
+
+%% NOTE(review): "chindler" should read "children" in this name.
+should_have_two_leaves_for_tree_with_chindler_siblings()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual(2, couch_key_tree:count_leafs(TwoChildSibs)).
+
+should_not_affect_on_leaf_counting_for_stemmed_tree()->
+    ?_assertEqual(1, couch_key_tree:count_leafs([{2, {"1bb", "boo", []}}])).
+
+should_have_no_effect_for_stemming_more_levels_than_exists()->
+    TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
+    ?_assertEqual(TwoChild, couch_key_tree:stem(TwoChild, 3)).
+
+should_return_one_deepest_node()->
+    TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
+    Stemmed = [{2, {"1aa", "bar", []}}],
+    ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 1)).
+
+should_return_two_deepest_nodes()->
+    TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
+    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+    ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)).
diff --git a/test/couchdb/couch_passwords_tests.erl b/test/couchdb/couch_passwords_tests.erl
new file mode 100644
index 000000000..116265cd1
--- /dev/null
+++ b/test/couchdb/couch_passwords_tests.erl
@@ -0,0 +1,54 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_passwords_tests).
+
+-include("couch_eunit.hrl").
+
+
+%% PBKDF2-HMAC-SHA1 derivation checks; the first four expected values
+%% are the published RFC 6070 test vectors (hex-encoded).
+pbkdf2_test_()->
+    {"PBKDF2",
+     [
+         {"Iterations: 1, length: 20",
+          ?_assertEqual(
+              {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
+              couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20))},
+
+         {"Iterations: 2, length: 20",
+          ?_assertEqual(
+              {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
+              couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20))},
+
+         {"Iterations: 4096, length: 20",
+          ?_assertEqual(
+              {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
+              couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20))},
+
+         {"Iterations: 4096, length: 25",
+          ?_assertEqual(
+              {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
+              couch_passwords:pbkdf2(<<"passwordPASSWORDpassword">>,
+                                     <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>,
+                                     4096, 25))},
+         %% Embedded NUL bytes in password and salt must be handled.
+         {"Null byte",
+          ?_assertEqual(
+              {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
+              couch_passwords:pbkdf2(<<"pass\0word">>,
+                                     <<"sa\0lt">>,
+                                     4096, 16))},
+
+         {timeout, 180, %% this may run too long on slow hosts
+          {"Iterations: 16777216 - this may take some time",
+           ?_assertEqual(
+               {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
+               couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
+           )}}]}.
diff --git a/test/couchdb/couch_ref_counter_tests.erl b/test/couchdb/couch_ref_counter_tests.erl
new file mode 100644
index 000000000..b7e97b41b
--- /dev/null
+++ b/test/couchdb/couch_ref_counter_tests.erl
@@ -0,0 +1,107 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_ref_counter_tests).
+%% EUnit tests for couch_ref_counter, a counter that tracks how many
+%% processes hold a reference to a resource. setup/0 starts a fresh
+%% counter (the test process becomes the first referrer) and spawns a
+%% helper child process used as a second referrer.
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+%% Milliseconds to wait for a monitored process to go 'DOWN'.
+-define(TIMEOUT, 1000).
+
+
+setup() ->
+    {ok, RefCtr} = couch_ref_counter:start([]),
+    ChildPid = spawn(fun() -> loop() end),
+    {RefCtr, ChildPid}.
+
+%% Stop the helper child and wait for its 'DOWN' so the next test
+%% starts from a clean slate.
+teardown({_, ChildPid}) ->
+    erlang:monitor(process, ChildPid),
+    ChildPid ! close,
+    wait().
+
+
+couch_ref_counter_test_() ->
+    {
+        "CouchDB reference counter tests",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_initialize_with_calling_process_as_referrer/1,
+                fun should_ignore_unknown_pid/1,
+                fun should_increment_counter_on_pid_add/1,
+                fun should_not_increase_counter_on_readding_same_pid/1,
+                fun should_drop_ref_for_double_added_pid/1,
+                fun should_decrement_counter_on_pid_drop/1,
+                fun should_add_after_drop/1,
+                fun should_decrement_counter_on_process_exit/1
+
+            ]
+        }
+    }.
+
+
+should_initialize_with_calling_process_as_referrer({RefCtr, _}) ->
+    ?_assertEqual(1, couch_ref_counter:count(RefCtr)).
+
+%% Dropping a pid that never added a reference is a no-op returning ok.
+should_ignore_unknown_pid({RefCtr, ChildPid}) ->
+    ?_assertEqual(ok, couch_ref_counter:drop(RefCtr, ChildPid)).
+
+should_increment_counter_on_pid_add({RefCtr, ChildPid}) ->
+    couch_ref_counter:add(RefCtr, ChildPid),
+    ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
+
+%% A pid counts once toward the total no matter how many times added.
+should_not_increase_counter_on_readding_same_pid({RefCtr, ChildPid}) ->
+    couch_ref_counter:add(RefCtr, ChildPid),
+    couch_ref_counter:add(RefCtr, ChildPid),
+    ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
+
+%% A doubly-added pid survives one drop: the count stays at 2.
+should_drop_ref_for_double_added_pid({RefCtr, ChildPid}) ->
+    couch_ref_counter:add(RefCtr, ChildPid),
+    couch_ref_counter:add(RefCtr, ChildPid),
+    couch_ref_counter:drop(RefCtr, ChildPid),
+    ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
+
+should_decrement_counter_on_pid_drop({RefCtr, ChildPid}) ->
+    couch_ref_counter:add(RefCtr, ChildPid),
+    couch_ref_counter:drop(RefCtr, ChildPid),
+    ?_assertEqual(1, couch_ref_counter:count(RefCtr)).
+
+should_add_after_drop({RefCtr, ChildPid}) ->
+    couch_ref_counter:add(RefCtr, ChildPid),
+    couch_ref_counter:drop(RefCtr, ChildPid),
+    couch_ref_counter:add(RefCtr, ChildPid),
+    ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
+
+%% Killing the child referrer must bring the count back down to 1
+%% (the counter is expected to notice referrer death on its own).
+should_decrement_counter_on_process_exit({RefCtr, ChildPid}) ->
+    ?_assertEqual(1,
+        begin
+            couch_ref_counter:add(RefCtr, ChildPid),
+            erlang:monitor(process, ChildPid),
+            ChildPid ! close,
+            wait(),
+            couch_ref_counter:count(RefCtr)
+        end).
+
+
+%% Helper-process body: block until told to close, then exit normally.
+loop() ->
+    receive
+        close -> ok
+    end.
+
+%% Wait for any 'DOWN' message from a monitor set up by the caller.
+wait() ->
+    receive
+        {'DOWN', _, _, _, _} ->
+            ok
+    after ?TIMEOUT ->
+        throw(timeout_error)
+    end.
diff --git a/test/couchdb/couch_stats_tests.erl b/test/couchdb/couch_stats_tests.erl
new file mode 100644
index 000000000..8810ae990
--- /dev/null
+++ b/test/couchdb/couch_stats_tests.erl
@@ -0,0 +1,421 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_stats_tests).
+%% EUnit tests for couch_stats_collector (incremental counters, absolute
+%% recorded values, process-count tracking) and couch_stats_aggregator
+%% (sampled aggregates — sum/mean/stddev/min/max — built from collected
+%% values). The aggregator scenarios run twice via foreachx: once for an
+%% "absolute" stat and once for a "counter" stat.
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+%% Fixture files describing which stats the aggregator tracks.
+-define(STATS_CFG_FIXTURE,
+    filename:join([?FIXTURESDIR, "couch_stats_aggregates.cfg"])).
+-define(STATS_INI_FIXTURE,
+    filename:join([?FIXTURESDIR, "couch_stats_aggregates.ini"])).
+-define(TIMEOUT, 1000).
+%% Pause between collect_sample calls; these tests are timing-sensitive
+%% by design (distinct samples need distinct timestamps).
+-define(TIMEWAIT, 500).
+
+
+setup_collector() ->
+    {ok, Pid} = couch_stats_collector:start(),
+    Pid.
+
+%% Aggregator tests need config + collector + aggregator running.
+setup_aggregator(_) ->
+    {ok, ConfigPid} = couch_config:start_link([?STATS_INI_FIXTURE]),
+    {ok, CollectorPid} = couch_stats_collector:start(),
+    {ok, AggregatorPid} = couch_stats_aggregator:start(?STATS_CFG_FIXTURE),
+    {ConfigPid, CollectorPid, AggregatorPid}.
+
+teardown_collector(Pid) ->
+    erlang:monitor(process, Pid),
+    couch_stats_collector:stop(),
+    receive
+        {'DOWN', _, _, Pid, _} ->
+            ok
+    after ?TIMEOUT ->
+        throw({timeout, couch_stats_collector})
+    end,
+    ok.
+
+%% Stop config, collector and aggregator, waiting for each 'DOWN' so no
+%% process leaks into the next test.
+teardown_aggregator(_, {ConfigPid, CollectorPid, AggregatorPid}) ->
+    lists:foreach(fun({Module, Pid}) ->
+        erlang:monitor(process, Pid),
+        Module:stop(),
+        receive
+            {'DOWN', _, _, Pid, _} ->
+                ok
+        after ?TIMEOUT ->
+            throw({timeout, Module})
+        end
+    end, [{couch_config, ConfigPid},
+          {couch_stats_collector, CollectorPid},
+          {couch_stats_aggregator, AggregatorPid}]),
+    ok.
+
+
+couch_stats_collector_test_() ->
+    {
+        "CouchDB stats collector tests",
+        {
+            foreach,
+            fun setup_collector/0, fun teardown_collector/1,
+            [
+                should_increment_counter(),
+                should_decrement_counter(),
+                should_increment_and_decrement_counter(),
+                should_record_absolute_values(),
+                should_clear_absolute_values(),
+                should_track_process_count(),
+                should_increment_counter_multiple_times_per_pid(),
+                should_decrement_counter_on_process_exit(),
+                should_decrement_for_each_track_process_count_call_on_exit(),
+                should_return_all_counters_and_absolute_values(),
+                should_return_incremental_counters(),
+                should_return_absolute_values()
+            ]
+        }
+    }.
+
+%% Each scenario is exercised for both stat kinds; the first element of
+%% the foreachx pair (absolute | counter) selects the function clause.
+couch_stats_aggregator_test_() ->
+    Funs = [
+        fun should_init_empty_aggregate/2,
+        fun should_get_empty_aggregate/2,
+        fun should_change_stats_on_values_add/2,
+        fun should_change_stats_for_all_times_on_values_add/2,
+        fun should_change_stats_on_values_change/2,
+        fun should_change_stats_for_all_times_on_values_change/2,
+        fun should_not_remove_data_after_some_time_for_0_sample/2,
+        fun should_remove_data_after_some_time_for_other_samples/2
+    ],
+    {
+        "CouchDB stats aggregator tests",
+        [
+            {
+                "Absolute values",
+                {
+                    foreachx,
+                    fun setup_aggregator/1, fun teardown_aggregator/2,
+                    [{absolute, Fun} || Fun <- Funs]
+                }
+            },
+            {
+                "Counters",
+                {
+                    foreachx,
+                    fun setup_aggregator/1, fun teardown_aggregator/2,
+                    [{counter, Fun} || Fun <- Funs]
+                }
+            }
+        ]
+    }.
+
+
+should_increment_counter() ->
+    ?_assertEqual(100,
+        begin
+            AddCount = fun() -> couch_stats_collector:increment(foo) end,
+            repeat(AddCount, 100),
+            couch_stats_collector:get(foo)
+        end).
+
+should_decrement_counter() ->
+    ?_assertEqual(67,
+        begin
+            AddCount = fun() -> couch_stats_collector:increment(foo) end,
+            RemCount = fun() -> couch_stats_collector:decrement(foo) end,
+            repeat(AddCount, 100),
+            repeat(RemCount, 33),
+            couch_stats_collector:get(foo)
+        end).
+
+%% 100 + 10 increments vs. 25 + 5 + 80 decrements balance to zero.
+should_increment_and_decrement_counter() ->
+    ?_assertEqual(0,
+        begin
+            AddCount = fun() -> couch_stats_collector:increment(foo) end,
+            RemCount = fun() -> couch_stats_collector:decrement(foo) end,
+            repeat(AddCount, 100),
+            repeat(RemCount, 25),
+            repeat(AddCount, 10),
+            repeat(RemCount, 5),
+            repeat(RemCount, 80),
+            couch_stats_collector:get(foo)
+        end).
+
+should_record_absolute_values() ->
+    ?_assertEqual(lists:seq(1, 15),
+        begin
+            lists:map(fun(Val) ->
+                couch_stats_collector:record(bar, Val)
+            end, lists:seq(1, 15)),
+            couch_stats_collector:get(bar)
+        end).
+
+%% clear/1 resets an absolute stat; a cleared stat reads back as nil.
+should_clear_absolute_values() ->
+    ?_assertEqual(nil,
+        begin
+            lists:map(fun(Val) ->
+                couch_stats_collector:record(bar, Val)
+            end, lists:seq(1, 15)),
+            couch_stats_collector:clear(bar),
+            couch_stats_collector:get(bar)
+        end).
+
+should_track_process_count() ->
+    ?_assertMatch({_, 1}, spawn_and_count(1)).
+
+should_increment_counter_multiple_times_per_pid() ->
+    ?_assertMatch({_, 3}, spawn_and_count(3)).
+
+should_decrement_counter_on_process_exit() ->
+    ?_assertEqual(2,
+        begin
+            {Pid, 1} = spawn_and_count(1),
+            spawn_and_count(2),
+            RefMon = erlang:monitor(process, Pid),
+            Pid ! sepuku,
+            receive
+                {'DOWN', RefMon, _, _, _} -> ok
+            after ?TIMEOUT ->
+                throw(timeout)
+            end,
+            % give the collector a moment to process the exit: it cannot
+            % observe the tracked process's death instantly
+            timer:sleep(?TIMEWAIT),
+            couch_stats_collector:get(hoopla)
+        end).
+
+%% A pid that called track_process_count N times releases N counts when
+%% it dies: 2 + 4 tracked, the 4-count pid exits, leaving 2.
+should_decrement_for_each_track_process_count_call_on_exit() ->
+    ?_assertEqual(2,
+        begin
+            {_, 2} = spawn_and_count(2),
+            {Pid, 6} = spawn_and_count(4),
+            RefMon = erlang:monitor(process, Pid),
+            Pid ! sepuku,
+            receive
+                {'DOWN', RefMon, _, _, _} -> ok
+            after ?TIMEOUT ->
+                throw(timeout)
+            end,
+            timer:sleep(?TIMEWAIT),
+            couch_stats_collector:get(hoopla)
+        end).
+
+%% Absolute values come back most-recent-first ([1.0, 0.0]).
+should_return_all_counters_and_absolute_values() ->
+    ?_assertEqual([{bar,[1.0,0.0]}, {foo,1}],
+        begin
+            couch_stats_collector:record(bar, 0.0),
+            couch_stats_collector:record(bar, 1.0),
+            couch_stats_collector:increment(foo),
+            lists:sort(couch_stats_collector:all())
+        end).
+
+should_return_incremental_counters() ->
+    ?_assertEqual([{foo,1}],
+        begin
+            couch_stats_collector:record(bar, 0.0),
+            couch_stats_collector:record(bar, 1.0),
+            couch_stats_collector:increment(foo),
+            lists:sort(couch_stats_collector:all(incremental))
+        end).
+
+%% Note: 90 recorded for 'zing' is stored as the list [90], which equals
+%% the string "Z" ($Z =:= 90).
+should_return_absolute_values() ->
+    ?_assertEqual([{bar,[1.0,0.0]}, {zing, "Z"}],
+        begin
+            couch_stats_collector:record(bar, 0.0),
+            couch_stats_collector:record(bar, 1.0),
+            couch_stats_collector:record(zing, 90),
+            couch_stats_collector:increment(foo),
+            lists:sort(couch_stats_collector:all(absolute))
+        end).
+
+%% Before any sample is collected every aggregate field is null.
+should_init_empty_aggregate(absolute, _) ->
+    {Aggs} = couch_stats_aggregator:all(),
+    ?_assertEqual({[{'11', make_agg(<<"randomosity">>,
+                                    null, null, null, null, null)}]},
+                  couch_util:get_value(number, Aggs));
+should_init_empty_aggregate(counter, _) ->
+    {Aggs} = couch_stats_aggregator:all(),
+    ?_assertEqual({[{stuff, make_agg(<<"yay description">>,
+                                     null, null, null, null, null)}]},
+                  couch_util:get_value(testing, Aggs)).
+
+should_get_empty_aggregate(absolute, _) ->
+    ?_assertEqual(make_agg(<<"randomosity">>, null, null, null, null, null),
+                  couch_stats_aggregator:get_json({number, '11'}));
+should_get_empty_aggregate(counter, _) ->
+    ?_assertEqual(make_agg(<<"yay description">>, null, null, null, null, null),
+                  couch_stats_aggregator:get_json({testing, stuff})).
+
+%% One sample: 0..10 recorded averages to 5.0; 100 increments read as a
+%% single 100.0 sample. stddev is null with only one sample.
+should_change_stats_on_values_add(absolute, _) ->
+    lists:foreach(fun(X) ->
+        couch_stats_collector:record({number, 11}, X)
+    end, lists:seq(0, 10)),
+    couch_stats_aggregator:collect_sample(),
+    ?_assertEqual(make_agg(<<"randomosity">>, 5.0, 5.0, null, 5.0, 5.0),
+                  couch_stats_aggregator:get_json({number, 11}));
+should_change_stats_on_values_add(counter, _) ->
+    lists:foreach(fun(_) ->
+        couch_stats_collector:increment({testing, stuff})
+    end, lists:seq(1, 100)),
+    couch_stats_aggregator:collect_sample(),
+    ?_assertEqual(make_agg(<<"yay description">>, 100.0, 100.0, null, 100, 100),
+                  couch_stats_aggregator:get_json({testing, stuff})).
+
+%% Same expectations when asking for the windowed (time-range 1) view.
+should_change_stats_for_all_times_on_values_add(absolute, _) ->
+    lists:foreach(fun(X) ->
+        couch_stats_collector:record({number, 11}, X)
+    end, lists:seq(0, 10)),
+    couch_stats_aggregator:collect_sample(),
+    ?_assertEqual(make_agg(<<"randomosity">>, 5.0, 5.0, null, 5.0, 5.0),
+                  couch_stats_aggregator:get_json({number, 11}, 1));
+should_change_stats_for_all_times_on_values_add(counter, _) ->
+    lists:foreach(fun(_) ->
+        couch_stats_collector:increment({testing, stuff})
+    end, lists:seq(1, 100)),
+    couch_stats_aggregator:collect_sample(),
+    ?_assertEqual(make_agg(<<"yay description">>, 100.0, 100.0, null, 100, 100),
+                  couch_stats_aggregator:get_json({testing, stuff}, 1)).
+
+%% Two samples: the aggregate reflects both (sum, mean, stddev shift).
+should_change_stats_on_values_change(absolute, _) ->
+    ?_assertEqual(make_agg(<<"randomosity">>, 20.0, 10.0, 7.071, 5.0, 15.0),
+        begin
+            lists:foreach(fun(X) ->
+                couch_stats_collector:record({number, 11}, X)
+            end, lists:seq(0, 10)),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_collector:record({number, 11}, 15),
+            couch_stats_aggregator:collect_sample(),
+            couch_stats_aggregator:get_json({number, 11})
+        end);
+should_change_stats_on_values_change(counter, _) ->
+    ?_assertEqual(make_agg(<<"yay description">>, 100.0, 50.0, 70.711, 0, 100),
+        begin
+            lists:foreach(fun(_) ->
+                couch_stats_collector:increment({testing, stuff})
+            end, lists:seq(1, 100)),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_aggregator:collect_sample(),
+            couch_stats_aggregator:get_json({testing, stuff})
+        end).
+
+should_change_stats_for_all_times_on_values_change(absolute, _) ->
+    ?_assertEqual(make_agg(<<"randomosity">>, 20.0, 10.0, 7.071, 5.0, 15.0),
+        begin
+            lists:foreach(fun(X) ->
+                couch_stats_collector:record({number, 11}, X)
+            end, lists:seq(0, 10)),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_collector:record({number, 11}, 15),
+            couch_stats_aggregator:collect_sample(),
+            couch_stats_aggregator:get_json({number, 11}, 1)
+        end);
+should_change_stats_for_all_times_on_values_change(counter, _) ->
+    ?_assertEqual(make_agg(<<"yay description">>, 100.0, 50.0, 70.711, 0, 100),
+        begin
+            lists:foreach(fun(_) ->
+                couch_stats_collector:increment({testing, stuff})
+            end, lists:seq(1, 100)),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_aggregator:collect_sample(),
+            couch_stats_aggregator:get_json({testing, stuff})
+        end).
+
+%% The default (sample period 0) view keeps all collected data even
+%% after additional empty samples.
+should_not_remove_data_after_some_time_for_0_sample(absolute, _) ->
+    ?_assertEqual(make_agg(<<"randomosity">>, 20.0, 10.0, 7.071, 5.0, 15.0),
+        begin
+            lists:foreach(fun(X) ->
+                couch_stats_collector:record({number, 11}, X)
+            end, lists:seq(0, 10)),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_collector:record({number, 11}, 15),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_aggregator:collect_sample(),
+            couch_stats_aggregator:get_json({number, 11})
+        end);
+should_not_remove_data_after_some_time_for_0_sample(counter, _) ->
+    ?_assertEqual(make_agg(<<"yay description">>, 100.0, 33.333, 57.735, 0, 100),
+        begin
+            lists:foreach(fun(_) ->
+                couch_stats_collector:increment({testing, stuff})
+            end, lists:seq(1, 100)),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_aggregator:collect_sample(),
+            couch_stats_aggregator:get_json({testing, stuff})
+        end).
+
+%% A windowed view (time-range 1) ages old samples out, so only the
+%% most recent activity remains visible.
+should_remove_data_after_some_time_for_other_samples(absolute, _) ->
+    ?_assertEqual(make_agg(<<"randomosity">>, 15.0, 15.0, null, 15.0, 15.0),
+        begin
+            lists:foreach(fun(X) ->
+                couch_stats_collector:record({number, 11}, X)
+            end, lists:seq(0, 10)),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_collector:record({number, 11}, 15),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_aggregator:collect_sample(),
+            couch_stats_aggregator:get_json({number, 11}, 1)
+        end);
+should_remove_data_after_some_time_for_other_samples(counter, _) ->
+    ?_assertEqual(make_agg(<<"yay description">>, 0, 0.0, 0.0, 0, 0),
+        begin
+            lists:foreach(fun(_) ->
+                couch_stats_collector:increment({testing, stuff})
+            end, lists:seq(1, 100)),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_aggregator:collect_sample(),
+            timer:sleep(?TIMEWAIT),
+            couch_stats_aggregator:collect_sample(),
+            couch_stats_aggregator:get_json({testing, stuff}, 1)
+        end).
+
+
+%% Spawn a process that calls track_process_count(hoopla) N times, waits
+%% until it reports readiness, and returns {Pid, CurrentHooplaCount}.
+%% The process then blocks until sent 'sepuku' so tests control its exit.
+spawn_and_count(N) ->
+    Self = self(),
+    Pid = spawn(fun() ->
+        lists:foreach(
+            fun(_) ->
+                couch_stats_collector:track_process_count(hoopla)
+            end, lists:seq(1,N)),
+        Self ! reporting,
+        receive
+            sepuku -> ok
+        end
+    end),
+    receive reporting -> ok end,
+    {Pid, couch_stats_collector:get(hoopla)}.
+
+%% Call Fun() Count times.
+repeat(_, 0) ->
+    ok;
+repeat(Fun, Count) ->
+    Fun(),
+    repeat(Fun, Count-1).
+
+%% Build the expected aggregate EJSON; 'current' mirrors 'sum'.
+make_agg(Desc, Sum, Mean, StdDev, Min, Max) ->
+    {[
+        {description, Desc},
+        {current, Sum},
+        {sum, Sum},
+        {mean, Mean},
+        {stddev, StdDev},
+        {min, Min},
+        {max, Max}
+    ]}.
diff --git a/test/couchdb/couch_stream_tests.erl b/test/couchdb/couch_stream_tests.erl
new file mode 100644
index 000000000..335a2fe72
--- /dev/null
+++ b/test/couchdb/couch_stream_tests.erl
@@ -0,0 +1,100 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_stream_tests).
+%% EUnit tests for couch_stream, a buffered write stream layered on a
+%% couch_file fd. close/1 is matched throughout as a 5-tuple whose first
+%% two elements are the written extents ([{Pos, Len}]) and total length.
+
+-include("couch_eunit.hrl").
+
+
+setup() ->
+    {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
+    {ok, Stream} = couch_stream:open(Fd),
+    {Fd, Stream}.
+
+teardown({Fd, _}) ->
+    ok = couch_file:close(Fd).
+
+
+stream_test_() ->
+    {
+        "CouchDB stream tests",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_write/1,
+                fun should_write_consecutive/1,
+                fun should_write_empty_binary/1,
+                fun should_return_file_pointers_on_close/1,
+                fun should_return_stream_size_on_close/1,
+                fun should_return_valid_pointers/1,
+                fun should_recall_last_pointer_position/1,
+                fun should_stream_more_with_4K_chunk_size/1
+            ]
+        }
+    }.
+
+
+should_write({_, Stream}) ->
+    ?_assertEqual(ok, couch_stream:write(Stream, <<"food">>)).
+
+should_write_consecutive({_, Stream}) ->
+    couch_stream:write(Stream, <<"food">>),
+    ?_assertEqual(ok, couch_stream:write(Stream, <<"foob">>)).
+
+%% Writing an empty binary is a valid no-op-like write.
+should_write_empty_binary({_, Stream}) ->
+    ?_assertEqual(ok, couch_stream:write(Stream, <<>>)).
+
+should_return_file_pointers_on_close({_, Stream}) ->
+    couch_stream:write(Stream, <<"foodfoob">>),
+    {Ptrs, _, _, _, _} = couch_stream:close(Stream),
+    ?_assertEqual([{0, 8}], Ptrs).
+
+should_return_stream_size_on_close({_, Stream}) ->
+    couch_stream:write(Stream, <<"foodfoob">>),
+    {_, Length, _, _, _} = couch_stream:close(Stream),
+    ?_assertEqual(8, Length).
+
+%% Data read back through the returned pointers must round-trip.
+should_return_valid_pointers({Fd, Stream}) ->
+    couch_stream:write(Stream, <<"foodfoob">>),
+    {Ptrs, _, _, _, _} = couch_stream:close(Stream),
+    ?_assertEqual(<<"foodfoob">>, read_all(Fd, Ptrs)).
+
+%% A second stream on the same fd must start writing at the current end
+%% of file (couch_file:bytes/1), not at offset 0.
+should_recall_last_pointer_position({Fd, Stream}) ->
+    couch_stream:write(Stream, <<"foodfoob">>),
+    {_, _, _, _, _} = couch_stream:close(Stream),
+    {ok, ExpPtr} = couch_file:bytes(Fd),
+    {ok, Stream2} = couch_stream:open(Fd),
+    ZeroBits = <<0:(8 * 10)>>,
+    OneBits = <<1:(8 * 10)>>,
+    ok = couch_stream:write(Stream2, OneBits),
+    ok = couch_stream:write(Stream2, ZeroBits),
+    {Ptrs, 20, _, _, _} = couch_stream:close(Stream2),
+    [{ExpPtr, 20}] = Ptrs,
+    AllBits = iolist_to_binary([OneBits, ZeroBits]),
+    ?_assertEqual(AllBits, read_all(Fd, Ptrs)).
+
+%% 1024 writes of 5 bytes (5120 total) through a 4K buffer: the stream
+%% flushes a 4100-byte chunk first and the remaining 1020 bytes at close.
+should_stream_more_with_4K_chunk_size({Fd, _}) ->
+    {ok, Stream} = couch_stream:open(Fd, [{buffer_size, 4096}]),
+    lists:foldl(
+        fun(_, Acc) ->
+            Data = <<"a1b2c">>,
+            couch_stream:write(Stream, Data),
+            [Data | Acc]
+        end, [], lists:seq(1, 1024)),
+    ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120, _, _, _},
+                  couch_stream:close(Stream)).
+
+
+%% Read every chunk referenced by PosList and flatten to one binary.
+%% NOTE(review): accumulating as [Bin, Acc] prepends each chunk, which
+%% yields in-order output only if couch_stream:foldl visits chunks
+%% newest-first — confirm against couch_stream's foldl implementation.
+read_all(Fd, PosList) ->
+    Data = couch_stream:foldl(Fd, PosList, fun(Bin, Acc) -> [Bin, Acc] end, []),
+    iolist_to_binary(Data).
diff --git a/test/couchdb/couch_task_status_tests.erl b/test/couchdb/couch_task_status_tests.erl
new file mode 100644
index 000000000..f71ad2bf7
--- /dev/null
+++ b/test/couchdb/couch_task_status_tests.erl
@@ -0,0 +1,225 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_task_status_tests).
+%% EUnit tests for couch_task_status. Each status-owning "task" is a
+%% spawned helper process (loop/0) that performs couch_task_status calls
+%% on the test's behalf and replies via {ok, self(), Result} messages,
+%% because task status is keyed by the calling process's pid.
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+%% Milliseconds to wait for helper replies and 'DOWN' notifications.
+-define(TIMEOUT, 1000).
+
+
+setup() ->
+    {ok, TaskStatusPid} = couch_task_status:start_link(),
+    TaskUpdaterPid = spawn(fun() -> loop() end),
+    {TaskStatusPid, TaskUpdaterPid}.
+
+teardown({TaskStatusPid, _}) ->
+    erlang:monitor(process, TaskStatusPid),
+    couch_task_status:stop(),
+    receive
+        {'DOWN', _, _, TaskStatusPid, _} ->
+            ok
+    after ?TIMEOUT ->
+        throw(timeout_error)
+    end.
+
+
+couch_task_status_test_() ->
+    {
+        "CouchDB task status updates",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_register_task/1,
+                fun should_set_task_startup_time/1,
+                fun should_have_update_time_as_startup_before_any_progress/1,
+                fun should_set_task_type/1,
+                fun should_not_register_multiple_tasks_for_same_pid/1,
+                fun should_set_task_progress/1,
+                fun should_update_task_progress/1,
+                fun should_update_time_changes_on_task_progress/1,
+                fun should_control_update_frequency/1,
+                fun should_reset_control_update_frequency/1,
+                fun should_track_multiple_tasks/1,
+                fun should_finish_task/1
+
+            ]
+        }
+    }.
+
+
+should_register_task({_, Pid}) ->
+    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+    ?_assertEqual(1, length(couch_task_status:all())).
+
+should_set_task_startup_time({_, Pid}) ->
+    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+    ?_assert(is_integer(get_task_prop(Pid, started_on))).
+
+%% Until the first update, updated_on must equal started_on.
+should_have_update_time_as_startup_before_any_progress({_, Pid}) ->
+    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+    StartTime = get_task_prop(Pid, started_on),
+    ?_assertEqual(StartTime, get_task_prop(Pid, updated_on)).
+
+should_set_task_type({_, Pid}) ->
+    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+    ?_assertEqual(replication, get_task_prop(Pid, type)).
+
+%% A pid may own at most one task; a second add must be rejected.
+should_not_register_multiple_tasks_for_same_pid({_, Pid}) ->
+    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+    ?_assertEqual({add_task_error, already_registered},
+                  call(Pid, add, [{type, compaction}, {progress, 0}])).
+
+should_set_task_progress({_, Pid}) ->
+    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+    ?_assertEqual(0, get_task_prop(Pid, progress)).
+
+should_update_task_progress({_, Pid}) ->
+    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+    call(Pid, update, [{progress, 25}]),
+    ?_assertEqual(25, get_task_prop(Pid, progress)).
+
+should_update_time_changes_on_task_progress({_, Pid}) ->
+    ?_assert(
+        begin
+            ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+            ok = timer:sleep(1000),  % sleep awhile to customize update time
+            call(Pid, update, [{progress, 25}]),
+            get_task_prop(Pid, updated_on) > get_task_prop(Pid, started_on)
+        end).
+
+%% With a 500ms minimum update frequency, updates arriving sooner are
+%% dropped: progress stays at the first post-throttle value (66).
+should_control_update_frequency({_, Pid}) ->
+    ?_assertEqual(66,
+        begin
+            ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+            call(Pid, update, [{progress, 50}]),
+            call(Pid, update_frequency, 500),
+            call(Pid, update, [{progress, 66}]),
+            call(Pid, update, [{progress, 77}]),
+            get_task_prop(Pid, progress)
+        end).
+
+%% Setting the frequency back to 0 makes updates immediate again.
+should_reset_control_update_frequency({_, Pid}) ->
+    ?_assertEqual(87,
+        begin
+            ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+            call(Pid, update, [{progress, 50}]),
+            call(Pid, update_frequency, 500),
+            call(Pid, update, [{progress, 66}]),
+            call(Pid, update, [{progress, 77}]),
+            call(Pid, update_frequency, 0),
+            call(Pid, update, [{progress, 87}]),
+            get_task_prop(Pid, progress)
+        end).
+
+should_track_multiple_tasks(_) ->
+    ?_assert(run_multiple_tasks()).
+
+%% NOTE(review): the first assertion uses the eager ?assertEqual (not
+%% the lazy ?_assertEqual), so it runs when the test object is built,
+%% before the returned lazy assertion.
+should_finish_task({_, Pid}) ->
+    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+    ?assertEqual(1, length(couch_task_status:all())),
+    ok = call(Pid, done),
+    ?_assertEqual(0, length(couch_task_status:all())).
+
+
+%% Drive three concurrent tasks through add/update/done, asserting the
+%% registry reflects each step; returns true for use with ?_assert.
+run_multiple_tasks() ->
+    Pid1 = spawn(fun() -> loop() end),
+    Pid2 = spawn(fun() -> loop() end),
+    Pid3 = spawn(fun() -> loop() end),
+    call(Pid1, add, [{type, replication}, {progress, 0}]),
+    call(Pid2, add, [{type, compaction}, {progress, 0}]),
+    call(Pid3, add, [{type, indexer}, {progress, 0}]),
+
+    ?assertEqual(3, length(couch_task_status:all())),
+    ?assertEqual(replication, get_task_prop(Pid1, type)),
+    ?assertEqual(compaction, get_task_prop(Pid2, type)),
+    ?assertEqual(indexer, get_task_prop(Pid3, type)),
+
+    call(Pid2, update, [{progress, 33}]),
+    call(Pid3, update, [{progress, 42}]),
+    call(Pid1, update, [{progress, 11}]),
+    ?assertEqual(42, get_task_prop(Pid3, progress)),
+    call(Pid1, update, [{progress, 72}]),
+    ?assertEqual(72, get_task_prop(Pid1, progress)),
+    ?assertEqual(33, get_task_prop(Pid2, progress)),
+
+    call(Pid1, done),
+    ?assertEqual(2, length(couch_task_status:all())),
+    call(Pid3, done),
+    ?assertEqual(1, length(couch_task_status:all())),
+    call(Pid2, done),
+    ?assertEqual(0, length(couch_task_status:all())),
+
+    true.
+
+
+%% Helper task body: execute the requested couch_task_status call and
+%% reply {ok, self(), Result}; 'done' is terminal (no recursion).
+loop() ->
+    receive
+        {add, Props, From} ->
+            Resp = couch_task_status:add_task(Props),
+            From ! {ok, self(), Resp},
+            loop();
+        {update, Props, From} ->
+            Resp = couch_task_status:update(Props),
+            From ! {ok, self(), Resp},
+            loop();
+        {update_frequency, Msecs, From} ->
+            Resp = couch_task_status:set_update_frequency(Msecs),
+            From ! {ok, self(), Resp},
+            loop();
+        {done, From} ->
+            From ! {ok, self(), ok}
+    end.
+
+%% Synchronous request to a helper task (no-argument command).
+call(Pid, Command) ->
+    Pid ! {Command, self()},
+    wait(Pid).
+
+%% Synchronous request to a helper task with one argument.
+call(Pid, Command, Arg) ->
+    Pid ! {Command, Arg, self()},
+    wait(Pid).
+
+%% Await the helper's tagged reply; fail loudly on timeout.
+wait(Pid) ->
+    receive
+        {ok, Pid, Msg} ->
+            Msg
+    after ?TIMEOUT ->
+        throw(timeout_error)
+    end.
+
+%% Find the task entry whose 'pid' property matches Pid (task status
+%% reports pids as binaries) and return property Prop, or raise a
+%% descriptive assertion_failed error if the task/property is missing.
+get_task_prop(Pid, Prop) ->
+    From = list_to_binary(pid_to_list(Pid)),
+    Element = lists:foldl(
+        fun(PropList, Acc) ->
+            case couch_util:get_value(pid, PropList) of
+                From ->
+                    [PropList | Acc];
+                _ ->
+                    Acc
+            end
+        end,
+        [], couch_task_status:all()
+    ),
+    case couch_util:get_value(Prop, hd(Element), nil) of
+        nil ->
+            erlang:error({assertion_failed,
+                          [{module, ?MODULE},
+                           {line, ?LINE},
+                           {reason, "Could not get property '"
+                                    ++ couch_util:to_list(Prop)
+                                    ++ "' for task "
+                                    ++ pid_to_list(Pid)}]});
+        Value ->
+            Value
+    end.
diff --git a/test/couchdb/couch_util_tests.erl b/test/couchdb/couch_util_tests.erl
new file mode 100644
index 000000000..8e24e7277
--- /dev/null
+++ b/test/couchdb/couch_util_tests.erl
@@ -0,0 +1,136 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_util_tests).
+%% EUnit tests for assorted couch_util helpers: ICU collation, atom
+%% conversion, implode, trim, abs_pathname, should_flush, verify and
+%% find_in_binary.
+
+-include("couch_eunit.hrl").
+
+
+setup() ->
+    %% We cannot start driver from here since it becomes bounded to eunit
+    %% master process and the next couch_server_sup:start_link call will
+    %% fail because server couldn't load driver since it already is.
+    %%
+    %% On other hand, we cannot unload driver here due to
+    %% {error, not_loaded_by_this_process} while it is. Any ideas is welcome.
+    %%
+    couch_server_sup:start_link(?CONFIG_CHAIN),
+    %% couch_config:start_link(?CONFIG_CHAIN),
+    %% {ok, _} = couch_drv:start_link(),
+    ok.
+
+teardown(_) ->
+    couch_server_sup:stop(),
+    %% couch_config:stop(),
+    %% erl_ddll:unload_driver(couch_icu_driver),
+    ok.
+
+
+%% Collation needs the ICU driver, hence the server fixture; the rest of
+%% the module's tests run without any setup.
+collation_test_() ->
+    {
+        "Collation tests",
+        [
+            {
+                setup,
+                fun setup/0, fun teardown/1,
+                [
+                    should_collate_ascii(),
+                    should_collate_non_ascii()
+                ]
+            }
+        ]
+    }.
+
+should_collate_ascii() ->
+    ?_assertEqual(1, couch_util:collate(<<"foo">>, <<"bar">>)).
+
+should_collate_non_ascii() ->
+    ?_assertEqual(-1, couch_util:collate(<<"A">>, <<"aa">>)).
+
+should_collate_non_ascii() ->
diff --git a/test/couchdb/couch_uuids_tests.erl b/test/couchdb/couch_uuids_tests.erl
new file mode 100644
index 000000000..ea1d03437
--- /dev/null
+++ b/test/couchdb/couch_uuids_tests.erl
@@ -0,0 +1,161 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_uuids_tests).
+
+-include("couch_eunit.hrl").
+
+-define(TIMEOUT_S, 20).
+
+
+%% Start couch_config (monitored so teardown/1 can wait for it to die)
+%% and the couch_uuids server. Returns the config server pid.
+setup() ->
+    {ok, Pid} = couch_config:start_link(?CONFIG_CHAIN),
+    erlang:monitor(process, Pid),
+    couch_uuids:start(),
+    Pid.
+
+%% Same as setup/0, then applies the given [{Option, Value}] pairs to
+%% the "uuids" config section (non-persistently).
+setup(Opts) ->
+    Pid = setup(),
+    lists:foreach(
+        fun({Option, Value}) ->
+            couch_config:set("uuids", Option, Value, false)
+        end, Opts),
+    Pid.
+
+%% Stop couch_uuids and couch_config, then wait for the config server's
+%% 'DOWN' message so the next test case starts from a clean state.
+teardown(Pid) ->
+    couch_uuids:stop(),
+    couch_config:stop(),
+    receive
+        {'DOWN', _, _, Pid, _} -> ok
+    after
+        1000 -> throw({timeout_error, config_stop})
+    end.
+
+%% foreachx-style teardown: the per-case options are irrelevant here.
+teardown(_, Pid) ->
+    teardown(Pid).
+
+
+%% Whatever algorithm is configured by default, generated UUIDs must be
+%% unique.
+default_test_() ->
+    {
+        "Default UUID algorithm",
+        {
+            setup,
+            fun setup/0, fun teardown/1,
+            fun should_be_unique/1
+        }
+    }.
+
+%% The sequential algorithm additionally must be monotonic and roll its
+%% prefix over; foreachx passes Opts to setup/1 for every case.
+sequential_test_() ->
+    Opts = [{"algorithm", "sequential"}],
+    Cases = [
+        fun should_be_unique/2,
+        fun should_increment_monotonically/2,
+        fun should_rollover/2
+    ],
+    {
+        "UUID algorithm: sequential",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Opts, Fun} || Fun <- Cases]
+        }
+    }.
+
+%% utc_random UUIDs must be unique and time-ordered.
+utc_test_() ->
+    Opts = [{"algorithm", "utc_random"}],
+    Cases = [
+        fun should_be_unique/2,
+        fun should_increment_monotonically/2
+    ],
+    {
+        "UUID algorithm: utc_random",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Opts, Fun} || Fun <- Cases]
+        }
+    }.
+
+%% utc_id UUIDs must additionally carry the configured utc_id_suffix.
+utc_id_suffix_test_() ->
+    Opts = [{"algorithm", "utc_id"}, {"utc_id_suffix", "bozo"}],
+    Cases = [
+        fun should_be_unique/2,
+        fun should_increment_monotonically/2,
+        fun should_preserve_suffix/2
+    ],
+    {
+        "UUID algorithm: utc_id",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Opts, Fun} || Fun <- Cases]
+        }
+    }.
+
+
+%% Arity 0/1/2 variants let the same test body be used with setup,
+%% foreach and foreachx fixtures.
+should_be_unique() ->
+    %% this one may really run for too long on slow hosts
+    {timeout, ?TIMEOUT_S, ?_assert(test_unique(10000, [couch_uuids:new()]))}.
+should_be_unique(_) ->
+    should_be_unique().
+should_be_unique(_, _) ->
+    should_be_unique().
+
+%% Consecutively generated UUIDs must compare strictly increasing.
+should_increment_monotonically(_, _) ->
+    ?_assert(couch_uuids:new() < couch_uuids:new()).
+
+%% The 26-character prefix of a sequential UUID is expected to change
+%% (roll over) after roughly 5k-11k generated UUIDs.
+should_rollover(_, _) ->
+    ?_test(begin
+        UUID = binary_to_list(couch_uuids:new()),
+        Prefix = element(1, lists:split(26, UUID)),
+        N = gen_until_pref_change(Prefix, 0),
+        ?assert(N >= 5000 andalso N =< 11000)
+    end).
+
+%% Every utc_id UUID must end with the same configured suffix.
+should_preserve_suffix(_, _) ->
+    ?_test(begin
+        UUID = binary_to_list(couch_uuids:new()),
+        Suffix = get_suffix(UUID),
+        ?assert(test_same_suffix(10000, Suffix))
+    end).
+
+
+%% Generate N fresh UUIDs, asserting that none repeats an already-seen
+%% one. Seen UUIDs are tracked in a gb_sets set so each membership
+%% check is O(log n); the original kept a plain list and used
+%% lists:member/2, making the 10k-iteration run quadratic.
+%% The public entry point still accepts a list, as callers pass one.
+test_unique(N, UUIDs) when is_list(UUIDs) ->
+    test_unique(N, gb_sets:from_list(UUIDs));
+test_unique(0, _) ->
+    true;
+test_unique(N, Seen) ->
+    UUID = couch_uuids:new(),
+    ?assertNot(gb_sets:is_element(UUID, Seen)),
+    test_unique(N - 1, gb_sets:add(UUID, Seen)).
+
+%% Return the first 26 characters of a UUID binary as a list; this is
+%% the part should_rollover/2 watches for changes.
+get_prefix(UUID) ->
+    {Prefix, _Rest} = lists:split(26, binary_to_list(UUID)),
+    Prefix.
+
+%% Generate UUIDs until the 26-char prefix differs from Prefix and
+%% return how many were generated; gives up (returning the count) past
+%% 8251 tries so a broken algorithm cannot loop forever.
+gen_until_pref_change(_, Count) when Count > 8251 ->
+    Count;
+gen_until_pref_change(Prefix, N) ->
+    case get_prefix(couch_uuids:new()) of
+        Prefix -> gen_until_pref_change(Prefix, N + 1);
+        _ -> N
+    end.
+
+%% Return everything after the first 14 characters of a UUID (given as
+%% a binary or a list) as a list.
+get_suffix(UUID) when is_binary(UUID) ->
+    get_suffix(binary_to_list(UUID));
+get_suffix(UUID) ->
+    {_Head, Suffix} = lists:split(14, UUID),
+    Suffix.
+
+%% True if N freshly generated UUIDs all share the given suffix; stops
+%% (returning false) at the first mismatch.
+test_same_suffix(0, _) ->
+    true;
+test_same_suffix(N, Suffix) ->
+    case get_suffix(couch_uuids:new()) of
+        Suffix -> test_same_suffix(N - 1, Suffix);
+        _ -> false
+    end.
diff --git a/test/couchdb/couch_work_queue_tests.erl b/test/couchdb/couch_work_queue_tests.erl
new file mode 100644
index 000000000..8a463b51f
--- /dev/null
+++ b/test/couchdb/couch_work_queue_tests.erl
@@ -0,0 +1,393 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_work_queue_tests).
+
+-include("couch_eunit.hrl").
+
+-define(TIMEOUT, 100).
+
+
+%% Create a work queue with the given options plus one producer and one
+%% consumer helper process attached to it.
+setup(Opts) ->
+    {ok, Q} = couch_work_queue:new(Opts),
+    Producer = spawn_producer(Q),
+    Consumer = spawn_consumer(Q),
+    {Q, Producer, Consumer}.
+
+setup_max_items() ->
+    setup([{max_items, 3}]).
+
+setup_max_size() ->
+    setup([{max_size, 160}]).
+
+setup_max_items_and_size() ->
+    setup([{max_size, 160}, {max_items, 3}]).
+
+%% Multi-worker queue fixture: one producer, three consumers.
+setup_multi_workers() ->
+    {Q, Producer, Consumer1} = setup([{max_size, 160},
+                                      {max_items, 3},
+                                      {multi_workers, true}]),
+    Consumer2 = spawn_consumer(Q),
+    Consumer3 = spawn_consumer(Q),
+    {Q, Producer, [Consumer1, Consumer2, Consumer3]}.
+
+%% Drain and close the queue, then stop all helper processes; the
+%% single-consumer fixture is normalized to a one-element list.
+teardown({Q, Producer, Consumers}) when is_list(Consumers) ->
+    % consume all to unblock and let producer/consumer stop without timeout
+    [consume(Consumer, all) || Consumer <- Consumers],
+
+    ok = close_queue(Q),
+    ok = stop(Producer, "producer"),
+    %% Assert every consumer stopped cleanly by matching the result list
+    %% against a same-length list of 'ok'.
+    R = [stop(Consumer, "consumer") || Consumer <- Consumers],
+    R = [ok || _ <- Consumers],
+    ok;
+teardown({Q, Producer, Consumer}) ->
+    teardown({Q, Producer, [Consumer]}).
+
+
+%% Single producer/consumer suites: the shared common cases run against
+%% a count-limited, a size-limited and a count+size-limited queue, plus
+%% limit-specific extra cases for each configuration.
+single_consumer_test_() ->
+    {
+        "Single producer and consumer",
+        [
+            {
+                "Queue with 3 max items",
+                {
+                    foreach,
+                    fun setup_max_items/0, fun teardown/1,
+                    single_consumer_max_item_count() ++ common_cases()
+                }
+            },
+            {
+                "Queue with max size of 160 bytes",
+                {
+                    foreach,
+                    fun setup_max_size/0, fun teardown/1,
+                    single_consumer_max_size() ++ common_cases()
+                }
+            },
+            {
+                "Queue with max size of 160 bytes and 3 max items",
+                {
+                    foreach,
+                    fun setup_max_items_and_size/0, fun teardown/1,
+                    single_consumer_max_items_and_size() ++ common_cases()
+                }
+            }
+        ]
+    }.
+
+%% One producer against three consumers on a multi_workers queue.
+multiple_consumers_test_() ->
+    {
+        "Single producer and multiple consumers",
+        [
+            {
+                "Queue with max size of 160 bytes and 3 max items",
+                {
+                    foreach,
+                    fun setup_multi_workers/0, fun teardown/1,
+                    common_cases() ++ multiple_consumers()
+                }
+
+            }
+        ]
+    }.
+
+%% Cases that apply to every queue configuration.
+common_cases()->
+    [
+        fun should_block_consumer_on_dequeue_from_empty_queue/1,
+        fun should_consume_right_item/1,
+        fun should_timeout_on_close_non_empty_queue/1,
+        fun should_not_block_producer_for_non_empty_queue_after_close/1,
+        fun should_be_closed/1
+    ].
+
+%% Cases specific to a max_items-limited queue.
+single_consumer_max_item_count()->
+    [
+        fun should_have_no_items_for_new_queue/1,
+        fun should_block_producer_on_full_queue_count/1,
+        fun should_receive_first_queued_item/1,
+        fun should_consume_multiple_items/1,
+        fun should_consume_all/1
+    ].
+
+%% Cases specific to a max_size-limited queue.
+single_consumer_max_size()->
+    [
+        fun should_have_zero_size_for_new_queue/1,
+        fun should_block_producer_on_full_queue_size/1,
+        fun should_increase_queue_size_on_produce/1,
+        fun should_receive_first_queued_item/1,
+        fun should_consume_multiple_items/1,
+        fun should_consume_all/1
+    ].
+
+single_consumer_max_items_and_size() ->
+    single_consumer_max_item_count() ++ single_consumer_max_size().
+
+%% Cases run in addition to common_cases/0 for multi-worker queues.
+multiple_consumers() ->
+    [
+        fun should_have_zero_size_for_new_queue/1,
+        fun should_have_no_items_for_new_queue/1,
+        fun should_increase_queue_size_on_produce/1
+    ].
+
+
+%% A freshly created queue reports zero items and zero size.
+should_have_no_items_for_new_queue({Q, _, _}) ->
+    ?_assertEqual(0, couch_work_queue:item_count(Q)).
+
+should_have_zero_size_for_new_queue({Q, _, _}) ->
+    ?_assertEqual(0, couch_work_queue:size(Q)).
+
+%% A consumer asked to dequeue from an empty queue must block; a
+%% blocked consumer cannot answer ping/1, so ping returns timeout.
+should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumers}) when is_list(Consumers) ->
+    [consume(C, 2) || C <- Consumers],
+    Pongs = [ping(C) || C <- Consumers],
+    ?_assertEqual([timeout, timeout, timeout], Pongs);
+should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumer}) ->
+    consume(Consumer, 1),
+    Pong = ping(Consumer),
+    ?_assertEqual(timeout, Pong).
+
+%% With several waiting consumers, each produced item should be handed
+%% straight through to one consumer (queue stays empty); with a single
+%% consumer only the item produced while it waited is delivered.
+should_consume_right_item({Q, Producer, Consumers}) when is_list(Consumers) ->
+    [consume(C, 3) || C <- Consumers],
+
+    Item1 = produce(Producer, 10),
+    ok = ping(Producer),
+    ?assertEqual(0, couch_work_queue:item_count(Q)),
+    ?assertEqual(0, couch_work_queue:size(Q)),
+
+    Item2 = produce(Producer, 10),
+    ok = ping(Producer),
+    ?assertEqual(0, couch_work_queue:item_count(Q)),
+    ?assertEqual(0, couch_work_queue:size(Q)),
+
+    Item3 = produce(Producer, 10),
+    ok = ping(Producer),
+    ?assertEqual(0, couch_work_queue:item_count(Q)),
+    ?assertEqual(0, couch_work_queue:size(Q)),
+
+    %% Each consumer should have unblocked (ping -> ok) having received
+    %% its respective item, in production order.
+    R = [{ping(C), Item}
+         || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3])],
+
+    ?_assertEqual([{ok, Item1}, {ok, Item2}, {ok, Item3}], R);
+should_consume_right_item({_, Producer, Consumer}) ->
+    consume(Consumer, 1),
+    Item = produce(Producer, 10),
+    produce(Producer, 20),
+    ok = ping(Producer),
+    ok = ping(Consumer),
+    {ok, Items} = last_consumer_items(Consumer),
+    ?_assertEqual([Item], Items).
+
+%% The queue's item count and byte size must grow with each produced
+%% item (50 bytes, then 10 more).
+should_increase_queue_size_on_produce({Q, Producer, _}) ->
+    produce(Producer, 50),
+    ok = ping(Producer),
+    Count1 = couch_work_queue:item_count(Q),
+    Size1 = couch_work_queue:size(Q),
+
+    produce(Producer, 10),
+    %% Ping the producer before reading the counters: produce/2 returns
+    %% as soon as the item is generated, *before* it is queued (see
+    %% producer_loop/2), so without this synchronization the
+    %% item_count/size reads below race with the insertion. The first
+    %% produce above was already followed by a ping for the same reason.
+    ok = ping(Producer),
+    Count2 = couch_work_queue:item_count(Q),
+    Size2 = couch_work_queue:size(Q),
+
+    ?_assertEqual([{Count1, Size1}, {Count2, Size2}], [{1, 50}, {2, 60}]).
+
+%% After max_items (3) items, queue/2 must block the producer, so the
+%% final ping times out.
+should_block_producer_on_full_queue_count({Q, Producer, _}) ->
+    produce(Producer, 10),
+    %% NOTE(review): item_count is read before pinging the producer, so
+    %% this assumes the insert already happened — confirm this is not
+    %% racy against producer_loop/2's send-then-queue ordering.
+    ?assertEqual(1, couch_work_queue:item_count(Q)),
+    ok = ping(Producer),
+
+    produce(Producer, 15),
+    ?assertEqual(2, couch_work_queue:item_count(Q)),
+    ok = ping(Producer),
+
+    produce(Producer, 20),
+    ?assertEqual(3, couch_work_queue:item_count(Q)),
+    Pong = ping(Producer),
+
+    ?_assertEqual(timeout, Pong).
+
+%% Once the queued bytes exceed max_size (160), the producer blocks
+%% inside queue/2 and stops answering pings.
+should_block_producer_on_full_queue_size({Q, Producer, _}) ->
+    produce(Producer, 100),
+    ok = ping(Producer),
+    ?assertEqual(1, couch_work_queue:item_count(Q)),
+    ?assertEqual(100, couch_work_queue:size(Q)),
+
+    produce(Producer, 110),
+    Pong = ping(Producer),
+    ?assertEqual(2, couch_work_queue:item_count(Q)),
+    ?assertEqual(210, couch_work_queue:size(Q)),
+
+    ?_assertEqual(timeout, Pong).
+
+%% A single dequeue of 2 must return both queued items in order.
+should_consume_multiple_items({_, Producer, Consumer}) ->
+    Item1 = produce(Producer, 10),
+    ok = ping(Producer),
+
+    Item2 = produce(Producer, 15),
+    ok = ping(Producer),
+
+    consume(Consumer, 2),
+
+    {ok, Items} = last_consumer_items(Consumer),
+    ?_assertEqual([Item1, Item2], Items).
+
+%% A consumer waiting for more items than exist still receives the
+%% first produced item and leaves the queue empty.
+should_receive_first_queued_item({Q, Producer, Consumer}) ->
+    consume(Consumer, 100),
+    timeout = ping(Consumer),
+
+    Item = produce(Producer, 11),
+    ok = ping(Producer),
+
+    ok = ping(Consumer),
+    ?assertEqual(0, couch_work_queue:item_count(Q)),
+
+    {ok, Items} = last_consumer_items(Consumer),
+    ?_assertEqual([Item], Items).
+
+%% Dequeueing 'all' must return every queued item in order.
+%% NOTE(review): there is no ping between the produces and the consume,
+%% so this presumably relies on dequeue(Q, all) seeing all three items —
+%% verify against couch_work_queue semantics.
+should_consume_all({_, Producer, Consumer}) ->
+    Item1 = produce(Producer, 10),
+    Item2 = produce(Producer, 15),
+    Item3 = produce(Producer, 20),
+
+    consume(Consumer, all),
+
+    {ok, Items} = last_consumer_items(Consumer),
+    ?_assertEqual([Item1, Item2, Item3], Items).
+
+%% Closing a queue that still holds an item must not complete until it
+%% is drained, so close_queue/1 times out here.
+should_timeout_on_close_non_empty_queue({Q, Producer, _}) ->
+    produce(Producer, 1),
+    Status = close_queue(Q),
+
+    ?_assertEqual(timeout, Status).
+
+%% After close/1 a producer with an already-queued item must remain
+%% responsive, and the item must still be accounted for.
+should_not_block_producer_for_non_empty_queue_after_close({Q, Producer, _}) ->
+    produce(Producer, 1),
+    close_queue(Q),
+    Pong = ping(Producer),
+    Size = couch_work_queue:size(Q),
+    Count = couch_work_queue:item_count(Q),
+
+    ?_assertEqual({ok, 1, 1}, {Pong, Size, Count}).
+
+%% Once an empty queue is closed, dequeue results and the item_count /
+%% size queries must all report 'closed'.
+should_be_closed({Q, _, Consumers}) when is_list(Consumers) ->
+    ok = close_queue(Q),
+
+    [consume(C, 1) || C <- Consumers],
+
+    LastConsumerItems = [last_consumer_items(C) || C <- Consumers],
+    ItemsCount = couch_work_queue:item_count(Q),
+    Size = couch_work_queue:size(Q),
+
+    ?_assertEqual({[closed, closed, closed], closed, closed},
+                  {LastConsumerItems, ItemsCount, Size});
+should_be_closed({Q, _, Consumer}) ->
+    ok = close_queue(Q),
+
+    consume(Consumer, 1),
+
+    LastConsumerItems = last_consumer_items(Consumer),
+    ItemsCount = couch_work_queue:item_count(Q),
+    Size = couch_work_queue:size(Q),
+
+    ?_assertEqual({closed, closed, closed},
+                  {LastConsumerItems, ItemsCount, Size}).
+
+
+%% Close the queue and wait for its process to terminate; returns ok on
+%% a clean shutdown or timeout if the process outlives ?TIMEOUT.
+close_queue(Q) ->
+    ok = couch_work_queue:close(Q),
+    MonRef = erlang:monitor(process, Q),
+    receive
+        {'DOWN', MonRef, process, Q, _Reason} -> ok
+    after ?TIMEOUT ->
+        %% [flush] removes a 'DOWN' message that may already be in
+        %% transit; a bare demonitor/1 could leave a stale 'DOWN' in the
+        %% mailbox to confuse later selective receives (e.g. in
+        %% teardown or stop/2).
+        erlang:demonitor(MonRef, [flush]),
+        timeout
+    end.
+
+%% Spawn a consumer helper that reports back to the calling process.
+%% NOTE(review): plain spawn/1, not spawn_link — a crash in the helper
+%% only surfaces as test timeouts.
+spawn_consumer(Q) ->
+    Parent = self(),
+    spawn(fun() -> consumer_loop(Parent, Q, nil) end).
+
+%% Consumer protocol: 'consume' runs a (potentially blocking)
+%% dequeue/2 and remembers its result; 'ping' answers pong (so a
+%% timeout on ping means the consumer is stuck in dequeue); 'last_item'
+%% reports the last dequeue result (initially nil); 'stop' acknowledges
+%% and lets the process exit.
+consumer_loop(Parent, Q, PrevItem) ->
+    receive
+        {stop, Ref} ->
+            Parent ! {ok, Ref};
+        {ping, Ref} ->
+            Parent ! {pong, Ref},
+            consumer_loop(Parent, Q, PrevItem);
+        {last_item, Ref} ->
+            Parent ! {item, Ref, PrevItem},
+            consumer_loop(Parent, Q, PrevItem);
+        {consume, N} ->
+            Result = couch_work_queue:dequeue(Q, N),
+            consumer_loop(Parent, Q, Result)
+    end.
+
+%% Spawn a producer helper that reports back to the calling process.
+spawn_producer(Q) ->
+    Parent = self(),
+    spawn(fun() -> producer_loop(Parent, Q) end).
+
+%% Producer protocol: 'produce' generates Size random bytes and queues
+%% them; 'ping' answers pong (a ping timeout means the producer is
+%% blocked inside queue/2); 'stop' acknowledges and exits.
+producer_loop(Parent, Q) ->
+    receive
+        {stop, Ref} ->
+            Parent ! {ok, Ref};
+        {ping, Ref} ->
+            Parent ! {pong, Ref},
+            producer_loop(Parent, Q);
+        {produce, Ref, Size} ->
+            %% strong_rand_bytes/1 replaces the deprecated
+            %% crypto:rand_bytes/1 (removed in OTP 20).
+            Item = crypto:strong_rand_bytes(Size),
+            %% The item is reported to the parent *before* queueing so
+            %% that produce/2 returns even when queue/2 blocks on a full
+            %% queue — the should_block_producer_* tests depend on this
+            %% ordering.
+            Parent ! {item, Ref, Item},
+            ok = couch_work_queue:queue(Q, Item),
+            producer_loop(Parent, Q)
+    end.
+
+%% Ask a consumer to dequeue N items (or 'all'); fire-and-forget, there
+%% is no acknowledgement — use ping/1 to detect whether it blocked.
+consume(Consumer, N) ->
+    Consumer ! {consume, N}.
+
+%% Fetch the consumer's last dequeue result ({ok, Items}, 'closed', or
+%% the initial nil); returns 'timeout' if the consumer is blocked.
+last_consumer_items(Consumer) ->
+    Ref = make_ref(),
+    Consumer ! {last_item, Ref},
+    receive
+        {item, Ref, Items} ->
+            Items
+    after ?TIMEOUT ->
+        timeout
+    end.
+
+%% Ask the producer to generate and queue Size random bytes; returns
+%% the generated item (reported before queueing, see producer_loop/2)
+%% or errors out after ?TIMEOUT.
+produce(Producer, Size) ->
+    Ref = make_ref(),
+    Producer ! {produce, Ref, Size},
+    receive
+        {item, Ref, Item} ->
+            Item
+    after ?TIMEOUT ->
+        erlang:error({assertion_failed,
+                      [{module, ?MODULE},
+                       {line, ?LINE},
+                       {reason, "Timeout asking producer to produce an item"}]})
+    end.
+
+%% Synchronous liveness probe: ok if the process answers within
+%% ?TIMEOUT, 'timeout' if it is blocked.
+ping(Pid) ->
+    Ref = make_ref(),
+    Pid ! {ping, Ref},
+    receive
+        {pong, Ref} ->
+            ok
+    after ?TIMEOUT ->
+        timeout
+    end.
+
+%% Politely stop a helper process; logs and returns 'timeout' when the
+%% process does not acknowledge in time.
+stop(Pid, Name) ->
+    Ref = make_ref(),
+    Pid ! {stop, Ref},
+    receive
+        {ok, Ref} -> ok
+    after ?TIMEOUT ->
+        ?debugMsg("Timeout stopping " ++ Name),
+        timeout
+    end.
diff --git a/test/couchdb/couchdb_attachments_tests.erl b/test/couchdb/couchdb_attachments_tests.erl
new file mode 100644
index 000000000..cf597855b
--- /dev/null
+++ b/test/couchdb/couchdb_attachments_tests.erl
@@ -0,0 +1,638 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_attachments_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(COMPRESSION_LEVEL, 8).
+-define(ATT_BIN_NAME, <<"logo.png">>).
+-define(ATT_TXT_NAME, <<"file.erl">>).
+-define(FIXTURE_PNG, filename:join([?FIXTURESDIR, "logo.png"])).
+-define(FIXTURE_TXT, ?FILE).
+-define(TIMEOUT, 1000).
+-define(TIMEOUT_EUNIT, 10).
+-define(TIMEWAIT, 100).
+-define(i2l(I), integer_to_list(I)).
+
+
+%% Start the full server stack and pin the attachment compression
+%% settings the compression tests rely on.
+start() ->
+    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+    % ensure in default compression settings for attachments_compression_tests
+    couch_config:set("attachments", "compression_level",
+                     ?i2l(?COMPRESSION_LEVEL), false),
+    couch_config:set("attachments", "compressible_types", "text/*", false),
+    Pid.
+
+%% Stop the server stack and wait for the supervisor to go down.
+stop(Pid) ->
+    erlang:monitor(process, Pid),
+    couch_server_sup:stop(),
+    receive
+        {'DOWN', _, _, Pid, _} ->
+            ok
+    after ?TIMEOUT ->
+        throw({timeout, server_stop})
+    end.
+
+%% Create a fresh database and return the httpd {Host, DbName} pair
+%% used to talk to it over HTTP.
+setup() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, []),
+    ok = couch_db:close(Db),
+    Addr = couch_config:get("httpd", "bind_address", any),
+    Port = mochiweb_socket_server:get(couch_httpd, port),
+    Host = Addr ++ ":" ++ ?i2l(Port),
+    {Host, ?b2l(DbName)}.
+
+%% foreachx setup: create the database plus one attachment of the
+%% requested kind ({binary|text, standalone|inline} or 'compressed').
+setup({binary, standalone}) ->
+    {Host, DbName} = setup(),
+    setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG);
+setup({text, standalone}) ->
+    {Host, DbName} = setup(),
+    setup_att(fun create_standalone_text_att/2, Host, DbName, ?FIXTURE_TXT);
+setup({binary, inline}) ->
+    {Host, DbName} = setup(),
+    setup_att(fun create_inline_png_att/2, Host, DbName, ?FIXTURE_PNG);
+setup({text, inline}) ->
+    {Host, DbName} = setup(),
+    setup_att(fun create_inline_text_att/2, Host, DbName, ?FIXTURE_TXT);
+setup(compressed) ->
+    {Host, DbName} = setup(),
+    setup_att(fun create_already_compressed_att/2, Host, DbName, ?FIXTURE_TXT).
+%% Create the attachment via Fun and return the raw fixture Data plus
+%% the {DbName, DocUrl, AttUrl} handles the tests need.
+setup_att(Fun, Host, DbName, File) ->
+    HttpHost = "http://" ++ Host,
+    AttUrl = Fun(HttpHost, DbName),
+    {ok, Data} = file:read_file(File),
+    DocUrl = string:join([HttpHost, DbName, "doc"], "/"),
+    Helpers = {DbName, DocUrl, AttUrl},
+    {Data, Helpers}.
+
+%% foreachx teardown: unwrap the setup_att/4 state down to the db name.
+teardown(_, {_, {DbName, _, _}}) ->
+    teardown(DbName).
+
+%% Delete the test database.
+teardown({_, DbName}) ->
+    teardown(DbName);
+teardown(DbName) ->
+    ok = couch_server:delete(?l2b(DbName), []),
+    ok.
+
+
+%% Top-level suite: one server start/stop around the MD5 and
+%% compression sub-suites.
+attachments_test_() ->
+    {
+        "Attachments tests",
+        {
+            setup,
+            fun start/0, fun stop/1,
+            [
+                attachments_md5_tests(),
+                attachments_compression_tests()
+            ]
+        }
+    }.
+
+%% Upload paths: with/without Content-MD5, chunked and non-chunked,
+%% valid and invalid checksums.
+attachments_md5_tests() ->
+    {
+        "Attachments MD5 tests",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_upload_attachment_without_md5/1,
+                fun should_upload_attachment_by_chunks_without_md5/1,
+                fun should_upload_attachment_with_valid_md5_header/1,
+                fun should_upload_attachment_by_chunks_with_valid_md5_header/1,
+                fun should_upload_attachment_by_chunks_with_valid_md5_trailer/1,
+                fun should_reject_attachment_with_invalid_md5/1,
+                fun should_reject_chunked_attachment_with_invalid_md5/1,
+                fun should_reject_chunked_attachment_with_invalid_md5_trailer/1
+            ]
+        }
+    }.
+
+%% Download/compression behavior for attachments created standalone,
+%% inline, or pre-compressed, plus upload rejection of unsupported
+%% Content-Encodings.
+attachments_compression_tests() ->
+    Funs = [
+        fun should_get_att_without_accept_gzip_encoding/2,
+        fun should_get_att_with_accept_gzip_encoding/2,
+        fun should_get_att_with_accept_deflate_encoding/2,
+        fun should_return_406_response_on_unsupported_encoding/2,
+        fun should_get_doc_with_att_data/2,
+        fun should_get_doc_with_att_data_stub/2
+    ],
+    {
+        "Attachments compression tests",
+        [
+            {
+                "Created via Attachments API",
+                created_attachments_compression_tests(standalone, Funs)
+            },
+            {
+                "Created inline via Document API",
+                created_attachments_compression_tests(inline, Funs)
+            },
+            {
+                "Created already been compressed via Attachments API",
+                {
+                    foreachx,
+                    fun setup/1, fun teardown/2,
+                    [{compressed, Fun} || Fun <- Funs]
+                }
+            },
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun should_not_create_compressed_att_with_deflate_encoding/1,
+                    fun should_not_create_compressed_att_with_compress_encoding/1,
+                    fun should_create_compressible_att_with_ctype_params/1
+                ]
+            }
+        ]
+    }.
+
+%% Build the per-creation-method compression sub-suites: the same Funs
+%% run against a compressible (text) and a non-compressible (binary
+%% PNG) attachment created via Mod (standalone or inline).
+%% Also fixes the "Compressiable"/"Uncompressiable" typos in the suite
+%% description strings.
+created_attachments_compression_tests(Mod, Funs) ->
+    [
+        {
+            "Compressible attachments",
+            {
+                foreachx,
+                fun setup/1, fun teardown/2,
+                [{{text, Mod}, Fun} || Fun <- Funs]
+            }
+        },
+        {
+            "Uncompressible attachments",
+            {
+                foreachx,
+                fun setup/1, fun teardown/2,
+                [{{binary, Mod}, Fun} || Fun <- Funs]
+            }
+        }
+    ].
+
+
+
+%% A plain PUT with Content-Length and no Content-MD5 must succeed.
+should_upload_attachment_without_md5({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        Body = "We all live in a yellow submarine!",
+        Headers = [
+            {"Content-Length", "34"},
+            {"Content-Type", "text/plain"},
+            {"Host", Host}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(201, Code),
+        ?assertEqual(true, get_json(Json, [<<"ok">>]))
+    end).
+
+%% Chunked transfer encoding without a checksum must succeed too.
+should_upload_attachment_by_chunks_without_md5({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        AttData = <<"We all live in a yellow submarine!">>,
+        <<Part1:21/binary, Part2:13/binary>> = AttData,
+        Body = chunked_body([Part1, Part2]),
+        Headers = [
+            {"Content-Type", "text/plain"},
+            {"Transfer-Encoding", "chunked"},
+            {"Host", Host}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(201, Code),
+        ?assertEqual(true, get_json(Json, [<<"ok">>]))
+    end).
+
+%% A correct base64 Content-MD5 header must be accepted.
+should_upload_attachment_with_valid_md5_header({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        Body = "We all live in a yellow submarine!",
+        Headers = [
+            {"Content-Length", "34"},
+            {"Content-Type", "text/plain"},
+            {"Content-MD5", ?b2l(base64:encode(couch_util:md5(Body)))},
+            {"Host", Host}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(201, Code),
+        ?assertEqual(true, get_json(Json, [<<"ok">>]))
+    end).
+
+%% A correct Content-MD5 header combined with chunked encoding.
+should_upload_attachment_by_chunks_with_valid_md5_header({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        AttData = <<"We all live in a yellow submarine!">>,
+        <<Part1:21/binary, Part2:13/binary>> = AttData,
+        Body = chunked_body([Part1, Part2]),
+        Headers = [
+            {"Content-Type", "text/plain"},
+            {"Content-MD5", ?b2l(base64:encode(couch_util:md5(AttData)))},
+            {"Host", Host},
+            {"Transfer-Encoding", "chunked"}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(201, Code),
+        ?assertEqual(true, get_json(Json, [<<"ok">>]))
+    end).
+
+%% A correct Content-MD5 sent as an HTTP trailer after the last chunk.
+should_upload_attachment_by_chunks_with_valid_md5_trailer({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        AttData = <<"We all live in a yellow submarine!">>,
+        <<Part1:21/binary, Part2:13/binary>> = AttData,
+        Body = [chunked_body([Part1, Part2]),
+                "Content-MD5: ", base64:encode(couch_util:md5(AttData)),
+                "\r\n"],
+        Headers = [
+            {"Content-Type", "text/plain"},
+            {"Host", Host},
+            {"Trailer", "Content-MD5"},
+            {"Transfer-Encoding", "chunked"}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(201, Code),
+        ?assertEqual(true, get_json(Json, [<<"ok">>]))
+    end).
+
+%% A bogus Content-MD5 must be rejected with 400 content_md5_mismatch.
+should_reject_attachment_with_invalid_md5({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        Body = "We all live in a yellow submarine!",
+        Headers = [
+            {"Content-Length", "34"},
+            {"Content-Type", "text/plain"},
+            {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
+            {"Host", Host}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(400, Code),
+        ?assertEqual(<<"content_md5_mismatch">>,
+                     get_json(Json, [<<"error">>]))
+    end).
+
+
+%% Same rejection for a bogus checksum on a chunked upload.
+should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        AttData = <<"We all live in a yellow submarine!">>,
+        <<Part1:21/binary, Part2:13/binary>> = AttData,
+        Body = chunked_body([Part1, Part2]),
+        Headers = [
+            {"Content-Type", "text/plain"},
+            {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
+            {"Host", Host},
+            {"Transfer-Encoding", "chunked"}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(400, Code),
+        ?assertEqual(<<"content_md5_mismatch">>,
+                     get_json(Json, [<<"error">>]))
+    end).
+
+%% And for a bogus checksum delivered as a trailer.
+should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        AttData = <<"We all live in a yellow submarine!">>,
+        <<Part1:21/binary, Part2:13/binary>> = AttData,
+        Body = [chunked_body([Part1, Part2]),
+                "Content-MD5: ", base64:encode(<<"foobar!">>),
+                "\r\n"],
+        Headers = [
+            {"Content-Type", "text/plain"},
+            {"Host", Host},
+            {"Trailer", "Content-MD5"},
+            {"Transfer-Encoding", "chunked"}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(400, Code),
+        ?assertEqual(<<"content_md5_mismatch">>, get_json(Json, [<<"error">>]))
+    end).
+
+%% Without Accept-Encoding: gzip, the raw bytes must come back
+%% uncompressed regardless of how the attachment was created.
+should_get_att_without_accept_gzip_encoding(_, {Data, {_, _, AttUrl}}) ->
+    ?_test(begin
+        {ok, Code, Headers, Body} = test_request:get(AttUrl),
+        ?assertEqual(200, Code),
+        ?assertNot(lists:member({"Content-Encoding", "gzip"}, Headers)),
+        ?assertEqual(Data, iolist_to_binary(Body))
+    end).
+
+%% With Accept-Encoding: gzip, pre-compressed and compressible (text)
+%% attachments are served gzipped; non-compressible (binary) ones are
+%% served as-is with no Content-Encoding.
+should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) ->
+    ?_test(begin
+        {ok, Code, Headers, Body} = test_request:get(
+            AttUrl, [{"Accept-Encoding", "gzip"}]),
+        ?assertEqual(200, Code),
+        ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
+        ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
+    end);
+should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) ->
+    ?_test(begin
+        {ok, Code, Headers, Body} = test_request:get(
+            AttUrl, [{"Accept-Encoding", "gzip"}]),
+        ?assertEqual(200, Code),
+        ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
+        ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
+    end);
+should_get_att_with_accept_gzip_encoding({binary, _}, {Data, {_, _, AttUrl}}) ->
+    ?_test(begin
+        {ok, Code, Headers, Body} = test_request:get(
+            AttUrl, [{"Accept-Encoding", "gzip"}]),
+        ?assertEqual(200, Code),
+        ?assertEqual(undefined,
+                     couch_util:get_value("Content-Encoding", Headers)),
+        ?assertEqual(Data, iolist_to_binary(Body))
+    end).
+
+%% deflate is not offered: the response is plain with no
+%% Content-Encoding header.
+should_get_att_with_accept_deflate_encoding(_, {Data, {_, _, AttUrl}}) ->
+    ?_test(begin
+        {ok, Code, Headers, Body} = test_request:get(
+            AttUrl, [{"Accept-Encoding", "deflate"}]),
+        ?assertEqual(200, Code),
+        ?assertEqual(undefined,
+                     couch_util:get_value("Content-Encoding", Headers)),
+        ?assertEqual(Data, iolist_to_binary(Body))
+    end).
+
+%% Refusing identity (*;q=0) while only offering deflate must yield
+%% 406 Not Acceptable.
+should_return_406_response_on_unsupported_encoding(_, {_, {_, _, AttUrl}}) ->
+    ?_assertEqual(406,
+        begin
+            {ok, Code, _, _} = test_request:get(
+                AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}]),
+            Code
+        end).
+
+%% ?attachments=true inlines the attachment as base64 in the document;
+%% the decoded data must equal the original fixture bytes for every
+%% creation mode (pre-compressed, text, binary).
+should_get_doc_with_att_data(compressed, {Data, {_, DocUrl, _}}) ->
+    ?_test(begin
+        Url = DocUrl ++ "?attachments=true",
+        {ok, Code, _, Body} = test_request:get(
+            Url, [{"Accept", "application/json"}]),
+        ?assertEqual(200, Code),
+        Json = ejson:decode(Body),
+        AttJson = couch_util:get_nested_json_value(
+            Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
+        AttData = couch_util:get_nested_json_value(
+            AttJson, [<<"data">>]),
+        ?assertEqual(
+            <<"text/plain">>,
+            couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
+        ?assertEqual(Data, base64:decode(AttData))
+    end);
+should_get_doc_with_att_data({text, _}, {Data, {_, DocUrl, _}}) ->
+    ?_test(begin
+        Url = DocUrl ++ "?attachments=true",
+        {ok, Code, _, Body} = test_request:get(
+            Url, [{"Accept", "application/json"}]),
+        ?assertEqual(200, Code),
+        Json = ejson:decode(Body),
+        AttJson = couch_util:get_nested_json_value(
+            Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
+        AttData = couch_util:get_nested_json_value(
+            AttJson, [<<"data">>]),
+        ?assertEqual(
+            <<"text/plain">>,
+            couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
+        ?assertEqual(Data, base64:decode(AttData))
+    end);
+should_get_doc_with_att_data({binary, _}, {Data, {_, DocUrl, _}}) ->
+    ?_test(begin
+        Url = DocUrl ++ "?attachments=true",
+        {ok, Code, _, Body} = test_request:get(
+            Url, [{"Accept", "application/json"}]),
+        ?assertEqual(200, Code),
+        Json = ejson:decode(Body),
+        AttJson = couch_util:get_nested_json_value(
+            Json, [<<"_attachments">>, ?ATT_BIN_NAME]),
+        AttData = couch_util:get_nested_json_value(
+            AttJson, [<<"data">>]),
+        ?assertEqual(
+            <<"image/png">>,
+            couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
+        ?assertEqual(Data, base64:decode(AttData))
+    end).
+
+%% ?att_encoding_info=true returns stubs with encoding metadata:
+%% pre-compressed attachments report their gzipped size as both length
+%% and encoded_length; server-compressed text reports both sizes;
+%% binary attachments carry no encoding info at all.
+should_get_doc_with_att_data_stub(compressed, {Data, {_, DocUrl, _}}) ->
+    ?_test(begin
+        Url = DocUrl ++ "?att_encoding_info=true",
+        {ok, Code, _, Body} = test_request:get(
+            Url, [{"Accept", "application/json"}]),
+        ?assertEqual(200, Code),
+        Json = ejson:decode(Body),
+        {AttJson} = couch_util:get_nested_json_value(
+            Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
+        ?assertEqual(<<"gzip">>,
+                     couch_util:get_value(<<"encoding">>, AttJson)),
+        AttLength = couch_util:get_value(<<"length">>, AttJson),
+        EncLength = couch_util:get_value(<<"encoded_length">>, AttJson),
+        ?assertEqual(AttLength, EncLength),
+        ?assertEqual(iolist_size(zlib:gzip(Data)), AttLength)
+    end);
+should_get_doc_with_att_data_stub({text, _}, {Data, {_, DocUrl, _}}) ->
+    ?_test(begin
+        Url = DocUrl ++ "?att_encoding_info=true",
+        {ok, Code, _, Body} = test_request:get(
+            Url, [{"Accept", "application/json"}]),
+        ?assertEqual(200, Code),
+        Json = ejson:decode(Body),
+        {AttJson} = couch_util:get_nested_json_value(
+            Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
+        ?assertEqual(<<"gzip">>,
+                     couch_util:get_value(<<"encoding">>, AttJson)),
+        AttEncLength = iolist_size(gzip(Data)),
+        ?assertEqual(AttEncLength,
+                     couch_util:get_value(<<"encoded_length">>, AttJson)),
+        ?assertEqual(byte_size(Data),
+                     couch_util:get_value(<<"length">>, AttJson))
+    end);
+should_get_doc_with_att_data_stub({binary, _}, {Data, {_, DocUrl, _}}) ->
+    ?_test(begin
+        Url = DocUrl ++ "?att_encoding_info=true",
+        {ok, Code, _, Body} = test_request:get(
+            Url, [{"Accept", "application/json"}]),
+        ?assertEqual(200, Code),
+        Json = ejson:decode(Body),
+        {AttJson} = couch_util:get_nested_json_value(
+            Json, [<<"_attachments">>, ?ATT_BIN_NAME]),
+        ?assertEqual(undefined,
+                     couch_util:get_value(<<"encoding">>, AttJson)),
+        ?assertEqual(undefined,
+                     couch_util:get_value(<<"encoded_length">>, AttJson)),
+        ?assertEqual(byte_size(Data),
+                     couch_util:get_value(<<"length">>, AttJson))
+    end).
+
+%% Uploading with Content-Encoding: deflate is unsupported and must be
+%% rejected with 415.
+should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) ->
+    ?_assertEqual(415,
+        begin
+            HttpHost = "http://" ++ Host,
+            AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
+            {ok, Data} = file:read_file(?FIXTURE_TXT),
+            Body = zlib:compress(Data),
+            Headers = [
+                {"Content-Encoding", "deflate"},
+                {"Content-Type", "text/plain"}
+            ],
+            {ok, Code, _, _} = test_request:put(AttUrl, Headers, Body),
+            Code
+        end).
+
+%% Content-Encoding: compress is likewise rejected; the body is sent
+%% uncompressed since the encoding cannot be produced (see note).
+should_not_create_compressed_att_with_compress_encoding({Host, DbName}) ->
+    % Note: As of OTP R13B04, it seems there's no LZW compression
+    % (i.e. UNIX compress utility implementation) lib in OTP.
+    % However there's a simple working Erlang implementation at:
+    % http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php
+    ?_assertEqual(415,
+        begin
+            HttpHost = "http://" ++ Host,
+            AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
+            {ok, Data} = file:read_file(?FIXTURE_TXT),
+            Headers = [
+                {"Content-Encoding", "compress"},
+                {"Content-Type", "text/plain"}
+            ],
+            {ok, Code, _, _} = test_request:put(AttUrl, Headers, Data),
+            Code
+        end).
+
+%% A "text/plain; charset=UTF-8" content type (with parameters) must
+%% still match the compressible_types pattern and be gzipped on disk.
+should_create_compressible_att_with_ctype_params({Host, DbName}) ->
+    {timeout, ?TIMEOUT_EUNIT, ?_test(begin
+        HttpHost = "http://" ++ Host,
+        DocUrl = string:join([HttpHost, DbName, ?docid()], "/"),
+        AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"),
+        {ok, Data} = file:read_file(?FIXTURE_TXT),
+        Headers = [{"Content-Type", "text/plain; charset=UTF-8"}],
+        {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data),
+        ?assertEqual(201, Code0),
+
+        {ok, Code1, _, Body} = test_request:get(
+            DocUrl ++ "?att_encoding_info=true"),
+        ?assertEqual(200, Code1),
+        Json = ejson:decode(Body),
+        {AttJson} = couch_util:get_nested_json_value(
+            Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
+        ?assertEqual(<<"gzip">>,
+                     couch_util:get_value(<<"encoding">>, AttJson)),
+        AttEncLength = iolist_size(gzip(Data)),
+        ?assertEqual(AttEncLength,
+                     couch_util:get_value(<<"encoded_length">>, AttJson)),
+        ?assertEqual(byte_size(Data),
+                     couch_util:get_value(<<"length">>, AttJson))
+    end)}.
+
+
+%% Shorthand for fetching a nested value from a decoded JSON term.
+get_json(Json, Path) ->
+    couch_util:get_nested_json_value(Json, Path).
+
+%% Convert a non-negative integer to an uppercase hexadecimal string.
+to_hex(0) ->
+    %% The accumulator loop below produces nothing for 0, which would
+    %% yield an empty — and, as a chunk-size line, malformed — string;
+    %% map 0 explicitly to "0".
+    "0";
+to_hex(Val) ->
+    to_hex(Val, []).
+
+%% Digits are generated least-significant first and consed onto Acc,
+%% so Acc ends up in the correct order.
+to_hex(0, Acc) ->
+    Acc;
+to_hex(Val, Acc) ->
+    to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
+
+%% Map 0..15 to its uppercase hex digit character.
+hex_char(V) when V < 10 -> $0 + V;
+hex_char(V) -> $A + V - 10.
+
+%% Encode Chunks as an HTTP/1.1 chunked transfer body: for each chunk a
+%% hex size line, CRLF, the data, CRLF — terminated by a zero-size
+%% chunk marker.
+chunked_body(Chunks) ->
+    chunked_body(Chunks, []).
+
+chunked_body([], Acc) ->
+    iolist_to_binary(lists:reverse(Acc, "0\r\n"));
+chunked_body([Chunk | Rest], Acc) ->
+    Size = to_hex(byte_size(Chunk)),
+    chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
+
+%% Open a raw passive-mode binary TCP connection to the local httpd.
+get_socket() ->
+    Options = [binary, {packet, 0}, {active, false}],
+    Addr = couch_config:get("httpd", "bind_address", any),
+    Port = mochiweb_socket_server:get(couch_httpd, port),
+    {ok, Sock} = gen_tcp:connect(Addr, Port, Options),
+    Sock.
+
+%% Issue a raw HTTP/1.1 request over a fresh socket and return
+%% {ok, Code, DecodedJsonBody}. A raw socket (instead of an HTTP
+%% client) is used so chunked bodies and trailers can be sent verbatim.
+request(Method, Url, Headers, Body) ->
+    RequestHead = [Method, " ", Url, " HTTP/1.1"],
+    RequestHeaders = [[string:join([Key, Value], ": "), "\r\n"]
+                      || {Key, Value} <- Headers],
+    Request = [RequestHead, "\r\n", RequestHeaders, "\r\n", Body, "\r\n"],
+    %% gen_tcp:send/2 accepts iolists directly, so the original
+    %% lists:flatten/list_to_binary round-trip is unnecessary; also
+    %% assert the send succeeded instead of ignoring its result.
+    Sock = get_socket(),
+    ok = gen_tcp:send(Sock, Request),
+    timer:sleep(?TIMEWAIT), % must wait to receive complete response
+    {ok, R} = gen_tcp:recv(Sock, 0),
+    gen_tcp:close(Sock),
+    %% NOTE(review): a single recv/2 assumes the whole response arrives
+    %% in one read — fine for the small JSON replies used here.
+    [Header, Body1] = re:split(R, "\r\n\r\n", [{return, binary}]),
+    {ok, {http_response, _, Code, _}, _} =
+        erlang:decode_packet(http, Header, []),
+    Json = ejson:decode(Body1),
+    {ok, Code, Json}.
+
+%% PUT the text fixture directly via the attachments API; returns the
+%% attachment URL.
+create_standalone_text_att(Host, DbName) ->
+    {ok, Data} = file:read_file(?FIXTURE_TXT),
+    Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
+    {ok, Code, _Headers, _Body} = test_request:put(
+        Url, [{"Content-Type", "text/plain"}], Data),
+    ?assertEqual(201, Code),
+    Url.
+
+%% PUT the PNG fixture directly via the attachments API.
+create_standalone_png_att(Host, DbName) ->
+    {ok, Data} = file:read_file(?FIXTURE_PNG),
+    Url = string:join([Host, DbName, "doc", ?b2l(?ATT_BIN_NAME)], "/"),
+    {ok, Code, _Headers, _Body} = test_request:put(
+        Url, [{"Content-Type", "image/png"}], Data),
+    ?assertEqual(201, Code),
+    Url.
+
+%% Create the text attachment inline (base64 inside the document JSON);
+%% returns the attachment URL.
+create_inline_text_att(Host, DbName) ->
+    {ok, Data} = file:read_file(?FIXTURE_TXT),
+    Url = string:join([Host, DbName, "doc"], "/"),
+    Doc = {[
+        {<<"_attachments">>, {[
+            {?ATT_TXT_NAME, {[
+                {<<"content_type">>, <<"text/plain">>},
+                {<<"data">>, base64:encode(Data)}
+            ]}
+        }]}}
+    ]},
+    {ok, Code, _Headers, _Body} = test_request:put(
+        Url, [{"Content-Type", "application/json"}], ejson:encode(Doc)),
+    ?assertEqual(201, Code),
+    string:join([Url, ?b2l(?ATT_TXT_NAME)], "/").
+
+%% Create the PNG attachment inline.
+create_inline_png_att(Host, DbName) ->
+    {ok, Data} = file:read_file(?FIXTURE_PNG),
+    Url = string:join([Host, DbName, "doc"], "/"),
+    Doc = {[
+        {<<"_attachments">>, {[
+            {?ATT_BIN_NAME, {[
+                {<<"content_type">>, <<"image/png">>},
+                {<<"data">>, base64:encode(Data)}
+            ]}
+        }]}}
+    ]},
+    {ok, Code, _Headers, _Body} = test_request:put(
+        Url, [{"Content-Type", "application/json"}], ejson:encode(Doc)),
+    ?assertEqual(201, Code),
+    string:join([Url, ?b2l(?ATT_BIN_NAME)], "/").
+
+%% Upload the text fixture already gzipped, flagged via
+%% Content-Encoding: gzip, so the server stores it as pre-compressed.
+create_already_compressed_att(Host, DbName) ->
+    {ok, Data} = file:read_file(?FIXTURE_TXT),
+    Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
+    {ok, Code, _Headers, _Body} = test_request:put(
+        Url, [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}],
+        zlib:gzip(Data)),
+    ?assertEqual(201, Code),
+    Url.
+
+%% gzip-compress Data at ?COMPRESSION_LEVEL, mirroring the server-side
+%% attachment compression; returns the compressed stream as an iolist.
+gzip(Data) ->
+    Z = zlib:open(),
+    %% 16 + 15 window bits selects the gzip (not raw deflate) format.
+    ok = zlib:deflateInit(Z, ?COMPRESSION_LEVEL, deflated, 16 + 15, 8, default),
+    %% The original discarded the result of this first deflate/2 call
+    %% and returned only the finish flush; zlib is free to emit output
+    %% before 'finish', so both parts must be kept or the stream is
+    %% silently truncated.
+    Part = zlib:deflate(Z, Data),
+    Last = zlib:deflate(Z, [], finish),
+    ok = zlib:deflateEnd(Z),
+    ok = zlib:close(Z),
+    [Part, Last].
diff --git a/test/couchdb/couchdb_compaction_daemon.erl b/test/couchdb/couchdb_compaction_daemon.erl
new file mode 100644
index 000000000..725a97b45
--- /dev/null
+++ b/test/couchdb/couchdb_compaction_daemon.erl
@@ -0,0 +1,231 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_compaction_daemon).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(DELAY, 100).
+-define(TIMEOUT, 30000).
+-define(TIMEOUT_S, ?TIMEOUT div 1000).
+
+
+%% Suite setup: boot a couch server and shrink the compaction daemon's
+%% polling interval / minimum file size so tests trigger quickly.
+start() ->
+    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+    couch_config:set("compaction_daemon", "check_interval", "3", false),
+    couch_config:set("compaction_daemon", "min_file_size", "100000", false),
+    Pid.
+
+%% Suite teardown: stop the server and block until the supervisor
+%% process is actually down (or ?TIMEOUT elapses).
+stop(Pid) ->
+    erlang:monitor(process, Pid),
+    couch_server_sup:stop(),
+    receive
+        {'DOWN', _, _, Pid, _} ->
+            ok
+    after ?TIMEOUT ->
+        throw({timeout, server_stop})
+    end.
+
+%% Per-test fixture: create a fresh temp database holding a design doc
+%% with three identical views; returns the database name.
+setup() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+    create_design_doc(Db),
+    ok = couch_db:close(Db),
+    DbName.
+
+%% Per-test cleanup: remove every compaction rule a test may have left
+%% in the "compactions" config section, then delete the database.
+teardown(DbName) ->
+    Configs = couch_config:get("compactions"),
+    lists:foreach(
+        fun({Key, _}) ->
+            ok = couch_config:delete("compactions", Key, false)
+        end,
+        Configs),
+    couch_server:delete(DbName, [?ADMIN_USER]),
+    ok.
+
+
+%% EUnit generator: one server for the whole suite (setup/cleanup pair),
+%% a fresh database per test case (foreach fixture).
+compaction_daemon_test_() ->
+    {
+        "Compaction daemon tests",
+        {
+            setup,
+            fun start/0, fun stop/1,
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun should_compact_by_default_rule/1,
+                    fun should_compact_by_dbname_rule/1
+                ]
+            }
+        }
+    }.
+
+
+%% Fragment db and view files past 70%, install the catch-all "_default"
+%% compaction rule, then wait for the daemon to compact both.  Fragmentation
+%% must fall below 70% and both files must shrink; the db must end up idle.
+should_compact_by_default_rule(DbName) ->
+    {timeout, ?TIMEOUT_S, ?_test(begin
+        {ok, Db} = couch_db:open_int(DbName, []),
+        populate(DbName, 70, 70, 200 * 1024),
+
+        {_, DbFileSize} = get_db_frag(DbName),
+        {_, ViewFileSize} = get_view_frag(DbName),
+
+        ok = couch_config:set("compactions", "_default",
+            "[{db_fragmentation, \"70%\"}, {view_fragmentation, \"70%\"}]",
+            false),
+
+        ok = timer:sleep(4000), % something >= check_interval
+        wait_compaction_finished(DbName),
+        ok = couch_config:delete("compactions", "_default", false),
+
+        {DbFrag2, DbFileSize2} = get_db_frag(DbName),
+        {ViewFrag2, ViewFileSize2} = get_view_frag(DbName),
+
+        ?assert(DbFrag2 < 70),
+        ?assert(ViewFrag2 < 70),
+
+        ?assert(DbFileSize > DbFileSize2),
+        ?assert(ViewFileSize > ViewFileSize2),
+
+        ?assert(couch_db:is_idle(Db)),
+        ok = couch_db:close(Db)
+    end)}.
+
+%% Same scenario as should_compact_by_default_rule/1, but the compaction
+%% rule is keyed by the specific database name rather than "_default".
+should_compact_by_dbname_rule(DbName) ->
+    {timeout, ?TIMEOUT_S, ?_test(begin
+        {ok, Db} = couch_db:open_int(DbName, []),
+        populate(DbName, 70, 70, 200 * 1024),
+
+        {_, DbFileSize} = get_db_frag(DbName),
+        {_, ViewFileSize} = get_view_frag(DbName),
+
+        ok = couch_config:set("compactions", ?b2l(DbName),
+            "[{db_fragmentation, \"70%\"}, {view_fragmentation, \"70%\"}]",
+            false),
+
+        ok = timer:sleep(4000), % something >= check_interval
+        wait_compaction_finished(DbName),
+        ok = couch_config:delete("compactions", ?b2l(DbName), false),
+
+        {DbFrag2, DbFileSize2} = get_db_frag(DbName),
+        {ViewFrag2, ViewFileSize2} = get_view_frag(DbName),
+
+        ?assert(DbFrag2 < 70),
+        ?assert(ViewFrag2 < 70),
+
+        ?assert(DbFileSize > DbFileSize2),
+        ?assert(ViewFileSize > ViewFileSize2),
+
+        ?assert(couch_db:is_idle(Db)),
+        ok = couch_db:close(Db)
+    end)}.
+
+
+%% Write _design/foo with three identical map views (foo, foo2, foo3) so
+%% the view group has an index worth fragmenting, then fsync the db.
+create_design_doc(Db) ->
+    DDoc = couch_doc:from_json_obj({[
+        {<<"_id">>, <<"_design/foo">>},
+        {<<"language">>, <<"javascript">>},
+        {<<"views">>, {[
+            {<<"foo">>, {[
+                {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
+            ]}},
+            {<<"foo2">>, {[
+                {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
+            ]}},
+            {<<"foo3">>, {[
+                {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
+            ]}}
+        ]}}
+    ]}),
+    {ok, _} = couch_db:update_docs(Db, [DDoc]),
+    {ok, _} = couch_db:ensure_full_commit(Db),
+    ok.
+
+%% Keep writing to DbName until both db and view fragmentation reach the
+%% requested percentages AND the smaller of the two files is at least
+%% MinFileSize bytes (so the daemon's min_file_size check passes).
+populate(DbName, DbFrag, ViewFrag, MinFileSize) ->
+    {CurDbFrag, DbFileSize} = get_db_frag(DbName),
+    {CurViewFrag, ViewFileSize} = get_view_frag(DbName),
+    populate(DbName, DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag,
+        lists:min([DbFileSize, ViewFileSize])).
+
+%% Terminal clause: all three thresholds reached.
+populate(_Db, DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag, FileSize)
+    when CurDbFrag >= DbFrag, CurViewFrag >= ViewFrag, FileSize >= MinFileSize ->
+    ok;
+%% Otherwise write another batch and re-measure.
+populate(DbName, DbFrag, ViewFrag, MinFileSize, _, _, _) ->
+    update(DbName),
+    {CurDbFrag, DbFileSize} = get_db_frag(DbName),
+    {CurViewFrag, ViewFileSize} = get_view_frag(DbName),
+    populate(DbName, DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag,
+        lists:min([DbFileSize, ViewFileSize])).
+
+%% Write 200 throwaway docs one at a time, querying the view after each
+%% write so the view index is updated incrementally and its file
+%% accumulates garbage along with the database file.
+update(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    lists:foreach(fun(_) ->
+        Doc = couch_doc:from_json_obj({[{<<"_id">>, couch_uuids:new()}]}),
+        {ok, _} = couch_db:update_docs(Db, [Doc]),
+        query_view(Db#db.name)
+    end, lists:seq(1, 200)),
+    couch_db:close(Db).
+
+%% Build the HTTP base URL for DbName from the running httpd's bind
+%% address and actual listening port.
+db_url(DbName) ->
+    Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
+    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+    "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName).
+
+%% GET the foo view over HTTP (forces an index update); asserts a 200.
+query_view(DbName) ->
+    {ok, Code, _Headers, _Body} = test_request:get(
+        db_url(DbName) ++ "/_design/foo/_view/foo"),
+    ?assertEqual(200, Code).
+
+%% Return {FragmentationPercent, FileSize} for the database file, where
+%% fragmentation = (disk_size - data_size) / disk_size * 100, rounded.
+get_db_frag(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, Info} = couch_db:get_db_info(Db),
+    couch_db:close(Db),
+    FileSize = couch_util:get_value(disk_size, Info),
+    DataSize = couch_util:get_value(data_size, Info),
+    {round((FileSize - DataSize) / FileSize * 100), FileSize}.
+
+%% Same computation as get_db_frag/1 but for the _design/foo view group.
+get_view_frag(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, Info} = couch_mrview:get_info(Db, <<"_design/foo">>),
+    couch_db:close(Db),
+    FileSize = couch_util:get_value(disk_size, Info),
+    DataSize = couch_util:get_value(data_size, Info),
+    {round((FileSize - DataSize) / FileSize * 100), FileSize}.
+
+%% Spawn a linked poller and block until it reports that neither the db
+%% nor the view group has a compaction running; fail after ?TIMEOUT.
+wait_compaction_finished(DbName) ->
+    Parent = self(),
+    Loop = spawn_link(fun() -> wait_loop(DbName, Parent) end),
+    receive
+        {done, Loop} ->
+            ok
+    after ?TIMEOUT ->
+        erlang:error(
+            {assertion_failed,
+             [{module, ?MODULE}, {line, ?LINE},
+              {reason, "Compaction timeout"}]})
+    end.
+
+%% Poll the compact_running flags of db and view info every ?DELAY ms;
+%% notify Parent once both are false.
+wait_loop(DbName, Parent) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, DbInfo} = couch_db:get_db_info(Db),
+    {ok, ViewInfo} = couch_mrview:get_info(Db, <<"_design/foo">>),
+    couch_db:close(Db),
+    case (couch_util:get_value(compact_running, ViewInfo) =:= true) orelse
+        (couch_util:get_value(compact_running, DbInfo) =:= true) of
+        false ->
+            Parent ! {done, self()};
+        true ->
+            ok = timer:sleep(?DELAY),
+            wait_loop(DbName, Parent)
+    end.
diff --git a/test/couchdb/couchdb_cors_tests.erl b/test/couchdb/couchdb_cors_tests.erl
new file mode 100644
index 000000000..4e88ae732
--- /dev/null
+++ b/test/couchdb/couchdb_cors_tests.erl
@@ -0,0 +1,344 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_cors_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(SUPPORTED_METHODS,
+ "GET, HEAD, POST, PUT, DELETE, TRACE, CONNECT, COPY, OPTIONS").
+-define(TIMEOUT, 1000).
+
+
+%% Suite setup: boot a couch server with CORS enabled and a vhost that
+%% maps example.com to "/" (needed for the vhost CORS variants).
+start() ->
+    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+    ok = couch_config:set("httpd", "enable_cors", "true", false),
+    ok = couch_config:set("vhosts", "example.com", "/", false),
+    Pid.
+
+%% Suite teardown: stop the server and wait for the supervisor to exit.
+%% Monitoring after stop is still safe: a monitor on a dead pid delivers
+%% an immediate 'DOWN' message.
+stop(Pid) ->
+    couch_server_sup:stop(),
+    erlang:monitor(process, Pid),
+    receive
+        {'DOWN', _, _, Pid, _} ->
+            ok
+    after ?TIMEOUT ->
+        throw({timeout, server_stop})
+    end.
+
+%% Base fixture: temp db, default CORS config (no credentials, origin
+%% http://example.com); returns {Host, DbName}.
+setup() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+    couch_db:close(Db),
+
+    couch_config:set("cors", "credentials", "false", false),
+    couch_config:set("cors", "origins", "http://example.com", false),
+
+    Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
+    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+    Host = "http://" ++ Addr ++ ":" ++ Port,
+    {Host, ?b2l(DbName)}.
+
+%% Parameterised fixture for foreachx: target either the server root or
+%% the db URL, optionally adding a vhost Host header; returns
+%% {Host, DbName, Url, DefaultHeaders}.
+setup({Mod, VHost}) ->
+    {Host, DbName} = setup(),
+    Url = case Mod of
+        server ->
+            Host;
+        db ->
+            Host ++ "/" ++ DbName
+    end,
+    DefaultHeaders = [{"Origin", "http://example.com"}]
+        ++ maybe_append_vhost(VHost),
+    {Host, DbName, Url, DefaultHeaders}.
+
+%% Cleanup clauses: accept either a bare db name or a fixture tuple.
+teardown(DbName) when is_list(DbName) ->
+    ok = couch_server:delete(?l2b(DbName), [?ADMIN_USER]),
+    ok;
+teardown({_, DbName}) ->
+    teardown(DbName).
+
+%% foreachx cleanup: ignores the {Mod, VHost} tag, extracts the db name.
+teardown(_, {_, DbName, _, _}) ->
+    teardown(DbName).
+
+
+%% EUnit generator (COUCHDB-431): run the shared list of CORS cases
+%% against server and db endpoints, with and without vhosts, plus a set
+%% of header-specific regression tests.
+cors_test_() ->
+    Funs = [
+        fun should_not_allow_origin/2,
+        fun should_not_allow_origin_with_port_mismatch/2,
+        fun should_not_allow_origin_with_scheme_mismatch/2,
+        fun should_not_all_origin_due_case_mismatch/2,
+        fun should_make_simple_request/2,
+        fun should_make_preflight_request/2,
+        fun should_make_prefligh_request_with_port/2,
+        fun should_make_prefligh_request_with_scheme/2,
+        fun should_make_prefligh_request_with_wildcard_origin/2,
+        fun should_make_request_with_credentials/2,
+        fun should_make_origin_request_with_auth/2,
+        fun should_make_preflight_request_with_auth/2
+    ],
+    {
+        "CORS (COUCHDB-431)",
+        {
+            setup,
+            fun start/0, fun stop/1,
+            [
+                cors_tests(Funs),
+                vhost_cors_tests(Funs),
+                headers_tests()
+            ]
+        }
+    }.
+
+%% Header regression tests that use the plain {Host, DbName} fixture.
+headers_tests() ->
+    {
+        "Various headers tests",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_not_return_cors_headers_for_invalid_origin/1,
+                fun should_not_return_cors_headers_for_invalid_origin_preflight/1,
+                fun should_make_request_against_attachment/1,
+                fun should_make_range_request_against_attachment/1,
+                fun should_make_request_with_if_none_match_header/1
+            ]
+        }
+    }.
+
+%% Shared case list against server and db endpoints, no vhost.
+cors_tests(Funs) ->
+    {
+        "CORS tests",
+        [
+            make_test_case(server, false, Funs),
+            make_test_case(db, false, Funs)
+        ]
+    }.
+
+%% Same case list but with the example.com vhost Host header added.
+vhost_cors_tests(Funs) ->
+    {
+        "Virtual Host CORS",
+        [
+            make_test_case(server, true, Funs),
+            make_test_case(db, true, Funs)
+        ]
+    }.
+
+%% Build a foreachx fixture pairing every Fun with the {Mod, UseVhost}
+%% tag consumed by setup/1 and teardown/2.
+make_test_case(Mod, UseVhost, Funs) ->
+    {
+        case Mod of server -> "Server"; db -> "Database" end,
+        {foreachx, fun setup/1, fun teardown/2, [{{Mod, UseVhost}, Fun}
+            || Fun <- Funs]}
+    }.
+
+
+%% With all configured origins removed, no Access-Control-Allow-Origin
+%% header may be returned for any Origin.
+should_not_allow_origin(_, {_, _, Url, Headers0}) ->
+    ?_assertEqual(undefined,
+        begin
+            couch_config:delete("cors", "origins", false),
+            Headers1 = proplists:delete("Origin", Headers0),
+            Headers = [{"Origin", "http://127.0.0.1"}]
+                ++ Headers1,
+            {ok, _, Resp, _} = test_request:get(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+%% Configured origin has no port; an origin with an explicit port must
+%% not match.
+should_not_allow_origin_with_port_mismatch({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual(undefined,
+        begin
+            Headers = [{"Origin", "http://example.com:5984"},
+                       {"Access-Control-Request-Method", "GET"}]
+                ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+%% Configured origin is http://example.com; an https origin must not be
+%% allowed.  Fix: the original sent "http://example.com:5984", which
+%% exercised a port mismatch and merely duplicated the previous test
+%% case instead of testing the scheme.
+should_not_allow_origin_with_scheme_mismatch({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual(undefined,
+        begin
+            Headers = [{"Origin", "https://example.com"},
+                       {"Access-Control-Request-Method", "GET"}]
+                ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+%% Origin matching is case-sensitive: a differently-cased host must not
+%% be allowed.  (Function name has typos -- "all"/"due" -- kept because
+%% it is referenced by name in cors_test_/0.)
+should_not_all_origin_due_case_mismatch({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual(undefined,
+        begin
+            Headers = [{"Origin", "http://ExAmPlE.CoM"},
+                       {"Access-Control-Request-Method", "GET"}]
+                ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+%% A simple (non-preflight) request from the allowed origin: no
+%% credentials header, correct Allow-Origin echo, and the expected
+%% Expose-Headers list.
+should_make_simple_request(_, {_, _, Url, DefaultHeaders}) ->
+    ?_test(begin
+        {ok, _, Resp, _} = test_request:get(Url, DefaultHeaders),
+        ?assertEqual(
+            undefined,
+            proplists:get_value("Access-Control-Allow-Credentials", Resp)),
+        ?assertEqual(
+            "http://example.com",
+            proplists:get_value("Access-Control-Allow-Origin", Resp)),
+        ?assertEqual(
+            "Cache-Control, Content-Type, Server",
+            proplists:get_value("Access-Control-Expose-Headers", Resp))
+    end).
+
+%% An OPTIONS preflight from the allowed origin must advertise the full
+%% supported method list.
+should_make_preflight_request(_, {_, _, Url, DefaultHeaders}) ->
+    ?_assertEqual(?SUPPORTED_METHODS,
+        begin
+            Headers = DefaultHeaders
+                ++ [{"Access-Control-Request-Method", "GET"}],
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Methods", Resp)
+        end).
+
+%% NOTE(review): "prefligh" is a typo for "preflight" in these three
+%% function names; left as-is because cors_test_/0 references them.
+
+%% When the configured origin includes a port, a matching Origin with
+%% that port must be allowed and echoed back.
+should_make_prefligh_request_with_port({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual("http://example.com:5984",
+        begin
+            couch_config:set("cors", "origins", "http://example.com:5984",
+                false),
+            Headers = [{"Origin", "http://example.com:5984"},
+                       {"Access-Control-Request-Method", "GET"}]
+                ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+%% Same, with an https scheme in the configured origin.
+should_make_prefligh_request_with_scheme({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual("https://example.com:5984",
+        begin
+            couch_config:set("cors", "origins", "https://example.com:5984",
+                false),
+            Headers = [{"Origin", "https://example.com:5984"},
+                       {"Access-Control-Request-Method", "GET"}]
+                ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+%% A wildcard origins config must allow (and echo back) any origin.
+should_make_prefligh_request_with_wildcard_origin({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual("https://example.com:5984",
+        begin
+            couch_config:set("cors", "origins", "*", false),
+            Headers = [{"Origin", "https://example.com:5984"},
+                       {"Access-Control-Request-Method", "GET"}]
+                ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+%% With cors/credentials enabled, the response must carry
+%% Access-Control-Allow-Credentials: true.
+should_make_request_with_credentials(_, {_, _, Url, DefaultHeaders}) ->
+    ?_assertEqual("true",
+        begin
+            ok = couch_config:set("cors", "credentials", "true", false),
+            {ok, _, Resp, _} = test_request:options(Url, DefaultHeaders),
+            proplists:get_value("Access-Control-Allow-Credentials", Resp)
+        end).
+
+%% CORS headers must survive basic-auth requests: a temporary admin is
+%% created, the request authenticated, and Allow-Origin still echoed.
+should_make_origin_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
+    ?_assertEqual("http://example.com",
+        begin
+            Hashed = couch_passwords:hash_admin_password(<<"test">>),
+            couch_config:set("admins", "test", Hashed, false),
+            {ok, _, Resp, _} = test_request:get(
+                Url, DefaultHeaders, [{basic_auth, {"test", "test"}}]),
+            couch_config:delete("admins", "test", false),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+%% Same as above for a preflight: Allow-Methods must still be present
+%% when the server has an admin configured.
+should_make_preflight_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
+    ?_assertEqual(?SUPPORTED_METHODS,
+        begin
+            Hashed = couch_passwords:hash_admin_password(<<"test">>),
+            couch_config:set("admins", "test", Hashed, false),
+            Headers = DefaultHeaders
+                ++ [{"Access-Control-Request-Method", "GET"}],
+            {ok, _, Resp, _} = test_request:options(
+                Url, Headers, [{basic_auth, {"test", "test"}}]),
+            couch_config:delete("admins", "test", false),
+            proplists:get_value("Access-Control-Allow-Methods", Resp)
+        end).
+
+%% A GET from an unconfigured origin must get no CORS headers back.
+should_not_return_cors_headers_for_invalid_origin({Host, _}) ->
+    ?_assertEqual(undefined,
+        begin
+            Headers = [{"Origin", "http://127.0.0.1"}],
+            {ok, _, Resp, _} = test_request:get(Host, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+%% Same for an OPTIONS preflight from an unconfigured origin.
+should_not_return_cors_headers_for_invalid_origin_preflight({Host, _}) ->
+    ?_assertEqual(undefined,
+        begin
+            Headers = [{"Origin", "http://127.0.0.1"},
+                       {"Access-Control-Request-Method", "GET"}],
+            {ok, _, Resp, _} = test_request:options(Host, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+%% COUCHDB-1689: fetching a doc with ?attachments=true from an allowed
+%% origin must succeed (200) rather than break on the attachment path.
+should_make_request_against_attachment({Host, DbName}) ->
+    {"COUCHDB-1689",
+     ?_assertEqual(200,
+         begin
+             Url = Host ++ "/" ++ DbName,
+             {ok, Code0, _, _} = test_request:put(
+                 Url ++ "/doc/file.txt", [{"Content-Type", "text/plain"}],
+                 "hello, couch!"),
+             ?assert(Code0 =:= 201),
+             {ok, Code, _, _} = test_request:get(
+                 Url ++ "/doc?attachments=true",
+                 [{"Origin", "http://example.com"}]),
+             Code
+         end)}.
+
+%% COUCHDB-1689: a Range request on an attachment with CORS must return
+%% 206 Partial Content.
+should_make_range_request_against_attachment({Host, DbName}) ->
+    {"COUCHDB-1689",
+     ?_assertEqual(206,
+         begin
+             Url = Host ++ "/" ++ DbName,
+             {ok, Code0, _, _} = test_request:put(
+                 Url ++ "/doc/file.txt",
+                 [{"Content-Type", "application/octet-stream"}],
+                 "hello, couch!"),
+             ?assert(Code0 =:= 201),
+             {ok, Code, _, _} = test_request:get(
+                 Url ++ "/doc/file.txt", [{"Origin", "http://example.com"},
+                                          {"Range", "bytes=0-6"}]),
+             Code
+         end)}.
+
+%% COUCHDB-1697: If-None-Match with the current ETag plus CORS must
+%% yield 304 Not Modified.
+should_make_request_with_if_none_match_header({Host, DbName}) ->
+    {"COUCHDB-1697",
+     ?_assertEqual(304,
+         begin
+             Url = Host ++ "/" ++ DbName,
+             {ok, Code0, Headers0, _} = test_request:put(
+                 Url ++ "/doc", [{"Content-Type", "application/json"}], "{}"),
+             ?assert(Code0 =:= 201),
+             ETag = proplists:get_value("ETag", Headers0),
+             {ok, Code, _, _} = test_request:get(
+                 Url ++ "/doc", [{"Origin", "http://example.com"},
+                                 {"If-None-Match", ETag}]),
+             Code
+         end)}.
+
+
+%% Return a Host header pointing at the configured vhost when requested,
+%% otherwise no extra headers.
+maybe_append_vhost(true) ->
+    [{"Host", "http://example.com"}];
+maybe_append_vhost(false) ->
+    [].
diff --git a/test/couchdb/couchdb_file_compression_tests.erl b/test/couchdb/couchdb_file_compression_tests.erl
new file mode 100644
index 000000000..fd3f51352
--- /dev/null
+++ b/test/couchdb/couchdb_file_compression_tests.erl
@@ -0,0 +1,239 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_file_compression_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(DDOC_ID, <<"_design/test">>).
+-define(DOCS_COUNT, 5000).
+-define(TIMEOUT, 30000).
+
+
+%% Suite setup: boot a couch server with the default config chain.
+start() ->
+    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+    Pid.
+
+%% Suite teardown: stop the server and wait for the supervisor to die.
+stop(Pid) ->
+    erlang:monitor(process, Pid),
+    couch_server_sup:stop(),
+    receive
+        {'DOWN', _, _, Pid, _} ->
+            ok
+    after ?TIMEOUT ->
+        throw({timeout, server_stop})
+    end.
+
+%% Per-test fixture: start with compression disabled, create a temp db
+%% with ?DOCS_COUNT docs and one view, and build the index once so both
+%% files exist before any compaction comparison.
+setup() ->
+    couch_config:set("couchdb", "file_compression", "none", false),
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+    ok = populate_db(Db, ?DOCS_COUNT),
+    DDoc = couch_doc:from_json_obj({[
+        {<<"_id">>, ?DDOC_ID},
+        {<<"language">>, <<"javascript">>},
+        {<<"views">>, {[
+            {<<"by_id">>, {[
+                {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>}
+            ]}}
+        ]}
+        }
+    ]}),
+    {ok, _} = couch_db:update_doc(Db, DDoc, []),
+    refresh_index(DbName),
+    ok = couch_db:close(Db),
+    DbName.
+
+%% Per-test cleanup: drop the temp database.
+teardown(DbName) ->
+    ok = couch_server:delete(DbName, [?ADMIN_USER]),
+    ok.
+
+
+%% EUnit generator for the file-compression suite.
+%% Fix: renamed from couch_auth_cache_test_/0 -- a copy-paste leftover
+%% that mislabelled this suite and would collide with the real
+%% auth-cache tests if both modules were compiled together.  EUnit
+%% discovers any function ending in "_test_", so the rename is safe.
+couch_file_compression_test_() ->
+    {
+        "CouchDB file compression tests",
+        {
+            setup,
+            fun start/0, fun stop/1,
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun should_use_none/1,
+                    fun should_use_deflate_1/1,
+                    fun should_use_deflate_9/1,
+                    fun should_use_snappy/1,
+                    fun should_compare_compression_methods/1
+                ]
+            }
+        }
+    }.
+
+
+%% Each should_use_* case switches the file_compression setting and then
+%% verifies db and view compaction still shrink the files.
+
+should_use_none(DbName) ->
+    couch_config:set("couchdb", "file_compression", "none", false),
+    {
+        "Use no compression",
+        [
+            {"compact database", ?_test(compact_db(DbName))},
+            {"compact view", ?_test(compact_view(DbName))}
+        ]
+    }.
+
+should_use_deflate_1(DbName) ->
+    couch_config:set("couchdb", "file_compression", "deflate_1", false),
+    {
+        "Use deflate compression at level 1",
+        [
+            {"compact database", ?_test(compact_db(DbName))},
+            {"compact view", ?_test(compact_view(DbName))}
+        ]
+    }.
+
+should_use_deflate_9(DbName) ->
+    couch_config:set("couchdb", "file_compression", "deflate_9", false),
+    {
+        "Use deflate compression at level 9",
+        [
+            {"compact database", ?_test(compact_db(DbName))},
+            {"compact view", ?_test(compact_view(DbName))}
+        ]
+    }.
+
+should_use_snappy(DbName) ->
+    couch_config:set("couchdb", "file_compression", "snappy", false),
+    {
+        "Use snappy compression",
+        [
+            {"compact database", ?_test(compact_db(DbName))},
+            {"compact view", ?_test(compact_view(DbName))}
+        ]
+    }.
+
+%% Strict size ordering across methods: files compacted with no
+%% compression must be larger than snappy, snappy larger than deflate
+%% level 1, and level 1 larger than level 9.
+should_compare_compression_methods(DbName) ->
+    {"none > snappy > deflate_1 > deflate_9",
+     {timeout, ?TIMEOUT div 1000, ?_test(compare_compression_methods(DbName))}}.
+
+%% Recompact db and view under each setting in turn and compare the
+%% resulting disk sizes pairwise.
+compare_compression_methods(DbName) ->
+    couch_config:set("couchdb", "file_compression", "none", false),
+    compact_db(DbName),
+    compact_view(DbName),
+    DbSizeNone = db_disk_size(DbName),
+    ViewSizeNone = view_disk_size(DbName),
+
+    couch_config:set("couchdb", "file_compression", "snappy", false),
+    compact_db(DbName),
+    compact_view(DbName),
+    DbSizeSnappy = db_disk_size(DbName),
+    ViewSizeSnappy = view_disk_size(DbName),
+
+    ?assert(DbSizeNone > DbSizeSnappy),
+    ?assert(ViewSizeNone > ViewSizeSnappy),
+
+    couch_config:set("couchdb", "file_compression", "deflate_1", false),
+    compact_db(DbName),
+    compact_view(DbName),
+    DbSizeDeflate1 = db_disk_size(DbName),
+    ViewSizeDeflate1 = view_disk_size(DbName),
+
+    ?assert(DbSizeSnappy > DbSizeDeflate1),
+    ?assert(ViewSizeSnappy > ViewSizeDeflate1),
+
+    couch_config:set("couchdb", "file_compression", "deflate_9", false),
+    compact_db(DbName),
+    compact_view(DbName),
+    DbSizeDeflate9 = db_disk_size(DbName),
+    ViewSizeDeflate9 = view_disk_size(DbName),
+
+    ?assert(DbSizeDeflate1 > DbSizeDeflate9),
+    ?assert(ViewSizeDeflate1 > ViewSizeDeflate9).
+
+
+%% Insert NumDocs documents (each with a 1000-byte "string" field) in
+%% batches of 500 until the count is exhausted.
+populate_db(_Db, NumDocs) when NumDocs =< 0 ->
+    ok;
+populate_db(Db, NumDocs) ->
+    Docs = lists:map(
+        fun(_) ->
+            couch_doc:from_json_obj({[
+                {<<"_id">>, couch_uuids:random()},
+                {<<"string">>, ?l2b(lists:duplicate(1000, $X))}
+            ]})
+        end,
+        lists:seq(1, 500)),
+    {ok, _} = couch_db:update_docs(Db, Docs, []),
+    populate_db(Db, NumDocs - 500).
+
+%% Run the by_id view with stale=false to force a full index build.
+refresh_index(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
+    couch_mrview:query_view(Db, DDoc, <<"by_id">>, [{stale, false}]),
+    ok = couch_db:close(Db).
+
+%% Compact the database, wait for the compactor process to exit, and
+%% assert the file actually shrank.
+compact_db(DbName) ->
+    DiskSizeBefore = db_disk_size(DbName),
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, CompactPid} = couch_db:start_compact(Db),
+    MonRef = erlang:monitor(process, CompactPid),
+    receive
+        {'DOWN', MonRef, process, CompactPid, normal} ->
+            ok;
+        {'DOWN', MonRef, process, CompactPid, Reason} ->
+            erlang:error({assertion_failed,
+                          [{module, ?MODULE},
+                           {line, ?LINE},
+                           {reason, "Error compacting database: "
+                               ++ couch_util:to_list(Reason)}]})
+    after ?TIMEOUT ->
+        erlang:error({assertion_failed,
+                      [{module, ?MODULE},
+                       {line, ?LINE},
+                       {reason, "Timeout waiting for database compaction"}]})
+    end,
+    ok = couch_db:close(Db),
+    DiskSizeAfter = db_disk_size(DbName),
+    ?assert(DiskSizeBefore > DiskSizeAfter).
+
+%% Same for the view group; couch_mrview:compact with [monitor] returns
+%% a monitor reference for the compactor process.
+compact_view(DbName) ->
+    DiskSizeBefore = view_disk_size(DbName),
+    {ok, MonRef} = couch_mrview:compact(DbName, ?DDOC_ID, [monitor]),
+    receive
+        {'DOWN', MonRef, process, _CompactPid, normal} ->
+            ok;
+        {'DOWN', MonRef, process, _CompactPid, Reason} ->
+            erlang:error({assertion_failed,
+                          [{module, ?MODULE},
+                           {line, ?LINE},
+                           {reason, "Error compacting view group: "
+                               ++ couch_util:to_list(Reason)}]})
+    after ?TIMEOUT ->
+        erlang:error({assertion_failed,
+                      [{module, ?MODULE},
+                       {line, ?LINE},
+                       {reason, "Timeout waiting for view group compaction"}]})
+    end,
+    DiskSizeAfter = view_disk_size(DbName),
+    ?assert(DiskSizeBefore > DiskSizeAfter).
+
+%% disk_size of the database file, from couch_db:get_db_info/1.
+db_disk_size(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, Info} = couch_db:get_db_info(Db),
+    ok = couch_db:close(Db),
+    couch_util:get_value(disk_size, Info).
+
+%% disk_size of the ?DDOC_ID view group file.
+view_disk_size(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
+    {ok, Info} = couch_mrview:get_info(Db, DDoc),
+    ok = couch_db:close(Db),
+    couch_util:get_value(disk_size, Info).
diff --git a/test/couchdb/couchdb_http_proxy_tests.erl b/test/couchdb/couchdb_http_proxy_tests.erl
new file mode 100644
index 000000000..03ceca7c2
--- /dev/null
+++ b/test/couchdb/couchdb_http_proxy_tests.erl
@@ -0,0 +1,554 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_http_proxy_tests).
+
+-include("couch_eunit.hrl").
+
+-record(req, {method=get, path="", headers=[], body="", opts=[]}).
+
+-define(CONFIG_FIXTURE_TEMP,
+ begin
+ FileName = filename:join([?TEMPDIR, ?tempfile() ++ ".ini"]),
+ {ok, Fd} = file:open(FileName, write),
+ ok = file:truncate(Fd),
+ ok = file:close(Fd),
+ FileName
+ end).
+-define(TIMEOUT, 5000).
+
+
+%% Suite setup: boot a couch server plus a temp ini file for config
+%% changes, and install a proxy handler pointed at a port nobody
+%% listens on (for the connection-refused case).
+start() ->
+    % we have to write any config changes to a temp ini file to not lose them
+    % when the supervisor kills all children due to reaching the restart
+    % threshold (each httpd_global_handlers change causes a couch_httpd restart)
+    couch_server_sup:start_link(?CONFIG_CHAIN ++ [?CONFIG_FIXTURE_TEMP]),
+    % 49151 is IANA Reserved, let's assume no one is listening there
+    with_process_restart(couch_httpd, fun() ->
+        couch_config:set("httpd_global_handlers", "_error",
+            "{couch_httpd_proxy, handle_proxy_req, <<\"http://127.0.0.1:49151/\">>}"
+        )
+    end),
+    ok.
+
+%% Suite teardown.
+stop(_) ->
+    couch_server_sup:stop(),
+    ok.
+
+%% Per-test fixture: start the test_web backend and register a "_test"
+%% proxy handler that forwards to it; returns the test_web pid.
+setup() ->
+    {ok, Pid} = test_web:start_link(),
+    Value = lists:flatten(io_lib:format(
+        "{couch_httpd_proxy, handle_proxy_req, ~p}",
+        [list_to_binary(proxy_url())])),
+    with_process_restart(couch_httpd, fun() ->
+        couch_config:set("httpd_global_handlers", "_test", Value)
+    end),
+    Pid.
+
+%% Per-test cleanup: stop test_web and wait for its process to die.
+teardown(Pid) ->
+    erlang:monitor(process, Pid),
+    test_web:stop(),
+    receive
+        {'DOWN', _, _, Pid, _} ->
+            ok
+    after ?TIMEOUT ->
+        throw({timeout, test_web_stop})
+    end.
+
+
+%% EUnit generator: one server for the suite, a fresh test_web backend
+%% per case.
+http_proxy_test_() ->
+    {
+        "HTTP Proxy handler tests",
+        {
+            setup,
+            fun start/0, fun stop/1,
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun should_proxy_basic_request/1,
+                    fun should_return_alternative_status/1,
+                    fun should_respect_trailing_slash/1,
+                    fun should_proxy_headers/1,
+                    fun should_proxy_host_header/1,
+                    fun should_pass_headers_back/1,
+                    fun should_use_same_protocol_version/1,
+                    fun should_proxy_body/1,
+                    fun should_proxy_body_back/1,
+                    fun should_proxy_chunked_body/1,
+                    fun should_proxy_chunked_body_back/1,
+                    fun should_rewrite_location_header/1,
+                    fun should_not_rewrite_external_locations/1,
+                    fun should_rewrite_relative_location/1,
+                    fun should_refuse_connection_to_backend/1
+                ]
+            }
+
+        }
+    }.
+
+
+%% Each case hands check_request/3 a Remote fun (asserts what the
+%% backend receives, returns its response) and a Local fun (validates
+%% what the proxied client sees).
+
+%% Plain GET / with an empty body round-trips as a 200 "ok".
+should_proxy_basic_request(_) ->
+    Remote = fun(Req) ->
+        'GET' = Req:get(method),
+        "/" = Req:get(path),
+        0 = Req:get(body_length),
+        <<>> = Req:recv_body(),
+        {ok, {200, [{"Content-Type", "text/plain"}], "ok"}}
+    end,
+    Local = fun
+        ({ok, "200", _, "ok"}) ->
+            true;
+        (_) ->
+            false
+    end,
+    ?_test(check_request(#req{}, Remote, Local)).
+
+%% Non-200 backend status codes must be passed through unchanged.
+should_return_alternative_status(_) ->
+    Remote = fun(Req) ->
+        "/alternate_status" = Req:get(path),
+        {ok, {201, [], "ok"}}
+    end,
+    Local = fun
+        ({ok, "201", _, "ok"}) ->
+            true;
+        (_) ->
+            false
+    end,
+    Req = #req{path = "/alternate_status"},
+    ?_test(check_request(Req, Remote, Local)).
+
+%% A trailing slash in the request path must be preserved.
+should_respect_trailing_slash(_) ->
+    Remote = fun(Req) ->
+        "/trailing_slash/" = Req:get(path),
+        {ok, {200, [], "ok"}}
+    end,
+    Local = fun
+        ({ok, "200", _, "ok"}) ->
+            true;
+        (_) ->
+            false
+    end,
+    Req = #req{path="/trailing_slash/"},
+    ?_test(check_request(Req, Remote, Local)).
+
+%% Custom request headers must reach the backend.
+should_proxy_headers(_) ->
+    Remote = fun(Req) ->
+        "/passes_header" = Req:get(path),
+        "plankton" = Req:get_header_value("X-CouchDB-Ralph"),
+        {ok, {200, [], "ok"}}
+    end,
+    Local = fun
+        ({ok, "200", _, "ok"}) ->
+            true;
+        (_) ->
+            false
+    end,
+    Req = #req{
+        path="/passes_header",
+        headers=[{"X-CouchDB-Ralph", "plankton"}]
+    },
+    ?_test(check_request(Req, Remote, Local)).
+
+%% The client's Host header must be forwarded verbatim.
+should_proxy_host_header(_) ->
+    Remote = fun(Req) ->
+        "/passes_host_header" = Req:get(path),
+        "www.google.com" = Req:get_header_value("Host"),
+        {ok, {200, [], "ok"}}
+    end,
+    Local = fun
+        ({ok, "200", _, "ok"}) ->
+            true;
+        (_) ->
+            false
+    end,
+    Req = #req{
+        path="/passes_host_header",
+        headers=[{"Host", "www.google.com"}]
+    },
+    ?_test(check_request(Req, Remote, Local)).
+
+%% Backend response headers must be forwarded to the client.
+should_pass_headers_back(_) ->
+    Remote = fun(Req) ->
+        "/passes_header_back" = Req:get(path),
+        {ok, {200, [{"X-CouchDB-Plankton", "ralph"}], "ok"}}
+    end,
+    Local = fun
+        ({ok, "200", Headers, "ok"}) ->
+            lists:member({"X-CouchDB-Plankton", "ralph"}, Headers);
+        (_) ->
+            false
+    end,
+    Req = #req{path="/passes_header_back"},
+    ?_test(check_request(Req, Remote, Local)).
+
+%% The proxy must speak the same HTTP version the client used (1.0 here).
+should_use_same_protocol_version(_) ->
+    Remote = fun(Req) ->
+        "/uses_same_version" = Req:get(path),
+        {1, 0} = Req:get(version),
+        {ok, {200, [], "ok"}}
+    end,
+    Local = fun
+        ({ok, "200", _, "ok"}) ->
+            true;
+        (_) ->
+            false
+    end,
+    Req = #req{
+        path="/uses_same_version",
+        opts=[{http_vsn, {1, 0}}]
+    },
+    ?_test(check_request(Req, Remote, Local)).
+
+%% A PUT body must arrive at the backend intact.
+should_proxy_body(_) ->
+    Remote = fun(Req) ->
+        'PUT' = Req:get(method),
+        "/passes_body" = Req:get(path),
+        <<"Hooray!">> = Req:recv_body(),
+        {ok, {201, [], "ok"}}
+    end,
+    Local = fun
+        ({ok, "201", _, "ok"}) ->
+            true;
+        (_) ->
+            false
+    end,
+    Req = #req{
+        method=put,
+        path="/passes_body",
+        body="Hooray!"
+    },
+    ?_test(check_request(Req, Remote, Local)).
+
+%% A raw (Connection: close, EOF-terminated) backend body must be
+%% concatenated and delivered to the client.
+should_proxy_body_back(_) ->
+    BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
+    Remote = fun(Req) ->
+        'GET' = Req:get(method),
+        "/passes_eof_body" = Req:get(path),
+        {raw, {200, [{"Connection", "close"}], BodyChunks}}
+    end,
+    Local = fun
+        ({ok, "200", _, "foobarbazinga"}) ->
+            true;
+        (_) ->
+            false
+    end,
+    Req = #req{path="/passes_eof_body"},
+    ?_test(check_request(Req, Remote, Local)).
+
+%% A chunked request body must be streamed to the backend chunk by
+%% chunk; the backend asserts each chunk's size and content in order.
+should_proxy_chunked_body(_) ->
+    BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
+    Remote = fun(Req) ->
+        'POST' = Req:get(method),
+        "/passes_chunked_body" = Req:get(path),
+        RecvBody = fun
+            ({Length, Chunk}, [Chunk | Rest]) ->
+                Length = size(Chunk),
+                Rest;
+            ({0, []}, []) ->
+                ok
+        end,
+        ok = Req:stream_body(1024 * 1024, RecvBody, BodyChunks),
+        {ok, {201, [], "ok"}}
+    end,
+    Local = fun
+        ({ok, "201", _, "ok"}) ->
+            true;
+        (_) ->
+            false
+    end,
+    Req = #req{
+        method=post,
+        path="/passes_chunked_body",
+        headers=[{"Transfer-Encoding", "chunked"}],
+        body=chunked_body(BodyChunks)
+    },
+    ?_test(check_request(Req, Remote, Local)).
+
+%% A chunked backend response must be streamed back to the client; this
+%% uses ibrowse's async stream_to API to read headers and chunks.
+should_proxy_chunked_body_back(_) ->
+    ?_test(begin
+        Remote = fun(Req) ->
+            'GET' = Req:get(method),
+            "/passes_chunked_body_back" = Req:get(path),
+            BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
+            {chunked, {200, [{"Transfer-Encoding", "chunked"}], BodyChunks}}
+        end,
+        Req = #req{
+            path="/passes_chunked_body_back",
+            opts=[{stream_to, self()}]
+        },
+
+        Resp = check_request(Req, Remote, no_local),
+        ?assertMatch({ibrowse_req_id, _}, Resp),
+        {_, ReqId} = Resp,
+
+        % Grab headers from response
+        receive
+            {ibrowse_async_headers, ReqId, "200", Headers} ->
+                ?assertEqual("chunked",
+                    proplists:get_value("Transfer-Encoding", Headers)),
+                ibrowse:stream_next(ReqId)
+        after 1000 ->
+            throw({error, timeout})
+        end,
+
+        ?assertEqual(<<"foobarbazinga">>, recv_body(ReqId, [])),
+        ?assertEqual(was_ok, test_web:check_last())
+    end).
+
+should_refuse_connection_to_backend(_) ->
+ Local = fun
+ ({ok, "500", _, _}) ->
+ true;
+ (_) ->
+ false
+ end,
+ Req = #req{opts=[{url, server_url("/_error")}]},
+ ?_test(check_request(Req, no_remote, Local)).
+
+should_rewrite_location_header(_) ->
+ {
+ "Testing location header rewrites",
+ do_rewrite_tests([
+ {"Location", proxy_url() ++ "/foo/bar",
+ server_url() ++ "/foo/bar"},
+ {"Content-Location", proxy_url() ++ "/bing?q=2",
+ server_url() ++ "/bing?q=2"},
+ {"Uri", proxy_url() ++ "/zip#frag",
+ server_url() ++ "/zip#frag"},
+ {"Destination", proxy_url(),
+ server_url() ++ "/"}
+ ])
+ }.
+
+should_not_rewrite_external_locations(_) ->
+ {
+ "Testing no rewrite of external locations",
+ do_rewrite_tests([
+ {"Location", external_url() ++ "/search",
+ external_url() ++ "/search"},
+ {"Content-Location", external_url() ++ "/s?q=2",
+ external_url() ++ "/s?q=2"},
+ {"Uri", external_url() ++ "/f#f",
+ external_url() ++ "/f#f"},
+ {"Destination", external_url() ++ "/f?q=2#f",
+ external_url() ++ "/f?q=2#f"}
+ ])
+ }.
+
+should_rewrite_relative_location(_) ->
+ {
+ "Testing relative rewrites",
+ do_rewrite_tests([
+ {"Location", "/foo",
+ server_url() ++ "/foo"},
+ {"Content-Location", "bar",
+ server_url() ++ "/bar"},
+ {"Uri", "/zing?q=3",
+ server_url() ++ "/zing?q=3"},
+ {"Destination", "bing?q=stuff#yay",
+ server_url() ++ "/bing?q=stuff#yay"}
+ ])
+ }.
+
+
+do_rewrite_tests(Tests) ->
+ lists:map(fun({Header, Location, Url}) ->
+ should_rewrite_header(Header, Location, Url)
+ end, Tests).
+
+should_rewrite_header(Header, Location, Url) ->
+ Remote = fun(Req) ->
+ "/rewrite_test" = Req:get(path),
+ {ok, {302, [{Header, Location}], "ok"}}
+ end,
+ Local = fun
+ ({ok, "302", Headers, "ok"}) ->
+ ?assertEqual(Url, couch_util:get_value(Header, Headers)),
+ true;
+ (E) ->
+ ?debugFmt("~p", [E]),
+ false
+ end,
+ Req = #req{path="/rewrite_test"},
+ {Header, ?_test(check_request(Req, Remote, Local))}.
+
+
+server_url() ->
+ server_url("/_test").
+
+server_url(Resource) ->
+ Addr = couch_config:get("httpd", "bind_address"),
+ Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ lists:concat(["http://", Addr, ":", Port, Resource]).
+
+proxy_url() ->
+ "http://127.0.0.1:" ++ integer_to_list(test_web:get_port()).
+
+external_url() ->
+ "https://google.com".
+
+check_request(Req, Remote, Local) ->
+ case Remote of
+ no_remote ->
+ ok;
+ _ ->
+ test_web:set_assert(Remote)
+ end,
+ Url = case proplists:lookup(url, Req#req.opts) of
+ none ->
+ server_url() ++ Req#req.path;
+ {url, DestUrl} ->
+ DestUrl
+ end,
+ Opts = [{headers_as_is, true} | Req#req.opts],
+    Resp = ibrowse:send_req(
+ Url, Req#req.headers, Req#req.method, Req#req.body, Opts
+ ),
+ %?debugFmt("ibrowse response: ~p", [Resp]),
+ case Local of
+ no_local ->
+ ok;
+ _ ->
+ ?assert(Local(Resp))
+ end,
+ case {Remote, Local} of
+ {no_remote, _} ->
+ ok;
+ {_, no_local} ->
+ ok;
+ _ ->
+ ?assertEqual(was_ok, test_web:check_last())
+ end,
+ Resp.
+
+chunked_body(Chunks) ->
+ chunked_body(Chunks, []).
+
+chunked_body([], Acc) ->
+ iolist_to_binary(lists:reverse(Acc, "0\r\n\r\n"));
+chunked_body([Chunk | Rest], Acc) ->
+ Size = to_hex(size(Chunk)),
+ chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
+
+to_hex(Val) ->
+ to_hex(Val, []).
+
+to_hex(0, Acc) ->
+ Acc;
+to_hex(Val, Acc) ->
+ to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
+
+hex_char(V) when V < 10 -> $0 + V;
+hex_char(V) -> $A + V - 10.
+
+recv_body(ReqId, Acc) ->
+ receive
+ {ibrowse_async_response, ReqId, Data} ->
+ recv_body(ReqId, [Data | Acc]);
+ {ibrowse_async_response_end, ReqId} ->
+ iolist_to_binary(lists:reverse(Acc));
+ Else ->
+ throw({error, unexpected_mesg, Else})
+ after ?TIMEOUT ->
+ throw({error, timeout})
+ end.
+
+
+%% Copy from couch test_util @ master branch
+
+now_us() ->
+ {MegaSecs, Secs, MicroSecs} = now(),
+ (MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs.
+
+stop_sync(Name) ->
+ stop_sync(Name, shutdown).
+stop_sync(Name, Reason) ->
+ stop_sync(Name, Reason, 5000).
+stop_sync(Name, Reason, Timeout) when is_atom(Name) ->
+ stop_sync(whereis(Name), Reason, Timeout);
+stop_sync(Pid, Reason, Timeout) when is_atom(Reason) and is_pid(Pid) ->
+ stop_sync(Pid, fun() -> exit(Pid, Reason) end, Timeout);
+stop_sync(Pid, Fun, Timeout) when is_function(Fun) and is_pid(Pid) ->
+ MRef = erlang:monitor(process, Pid),
+ try
+ begin
+ catch unlink(Pid),
+ Res = (catch Fun()),
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ Res
+ after Timeout ->
+ timeout
+ end
+ end
+ after
+ erlang:demonitor(MRef, [flush])
+ end;
+stop_sync(_, _, _) -> error(badarg).
+
+stop_sync_throw(Name, Error) ->
+ stop_sync_throw(Name, shutdown, Error).
+stop_sync_throw(Name, Reason, Error) ->
+ stop_sync_throw(Name, Reason, Error, 5000).
+stop_sync_throw(Pid, Fun, Error, Timeout) ->
+ case stop_sync(Pid, Fun, Timeout) of
+ timeout ->
+ throw(Error);
+ Else ->
+ Else
+ end.
+
+with_process_restart(Name) ->
+ {Pid, true} = with_process_restart(
+ fun() -> exit(whereis(Name), shutdown) end, Name),
+ Pid.
+with_process_restart(Name, Fun) ->
+ with_process_restart(Name, Fun, 5000).
+with_process_restart(Name, Fun, Timeout) ->
+ ok = stop_sync(Name, Fun),
+ case wait_process(Name, Timeout) of
+ timeout ->
+ timeout;
+ Pid ->
+ Pid
+ end.
+
+wait_process(Name) ->
+ wait_process(Name, 5000).
+wait_process(Name, Timeout) ->
+ wait(fun() ->
+ case whereis(Name) of
+ undefined ->
+ wait;
+ Pid ->
+ Pid
+ end
+ end, Timeout).
+
+wait(Fun) ->
+ wait(Fun, 5000, 50).
+wait(Fun, Timeout) ->
+ wait(Fun, Timeout, 50).
+wait(Fun, Timeout, Delay) ->
+ Now = now_us(),
+ wait(Fun, Timeout * 1000, Delay, Now, Now).
+wait(_Fun, Timeout, _Delay, Started, Prev) when Prev - Started > Timeout ->
+ timeout;
+wait(Fun, Timeout, Delay, Started, _Prev) ->
+ case Fun() of
+ wait ->
+ ok = timer:sleep(Delay),
+ wait(Fun, Timeout, Delay, Started, now_us());
+ Else ->
+ Else
+ end.
diff --git a/test/etap/001-load.t b/test/couchdb/couchdb_modules_load_tests.erl
index 5ce0d9391..4eaa42bfc 100755..100644
--- a/test/etap/001-load.t
+++ b/test/couchdb/couchdb_modules_load_tests.erl
@@ -1,6 +1,3 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
@@ -13,10 +10,19 @@
% License for the specific language governing permissions and limitations under
% the License.
-% Test that we can load each module.
+-module(couchdb_modules_load_tests).
+
+-include("couch_eunit.hrl").
+
+
+modules_load_test_() ->
+ {
+        "Verify that all modules load",
+ should_load_modules()
+ }.
+
-main(_) ->
- test_util:init_code_path(),
+should_load_modules() ->
Modules = [
couch_auth_cache,
couch_btree,
@@ -56,13 +62,7 @@ main(_) ->
couch_work_queue,
json_stream_parse
],
+ [should_load_module(Mod) || Mod <- Modules].
- etap:plan(length(Modules)),
- lists:foreach(
- fun(Module) ->
- etap:loaded_ok(
- Module,
- lists:concat(["Loaded: ", Module])
- )
- end, Modules),
- etap:end_tests().
+should_load_module(Mod) ->
+ {atom_to_list(Mod), ?_assertMatch({module, _}, code:load_file(Mod))}.
diff --git a/test/couchdb/couchdb_os_daemons_tests.erl b/test/couchdb/couchdb_os_daemons_tests.erl
new file mode 100644
index 000000000..66bbbb5ec
--- /dev/null
+++ b/test/couchdb/couchdb_os_daemons_tests.erl
@@ -0,0 +1,329 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_os_daemons_tests).
+
+-include("couch_eunit.hrl").
+
+%% keep in sync with couchdb/couch_os_daemons.erl
+-record(daemon, {
+ port,
+ name,
+ cmd,
+ kill,
+ status=running,
+ cfg_patterns=[],
+ errors=[],
+ buf=[]
+}).
+
+-define(DAEMON_CONFIGER, "os_daemon_configer.escript").
+-define(DAEMON_LOOPER, "os_daemon_looper.escript").
+-define(DAEMON_BAD_PERM, "os_daemon_bad_perm.sh").
+-define(DAEMON_CAN_REBOOT, "os_daemon_can_reboot.sh").
+-define(DAEMON_DIE_ON_BOOT, "os_daemon_die_on_boot.sh").
+-define(DAEMON_DIE_QUICKLY, "os_daemon_die_quickly.sh").
+-define(DAEMON_CFGREG, "test_cfg_register").
+-define(DELAY, 100).
+-define(FIXTURES_BUILDDIR,
+ filename:join([?BUILDDIR, "test", "couchdb", "fixtures"])).
+-define(TIMEOUT, 1000).
+
+
+setup(DName) ->
+ {ok, CfgPid} = couch_config:start_link(?CONFIG_CHAIN),
+ {ok, OsDPid} = couch_os_daemons:start_link(),
+ Path = case DName of
+ ?DAEMON_CONFIGER ->
+ filename:join([?FIXTURES_BUILDDIR, DName]);
+ ?DAEMON_CFGREG ->
+ filename:join([?FIXTURES_BUILDDIR, DName]);
+ _ ->
+ filename:join([?FIXTURESDIR, DName])
+ end,
+ couch_config:set("os_daemons", DName, Path, false),
+ timer:sleep(?DELAY), % sleep a bit to let daemon set kill flag
+ {CfgPid, OsDPid}.
+
+teardown(_, {CfgPid, OsDPid}) ->
+ erlang:monitor(process, CfgPid),
+ couch_config:stop(),
+ receive
+ {'DOWN', _, _, CfgPid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, config_stop})
+ end,
+
+ erlang:monitor(process, OsDPid),
+ exit(OsDPid, normal),
+ receive
+ {'DOWN', _, _, OsDPid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, os_daemon_stop})
+ end.
+
+
+os_daemons_test_() ->
+ {
+ "OS Daemons tests",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{?DAEMON_LOOPER, Fun} || Fun <- [
+ fun should_check_daemon/2,
+ fun should_check_daemon_table_form/2,
+ fun should_clean_tables_on_daemon_remove/2,
+ fun should_spawn_multiple_daemons/2,
+ fun should_keep_alive_one_daemon_on_killing_other/2
+ ]]
+ }
+ }.
+
+configuration_reader_test_() ->
+ {
+ "OS Daemon requests CouchDB configuration",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{?DAEMON_CONFIGER,
+ fun should_read_write_config_settings_by_daemon/2}]
+
+ }
+ }.
+
+error_test_() ->
+ {
+ "OS Daemon process error tests",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{?DAEMON_BAD_PERM, fun should_fail_due_to_lack_of_permissions/2},
+ {?DAEMON_DIE_ON_BOOT, fun should_die_on_boot/2},
+ {?DAEMON_DIE_QUICKLY, fun should_die_quickly/2},
+ {?DAEMON_CAN_REBOOT, fun should_not_being_halted/2}]
+ }
+ }.
+
+configuration_register_test_() ->
+ {
+ "OS daemon subscribed to config changes",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{?DAEMON_CFGREG, Fun} || Fun <- [
+ fun should_start_daemon/2,
+ fun should_restart_daemon_on_section_change/2,
+ fun should_not_restart_daemon_on_changing_ignored_section_key/2,
+ fun should_restart_daemon_on_section_key_change/2
+ ]]
+ }
+ }.
+
+
+should_check_daemon(DName, _) ->
+ ?_test(begin
+ {ok, [D]} = couch_os_daemons:info([table]),
+ check_daemon(D, DName)
+ end).
+
+should_check_daemon_table_form(DName, _) ->
+ ?_test(begin
+ {ok, Tab} = couch_os_daemons:info(),
+ [D] = ets:tab2list(Tab),
+ check_daemon(D, DName)
+ end).
+
+should_clean_tables_on_daemon_remove(DName, _) ->
+ ?_test(begin
+ couch_config:delete("os_daemons", DName, false),
+ {ok, Tab2} = couch_os_daemons:info(),
+        ?assertEqual([], ets:tab2list(Tab2))
+ end).
+
+should_spawn_multiple_daemons(DName, _) ->
+ ?_test(begin
+ couch_config:set("os_daemons", "bar",
+ filename:join([?FIXTURESDIR, DName]), false),
+ couch_config:set("os_daemons", "baz",
+ filename:join([?FIXTURESDIR, DName]), false),
+ timer:sleep(?DELAY),
+ {ok, Daemons} = couch_os_daemons:info([table]),
+ lists:foreach(fun(D) ->
+ check_daemon(D)
+ end, Daemons),
+ {ok, Tab} = couch_os_daemons:info(),
+ lists:foreach(fun(D) ->
+ check_daemon(D)
+ end, ets:tab2list(Tab))
+ end).
+
+should_keep_alive_one_daemon_on_killing_other(DName, _) ->
+ ?_test(begin
+ couch_config:set("os_daemons", "bar",
+ filename:join([?FIXTURESDIR, DName]), false),
+ timer:sleep(?DELAY),
+ {ok, Daemons} = couch_os_daemons:info([table]),
+ lists:foreach(fun(D) ->
+ check_daemon(D)
+ end, Daemons),
+
+ couch_config:delete("os_daemons", "bar", false),
+ timer:sleep(?DELAY),
+ {ok, [D2]} = couch_os_daemons:info([table]),
+ check_daemon(D2, DName),
+
+ {ok, Tab} = couch_os_daemons:info(),
+ [T] = ets:tab2list(Tab),
+ check_daemon(T, DName)
+ end).
+
+should_read_write_config_settings_by_daemon(DName, _) ->
+ ?_test(begin
+        % have to wait till the daemon runs all its tests
+ % see daemon's script for more info
+ timer:sleep(?TIMEOUT),
+ {ok, [D]} = couch_os_daemons:info([table]),
+ check_daemon(D, DName)
+ end).
+
+should_fail_due_to_lack_of_permissions(DName, _) ->
+ ?_test(should_halts(DName, 1000)).
+
+should_die_on_boot(DName, _) ->
+ ?_test(should_halts(DName, 1000)).
+
+should_die_quickly(DName, _) ->
+ ?_test(should_halts(DName, 4000)).
+
+should_not_being_halted(DName, _) ->
+ ?_test(begin
+ timer:sleep(1000),
+ {ok, [D1]} = couch_os_daemons:info([table]),
+ check_daemon(D1, DName, running, 0),
+
+ % Should reboot every two seconds. We're at 1s, so wait
+ % until 3s to be in the middle of the next invocation's
+ % life span.
+
+ timer:sleep(2000),
+ {ok, [D2]} = couch_os_daemons:info([table]),
+ check_daemon(D2, DName, running, 1),
+
+ % If the kill command changed, that means we rebooted the process.
+ ?assertNotEqual(D1#daemon.kill, D2#daemon.kill)
+ end).
+
+should_halts(DName, Time) ->
+ timer:sleep(Time),
+ {ok, [D]} = couch_os_daemons:info([table]),
+ check_dead(D, DName),
+ couch_config:delete("os_daemons", DName, false).
+
+should_start_daemon(DName, _) ->
+ ?_test(begin
+ wait_for_start(10),
+ {ok, [D]} = couch_os_daemons:info([table]),
+ check_daemon(D, DName, running, 0, [{"s1"}, {"s2", "k"}])
+ end).
+
+should_restart_daemon_on_section_change(DName, _) ->
+ ?_test(begin
+ wait_for_start(10),
+ {ok, [D1]} = couch_os_daemons:info([table]),
+ couch_config:set("s1", "k", "foo", false),
+ wait_for_restart(10),
+ {ok, [D2]} = couch_os_daemons:info([table]),
+ check_daemon(D2, DName, running, 0, [{"s1"}, {"s2", "k"}]),
+ ?assertNotEqual(D1, D2)
+ end).
+
+should_not_restart_daemon_on_changing_ignored_section_key(_, _) ->
+ ?_test(begin
+ wait_for_start(10),
+ {ok, [D1]} = couch_os_daemons:info([table]),
+ couch_config:set("s2", "k2", "baz", false),
+ timer:sleep(?DELAY),
+ {ok, [D2]} = couch_os_daemons:info([table]),
+ ?assertEqual(D1, D2)
+ end).
+
+should_restart_daemon_on_section_key_change(DName, _) ->
+ ?_test(begin
+ wait_for_start(10),
+ {ok, [D1]} = couch_os_daemons:info([table]),
+ couch_config:set("s2", "k", "bingo", false),
+ wait_for_restart(10),
+ {ok, [D2]} = couch_os_daemons:info([table]),
+ check_daemon(D2, DName, running, 0, [{"s1"}, {"s2", "k"}]),
+ ?assertNotEqual(D1, D2)
+ end).
+
+
+wait_for_start(0) ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout on waiting daemon for start"}]});
+wait_for_start(N) ->
+ case couch_os_daemons:info([table]) of
+ {ok, []} ->
+ timer:sleep(?DELAY),
+ wait_for_start(N - 1);
+ _ ->
+ timer:sleep(?TIMEOUT)
+ end.
+
+wait_for_restart(0) ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout on waiting daemon for restart"}]});
+wait_for_restart(N) ->
+ {ok, [D]} = couch_os_daemons:info([table]),
+ case D#daemon.status of
+ restarting ->
+ timer:sleep(?DELAY),
+ wait_for_restart(N - 1);
+ _ ->
+ timer:sleep(?TIMEOUT)
+ end.
+
+check_daemon(D) ->
+ check_daemon(D, D#daemon.name).
+
+check_daemon(D, Name) ->
+ check_daemon(D, Name, running).
+
+check_daemon(D, Name, Status) ->
+ check_daemon(D, Name, Status, 0).
+
+check_daemon(D, Name, Status, Errs) ->
+ check_daemon(D, Name, Status, Errs, []).
+
+check_daemon(D, Name, Status, Errs, CfgPatterns) ->
+ ?assert(is_port(D#daemon.port)),
+ ?assertEqual(Name, D#daemon.name),
+ ?assertNotEqual(undefined, D#daemon.kill),
+ ?assertEqual(Status, D#daemon.status),
+ ?assertEqual(CfgPatterns, D#daemon.cfg_patterns),
+ ?assertEqual(Errs, length(D#daemon.errors)),
+ ?assertEqual([], D#daemon.buf).
+
+check_dead(D, Name) ->
+ ?assert(is_port(D#daemon.port)),
+ ?assertEqual(Name, D#daemon.name),
+ ?assertNotEqual(undefined, D#daemon.kill),
+ ?assertEqual(halted, D#daemon.status),
+ ?assertEqual(nil, D#daemon.errors),
+ ?assertEqual(nil, D#daemon.buf).
diff --git a/test/couchdb/couchdb_os_proc_pool.erl b/test/couchdb/couchdb_os_proc_pool.erl
new file mode 100644
index 000000000..1bb266e8a
--- /dev/null
+++ b/test/couchdb/couchdb_os_proc_pool.erl
@@ -0,0 +1,179 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_os_proc_pool).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(TIMEOUT, 3000).
+
+
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ couch_config:set("query_server_config", "os_process_limit", "3", false),
+ Pid.
+
+stop(Pid) ->
+ couch_server_sup:stop(),
+ erlang:monitor(process, Pid),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
+
+
+os_proc_pool_test_() ->
+ {
+ "OS processes pool tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ [
+ should_block_new_proc_on_full_pool(),
+ should_free_slot_on_proc_unexpected_exit()
+ ]
+ }
+ }.
+
+
+should_block_new_proc_on_full_pool() ->
+ ?_test(begin
+ Client1 = spawn_client(),
+ Client2 = spawn_client(),
+ Client3 = spawn_client(),
+
+ ?assertEqual(ok, ping_client(Client1)),
+ ?assertEqual(ok, ping_client(Client2)),
+ ?assertEqual(ok, ping_client(Client3)),
+
+ Proc1 = get_client_proc(Client1, "1"),
+ Proc2 = get_client_proc(Client2, "2"),
+ Proc3 = get_client_proc(Client3, "3"),
+
+ ?assertNotEqual(Proc1, Proc2),
+ ?assertNotEqual(Proc2, Proc3),
+ ?assertNotEqual(Proc3, Proc1),
+
+ Client4 = spawn_client(),
+ ?assertEqual(timeout, ping_client(Client4)),
+
+ ?assertEqual(ok, stop_client(Client1)),
+ ?assertEqual(ok, ping_client(Client4)),
+
+ Proc4 = get_client_proc(Client4, "4"),
+ ?assertEqual(Proc1, Proc4),
+
+ lists:map(fun(C) ->
+ ?assertEqual(ok, stop_client(C))
+ end, [Client2, Client3, Client4])
+ end).
+
+should_free_slot_on_proc_unexpected_exit() ->
+ ?_test(begin
+ Client1 = spawn_client(),
+ Client2 = spawn_client(),
+ Client3 = spawn_client(),
+
+ ?assertEqual(ok, ping_client(Client1)),
+ ?assertEqual(ok, ping_client(Client2)),
+ ?assertEqual(ok, ping_client(Client3)),
+
+ Proc1 = get_client_proc(Client1, "1"),
+ Proc2 = get_client_proc(Client2, "2"),
+ Proc3 = get_client_proc(Client3, "3"),
+
+ ?assertNotEqual(Proc1, Proc2),
+ ?assertNotEqual(Proc2, Proc3),
+ ?assertNotEqual(Proc3, Proc1),
+
+ ?assertEqual(ok, kill_client(Client1)),
+
+ Client4 = spawn_client(),
+ ?assertEqual(ok, ping_client(Client4)),
+
+ Proc4 = get_client_proc(Client4, "4"),
+ ?assertNotEqual(Proc4, Proc1),
+ ?assertNotEqual(Proc2, Proc4),
+ ?assertNotEqual(Proc3, Proc4),
+
+ lists:map(fun(C) ->
+ ?assertEqual(ok, stop_client(C))
+ end, [Client2, Client3, Client4])
+ end).
+
+
+spawn_client() ->
+ Parent = self(),
+ Ref = make_ref(),
+ Pid = spawn(fun() ->
+ Proc = couch_query_servers:get_os_process(<<"javascript">>),
+ loop(Parent, Ref, Proc)
+ end),
+ {Pid, Ref}.
+
+ping_client({Pid, Ref}) ->
+ Pid ! ping,
+ receive
+ {pong, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end.
+
+get_client_proc({Pid, Ref}, ClientName) ->
+ Pid ! get_proc,
+ receive
+ {proc, Ref, Proc} -> Proc
+ after ?TIMEOUT ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout getting client "
+ ++ ClientName ++ " proc"}]})
+ end.
+
+stop_client({Pid, Ref}) ->
+ Pid ! stop,
+ receive
+ {stop, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end.
+
+kill_client({Pid, Ref}) ->
+ Pid ! die,
+ receive
+ {die, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end.
+
+loop(Parent, Ref, Proc) ->
+ receive
+ ping ->
+ Parent ! {pong, Ref},
+ loop(Parent, Ref, Proc);
+ get_proc ->
+ Parent ! {proc, Ref, Proc},
+ loop(Parent, Ref, Proc);
+ stop ->
+ couch_query_servers:ret_os_process(Proc),
+ Parent ! {stop, Ref};
+ die ->
+ Parent ! {die, Ref},
+ exit(some_error)
+ end.
diff --git a/test/couchdb/couchdb_update_conflicts_tests.erl b/test/couchdb/couchdb_update_conflicts_tests.erl
new file mode 100644
index 000000000..fc48765f8
--- /dev/null
+++ b/test/couchdb/couchdb_update_conflicts_tests.erl
@@ -0,0 +1,243 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_update_conflicts_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(i2l(I), integer_to_list(I)).
+-define(ADMIN_USER, {userctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(DOC_ID, <<"foobar">>).
+-define(NUM_CLIENTS, [100, 500, 1000, 2000, 5000, 10000]).
+-define(TIMEOUT, 30000).
+
+
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ couch_config:set("couchdb", "delayed_commits", "true", false),
+ Pid.
+
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
+
+setup() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_USER, overwrite]),
+ Doc = couch_doc:from_json_obj({[{<<"_id">>, ?DOC_ID},
+ {<<"value">>, 0}]}),
+ {ok, Rev} = couch_db:update_doc(Db, Doc, []),
+ ok = couch_db:close(Db),
+ RevStr = couch_doc:rev_to_str(Rev),
+ {DbName, RevStr}.
+setup(_) ->
+ setup().
+
+teardown({DbName, _}) ->
+ ok = couch_server:delete(DbName, []),
+ ok.
+teardown(_, {DbName, _RevStr}) ->
+ teardown({DbName, _RevStr}).
+
+
+view_indexes_cleanup_test_() ->
+ {
+ "Update conflicts",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ [
+ concurrent_updates(),
+ couchdb_188()
+ ]
+ }
+ }.
+
+concurrent_updates()->
+ {
+ "Concurrent updates",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{NumClients, fun should_concurrently_update_doc/2}
+ || NumClients <- ?NUM_CLIENTS]
+ }
+ }.
+
+couchdb_188()->
+ {
+ "COUCHDB-188",
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [fun should_bulk_create_delete_doc/1]
+ }
+ }.
+
+
+should_concurrently_update_doc(NumClients, {DbName, InitRev})->
+ {?i2l(NumClients) ++ " clients",
+ {inorder,
+ [{"update doc",
+ {timeout, ?TIMEOUT div 1000,
+ ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}},
+ {"ensure in single leaf",
+ ?_test(ensure_in_single_revision_leaf(DbName))}]}}.
+
+should_bulk_create_delete_doc({DbName, InitRev})->
+ ?_test(bulk_delete_create(DbName, InitRev)).
+
+
+concurrent_doc_update(NumClients, DbName, InitRev) ->
+ Clients = lists:map(
+ fun(Value) ->
+ ClientDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, ?DOC_ID},
+ {<<"_rev">>, InitRev},
+ {<<"value">>, Value}
+ ]}),
+ Pid = spawn_client(DbName, ClientDoc),
+ {Value, Pid, erlang:monitor(process, Pid)}
+ end,
+ lists:seq(1, NumClients)),
+
+ lists:foreach(fun({_, Pid, _}) -> Pid ! go end, Clients),
+
+ {NumConflicts, SavedValue} = lists:foldl(
+ fun({Value, Pid, MonRef}, {AccConflicts, AccValue}) ->
+ receive
+ {'DOWN', MonRef, process, Pid, {ok, _NewRev}} ->
+ {AccConflicts, Value};
+ {'DOWN', MonRef, process, Pid, conflict} ->
+ {AccConflicts + 1, AccValue};
+ {'DOWN', MonRef, process, Pid, Error} ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Client " ++ ?i2l(Value)
+ ++ " got update error: "
+ ++ couch_util:to_list(Error)}]})
+ after ?TIMEOUT ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout waiting for client "
+ ++ ?i2l(Value) ++ " to die"}]})
+ end
+ end, {0, nil}, Clients),
+ ?assertEqual(NumClients - 1, NumConflicts),
+
+ {ok, Db} = couch_db:open_int(DbName, []),
+ {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
+ ok = couch_db:close(Db),
+ ?assertEqual(1, length(Leaves)),
+
+ [{ok, Doc2}] = Leaves,
+ {JsonDoc} = couch_doc:to_json_obj(Doc2, []),
+ ?assertEqual(SavedValue, couch_util:get_value(<<"value">>, JsonDoc)).
+
+ensure_in_single_revision_leaf(DbName) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
+ ok = couch_db:close(Db),
+ [{ok, Doc}] = Leaves,
+
+ %% FIXME: server restart won't work from test side
+ %% stop(ok),
+ %% start(),
+
+ {ok, Db2} = couch_db:open_int(DbName, []),
+ {ok, Leaves2} = couch_db:open_doc_revs(Db2, ?DOC_ID, all, []),
+ ok = couch_db:close(Db2),
+ ?assertEqual(1, length(Leaves2)),
+
+ [{ok, Doc2}] = Leaves,
+ ?assertEqual(Doc, Doc2).
+
+bulk_delete_create(DbName, InitRev) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+
+ DeletedDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, ?DOC_ID},
+ {<<"_rev">>, InitRev},
+ {<<"_deleted">>, true}
+ ]}),
+ NewDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, ?DOC_ID},
+ {<<"value">>, 666}
+ ]}),
+
+ {ok, Results} = couch_db:update_docs(Db, [DeletedDoc, NewDoc], []),
+ ok = couch_db:close(Db),
+
+ ?assertEqual(2, length([ok || {ok, _} <- Results])),
+ [{ok, Rev1}, {ok, Rev2}] = Results,
+
+ {ok, Db2} = couch_db:open_int(DbName, []),
+ {ok, [{ok, Doc1}]} = couch_db:open_doc_revs(
+ Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]),
+ {ok, [{ok, Doc2}]} = couch_db:open_doc_revs(
+ Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]),
+ ok = couch_db:close(Db2),
+
+ {Doc1Props} = couch_doc:to_json_obj(Doc1, []),
+ {Doc2Props} = couch_doc:to_json_obj(Doc2, []),
+
+ %% Document was deleted
+ ?assert(couch_util:get_value(<<"_deleted">>, Doc1Props)),
+ %% New document not flagged as deleted
+ ?assertEqual(undefined, couch_util:get_value(<<"_deleted">>,
+ Doc2Props)),
+ %% New leaf revision has the right value
+ ?assertEqual(666, couch_util:get_value(<<"value">>,
+ Doc2Props)),
+ %% Deleted document has no conflicts
+ ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
+ Doc1Props)),
+ %% Deleted document has no deleted conflicts
+ ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
+ Doc1Props)),
+ %% New leaf revision doesn't have conflicts
+ ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
+        Doc2Props)),
+ %% New leaf revision doesn't have deleted conflicts
+ ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
+        Doc2Props)),
+
+ %% Deleted revision has position 2
+ ?assertEqual(2, element(1, Rev1)),
+ %% New leaf revision has position 1
+ ?assertEqual(1, element(1, Rev2)).
+
+
+spawn_client(DbName, Doc) ->
+ spawn(fun() ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ receive
+ go -> ok
+ end,
+ erlang:yield(),
+ Result = try
+ couch_db:update_doc(Db, Doc, [])
+ catch _:Error ->
+ Error
+ end,
+ ok = couch_db:close(Db),
+ exit(Result)
+ end).
diff --git a/test/couchdb/couchdb_vhosts_tests.erl b/test/couchdb/couchdb_vhosts_tests.erl
new file mode 100644
index 000000000..94b195769
--- /dev/null
+++ b/test/couchdb/couchdb_vhosts_tests.erl
@@ -0,0 +1,441 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_vhosts_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(TIMEOUT, 1000).
+-define(iofmt(S, A), lists:flatten(io_lib:format(S, A))).
+
+
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Pid.
+
+%% Stop the couch server supervisor and wait (up to ?TIMEOUT ms) for the
+%% supervisor process to actually terminate, so the next test starts clean.
+%% Throws {timeout, server_stop} if the 'DOWN' message never arrives.
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
+
+%% Per-test fixture: create a temp db holding one regular doc ("doc1") and
+%% a design doc ("_design/doc1") whose rewrite rule maps "/" onto a show
+%% function that echoes both req.requested_path and req.path as JSON.
+%% Returns {BaseUrl, DbNameAsList} consumed by the test funs and teardown/1.
+setup() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+ Doc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc1">>},
+ {<<"value">>, 666}
+ ]}),
+
+ Doc1 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/doc1">>},
+ {<<"shows">>, {[
+ {<<"test">>, <<"function(doc, req) {
+ return { json: {
+ requested_path: '/' + req.requested_path.join('/'),
+ path: '/' + req.path.join('/')}};}">>}
+ ]}},
+ {<<"rewrites">>, [
+ {[
+ {<<"from">>, <<"/">>},
+ {<<"to">>, <<"_show/test">>}
+ ]}
+ ]}
+ ]}),
+ {ok, _} = couch_db:update_docs(Db, [Doc, Doc1]),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db),
+
+ %% base URL of the httpd instance started by start/0
+ Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
+ Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ Url = "http://" ++ Addr ++ ":" ++ Port,
+ {Url, ?b2l(DbName)}.
+
+%% Per-test fixture for the OAuth suite: create a temp db, configure an
+%% OAuth consumer/token pair ("consec1"/"otoksec1") for user "joe", require
+%% valid users on all requests, and map the "oauth-example.com" vhost onto
+%% a rewrite that resolves to the design doc's _info endpoint.
+%% Returns {BaseUrl, DbNameAsList}, same shape as setup/0.
+setup_oauth() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+
+ %% point the auth system at a fresh (empty) authentication db
+ couch_config:set("couch_httpd_auth", "authentication_db",
+ ?b2l(?tempdb()), false),
+ couch_config:set("oauth_token_users", "otoksec1", "joe", false),
+ couch_config:set("oauth_consumer_secrets", "consec1", "foo", false),
+ couch_config:set("oauth_token_secrets", "otoksec1", "foobar", false),
+ couch_config:set("couch_httpd_auth", "require_valid_user", "true", false),
+
+ ok = couch_config:set(
+ "vhosts", "oauth-example.com",
+ "/" ++ ?b2l(DbName) ++ "/_design/test/_rewrite/foobar", false),
+
+ DDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/test">>},
+ {<<"language">>, <<"javascript">>},
+ {<<"rewrites">>, [
+ {[
+ {<<"from">>, <<"foobar">>},
+ {<<"to">>, <<"_info">>}
+ ]}
+ ]}
+ ]}),
+ {ok, _} = couch_db:update_doc(Db, DDoc, []),
+
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db),
+
+ Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
+ Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ Url = "http://" ++ Addr ++ ":" ++ Port,
+ {Url, ?b2l(DbName)}.
+
+%% Delete the temporary database created by setup/0 or setup_oauth/0.
+teardown({_, DbName}) ->
+ ok = couch_server:delete(?l2b(DbName), []),
+ ok.
+
+
+vhosts_test_() ->
+ {
+ "Virtual Hosts rewrite tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_return_database_info/1,
+ fun should_return_revs_info/1,
+ fun should_serve_utils_for_vhost/1,
+ fun should_return_virtual_request_path_field_in_request/1,
+ fun should_return_real_request_path_field_in_request/1,
+ fun should_match_wildcard_vhost/1,
+ fun should_return_db_info_for_wildcard_vhost_for_custom_db/1,
+ fun should_replace_rewrite_variables_for_db_and_doc/1,
+ fun should_return_db_info_for_vhost_with_resource/1,
+ fun should_return_revs_info_for_vhost_with_resource/1,
+ fun should_return_db_info_for_vhost_with_wildcard_resource/1,
+ fun should_return_path_for_vhost_with_wildcard_host/1
+ ]
+ }
+ }
+ }.
+
+oauth_test_() ->
+ {
+ "Virtual Hosts OAuth tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup_oauth/0, fun teardown/1,
+ [
+ fun should_require_auth/1,
+ fun should_succeed_oauth/1,
+ fun should_fail_oauth_with_wrong_credentials/1
+ ]
+ }
+ }
+ }.
+
+
+should_return_database_info({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts", "example.com", "/" ++ DbName, false),
+ case test_request:get(Url, [], [{host_header, "example.com"}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = ejson:decode(Body),
+ ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_return_revs_info({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts", "example.com", "/" ++ DbName, false),
+ case test_request:get(Url ++ "/doc1?revs_info=true", [],
+ [{host_header, "example.com"}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = ejson:decode(Body),
+ ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_serve_utils_for_vhost({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts", "example.com", "/" ++ DbName, false),
+ case test_request:get(Url ++ "/_utils/index.html", [],
+ [{host_header, "example.com"}]) of
+ {ok, _, _, Body} ->
+ ?assertMatch(<<"<!DOCTYPE html>", _/binary>>, Body);
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_return_virtual_request_path_field_in_request({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts", "example1.com",
+ "/" ++ DbName ++ "/_design/doc1/_rewrite/",
+ false),
+ case test_request:get(Url, [], [{host_header, "example1.com"}]) of
+ {ok, _, _, Body} ->
+ {Json} = ejson:decode(Body),
+ ?assertEqual(<<"/">>,
+ proplists:get_value(<<"requested_path">>, Json));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_return_real_request_path_field_in_request({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts", "example1.com",
+ "/" ++ DbName ++ "/_design/doc1/_rewrite/",
+ false),
+ case test_request:get(Url, [], [{host_header, "example1.com"}]) of
+ {ok, _, _, Body} ->
+ {Json} = ejson:decode(Body),
+ Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
+ ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_match_wildcard_vhost({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts", "*.example.com",
+ "/" ++ DbName ++ "/_design/doc1/_rewrite", false),
+ case test_request:get(Url, [], [{host_header, "test.example.com"}]) of
+ {ok, _, _, Body} ->
+ {Json} = ejson:decode(Body),
+ Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
+ ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_return_db_info_for_wildcard_vhost_for_custom_db({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts", ":dbname.example1.com",
+ "/:dbname", false),
+ Host = DbName ++ ".example1.com",
+ case test_request:get(Url, [], [{host_header, Host}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = ejson:decode(Body),
+ ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts",":appname.:dbname.example1.com",
+ "/:dbname/_design/:appname/_rewrite/", false),
+ Host = "doc1." ++ DbName ++ ".example1.com",
+ case test_request:get(Url, [], [{host_header, Host}]) of
+ {ok, _, _, Body} ->
+ {Json} = ejson:decode(Body),
+ Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
+ ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_return_db_info_for_vhost_with_resource({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts",
+ "example.com/test", "/" ++ DbName, false),
+ ReqUrl = Url ++ "/test",
+ case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = ejson:decode(Body),
+ ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+
+should_return_revs_info_for_vhost_with_resource({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts",
+ "example.com/test", "/" ++ DbName, false),
+ ReqUrl = Url ++ "/test/doc1?revs_info=true",
+ case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = ejson:decode(Body),
+ ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_return_db_info_for_vhost_with_wildcard_resource({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts", "*.example2.com/test", "/*", false),
+ ReqUrl = Url ++ "/test",
+ Host = DbName ++ ".example2.com",
+ case test_request:get(ReqUrl, [], [{host_header, Host}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = ejson:decode(Body),
+ ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_return_path_for_vhost_with_wildcard_host({Url, DbName}) ->
+ ?_test(begin
+ ok = couch_config:set("vhosts", "*/test1",
+ "/" ++ DbName ++ "/_design/doc1/_show/test",
+ false),
+ case test_request:get(Url ++ "/test1") of
+ {ok, _, _, Body} ->
+ {Json} = ejson:decode(Body),
+ Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
+ ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+%% With require_valid_user enabled (setup_oauth/0), an unauthenticated
+%% request through the oauth vhost must yield 401 with error "unauthorized".
+should_require_auth({Url, _}) ->
+ ?_test(begin
+ case test_request:get(Url, [], [{host_header, "oauth-example.com"}]) of
+ {ok, Code, _, Body} ->
+ ?assertEqual(401, Code),
+ {JsonBody} = ejson:decode(Body),
+ ?assertEqual(<<"unauthorized">>,
+ couch_util:get_value(<<"error">>, JsonBody));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+%% A correctly HMAC-SHA1-signed OAuth request through the vhost must reach
+%% the rewritten _info endpoint (see setup_oauth/0) and return 200.
+%% NOTE(review): the asserted <<"name">> value <<"test">> presumably comes
+%% from the _design/test _info response — confirm against the _info schema.
+should_succeed_oauth({Url, _}) ->
+ ?_test(begin
+ %% create the "joe" user doc the oauth token maps to (setup_oauth/0
+ %% configured otoksec1 -> joe)
+ AuthDbName = couch_config:get("couch_httpd_auth", "authentication_db"),
+ JoeDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"org.couchdb.user:joe">>},
+ {<<"type">>, <<"user">>},
+ {<<"name">>, <<"joe">>},
+ {<<"roles">>, []},
+ {<<"password_sha">>, <<"fe95df1ca59a9b567bdca5cbaf8412abd6e06121">>},
+ {<<"salt">>, <<"4e170ffeb6f34daecfd814dfb4001a73">>}
+ ]}),
+ {ok, AuthDb} = couch_db:open_int(?l2b(AuthDbName), [?ADMIN_USER]),
+ {ok, _} = couch_db:update_doc(AuthDb, JoeDoc, [?ADMIN_USER]),
+
+ %% sign against the vhost URL, then issue the request to the real
+ %% server address with a matching Host header
+ Host = "oauth-example.com",
+ Consumer = {"consec1", "foo", hmac_sha1},
+ SignedParams = oauth:sign(
+ "GET", "http://" ++ Host ++ "/", [], Consumer, "otoksec1", "foobar"),
+ OAuthUrl = oauth:uri(Url, SignedParams),
+
+ case test_request:get(OAuthUrl, [], [{host_header, Host}]) of
+ {ok, Code, _, Body} ->
+ ?assertEqual(200, Code),
+ {JsonBody} = ejson:decode(Body),
+ ?assertEqual(<<"test">>,
+ couch_util:get_value(<<"name">>, JsonBody));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
+
+should_fail_oauth_with_wrong_credentials({Url, _}) ->
+ ?_test(begin
+ AuthDbName = couch_config:get("couch_httpd_auth", "authentication_db"),
+ JoeDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"org.couchdb.user:joe">>},
+ {<<"type">>, <<"user">>},
+ {<<"name">>, <<"joe">>},
+ {<<"roles">>, []},
+ {<<"password_sha">>, <<"fe95df1ca59a9b567bdca5cbaf8412abd6e06121">>},
+ {<<"salt">>, <<"4e170ffeb6f34daecfd814dfb4001a73">>}
+ ]}),
+ {ok, AuthDb} = couch_db:open_int(?l2b(AuthDbName), [?ADMIN_USER]),
+ {ok, _} = couch_db:update_doc(AuthDb, JoeDoc, [?ADMIN_USER]),
+
+ Host = "oauth-example.com",
+ Consumer = {"consec1", "bad_secret", hmac_sha1},
+ SignedParams = oauth:sign(
+ "GET", "http://" ++ Host ++ "/", [], Consumer, "otoksec1", "foobar"),
+ OAuthUrl = oauth:uri(Url, SignedParams),
+
+ case test_request:get(OAuthUrl, [], [{host_header, Host}]) of
+ {ok, Code, _, Body} ->
+ ?assertEqual(401, Code),
+ {JsonBody} = ejson:decode(Body),
+ ?assertEqual(<<"unauthorized">>,
+ couch_util:get_value(<<"error">>, JsonBody));
+ Else ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ end
+ end).
diff --git a/test/couchdb/couchdb_views_tests.erl b/test/couchdb/couchdb_views_tests.erl
new file mode 100644
index 000000000..6904f00ba
--- /dev/null
+++ b/test/couchdb/couchdb_views_tests.erl
@@ -0,0 +1,677 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_views_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
+-define(DELAY, 100).
+-define(TIMEOUT, 1000).
+
+
+start() ->
+ {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
+ Pid.
+
+stop(Pid) ->
+ erlang:monitor(process, Pid),
+ couch_server_sup:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, server_stop})
+ end.
+
+setup() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+ ok = couch_db:close(Db),
+ FooRev = create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
+ query_view(DbName, "foo", "bar"),
+ BooRev = create_design_doc(DbName, <<"_design/boo">>, <<"baz">>),
+ query_view(DbName, "boo", "baz"),
+ {DbName, {FooRev, BooRev}}.
+
+setup_with_docs() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
+ ok = couch_db:close(Db),
+ create_docs(DbName),
+ create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
+ DbName.
+
+%% Delete the test database. Accepts either the {DbName, {FooRev, BooRev}}
+%% tuple produced by setup/0 or the bare binary name from setup_with_docs/0.
+teardown({DbName, _}) ->
+ teardown(DbName);
+teardown(DbName) when is_binary(DbName) ->
+ couch_server:delete(DbName, [?ADMIN_USER]),
+ ok.
+
+
+view_indexes_cleanup_test_() ->
+ {
+ "View indexes cleanup",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_have_two_indexes_alive_before_deletion/1,
+ fun should_cleanup_index_file_after_ddoc_deletion/1,
+ fun should_cleanup_all_index_files/1
+ ]
+ }
+ }
+ }.
+
+view_group_db_leaks_test_() ->
+ {
+ "View group db leaks",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup_with_docs/0, fun teardown/1,
+ [
+ fun couchdb_1138/1,
+ fun couchdb_1309/1
+ ]
+ }
+ }
+ }.
+
+view_group_shutdown_test_() ->
+ {
+ "View group shutdown",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ [couchdb_1283()]
+ }
+ }.
+
+
+should_not_remember_docs_in_index_after_backup_restore_test() ->
+ %% COUCHDB-640
+ start(),
+ DbName = setup_with_docs(),
+
+ ok = backup_db_file(DbName),
+ create_doc(DbName, "doc666"),
+
+ Rows0 = query_view(DbName, "foo", "bar"),
+ ?assert(has_doc("doc1", Rows0)),
+ ?assert(has_doc("doc2", Rows0)),
+ ?assert(has_doc("doc3", Rows0)),
+ ?assert(has_doc("doc666", Rows0)),
+
+ restore_backup_db_file(DbName),
+
+ Rows1 = query_view(DbName, "foo", "bar"),
+ ?assert(has_doc("doc1", Rows1)),
+ ?assert(has_doc("doc2", Rows1)),
+ ?assert(has_doc("doc3", Rows1)),
+ ?assertNot(has_doc("doc666", Rows1)),
+
+ teardown(DbName),
+ stop(whereis(couch_server_sup)).
+
+
+should_upgrade_legacy_view_files_test() ->
+ start(),
+
+ ok = couch_config:set("query_server_config", "commit_freq", "0", false),
+
+ DbName = <<"test">>,
+ DbFileName = "test.couch",
+ DbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
+ OldViewName = "3b835456c235b1827e012e25666152f3.view",
+ FixtureViewFilePath = filename:join([?FIXTURESDIR, OldViewName]),
+ NewViewName = "a1c5929f912aca32f13446122cc6ce50.view",
+
+ DbDir = couch_config:get("couchdb", "database_dir"),
+ ViewDir = couch_config:get("couchdb", "view_index_dir"),
+ OldViewFilePath = filename:join([ViewDir, ".test_design", OldViewName]),
+ NewViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
+ NewViewName]),
+
+ % cleanup
+ Files = [
+ filename:join([DbDir, DbFileName]),
+ OldViewFilePath,
+ NewViewFilePath
+ ],
+ lists:foreach(fun(File) -> file:delete(File) end, Files),
+
+ % copy old db file into db dir
+ {ok, _} = file:copy(DbFilePath, filename:join([DbDir, DbFileName])),
+
+ % copy old view file into view dir
+ ok = filelib:ensure_dir(filename:join([ViewDir, ".test_design"]) ++ "/"),
+ {ok, _} = file:copy(FixtureViewFilePath, OldViewFilePath),
+
+ % ensure old header
+ OldHeader = read_header(OldViewFilePath),
+ ?assertMatch(#index_header{}, OldHeader),
+
+ % query view for expected results
+ Rows0 = query_view(DbName, "test", "test"),
+ ?assertEqual(2, length(Rows0)),
+
+ % ensure old file gone
+ ?assertNot(filelib:is_regular(OldViewFilePath)),
+
+ % add doc to trigger update
+ DocUrl = db_url(DbName) ++ "/boo",
+ {ok, _, _, _} = test_request:put(
+ DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":3}">>),
+
+ % query view for expected results
+ Rows1 = query_view(DbName, "test", "test"),
+ ?assertEqual(3, length(Rows1)),
+
+ % ensure new header
+ timer:sleep(2000), % have to wait for awhile to upgrade the index
+ NewHeader = read_header(NewViewFilePath),
+ ?assertMatch(#mrheader{}, NewHeader),
+
+ teardown(DbName),
+ stop(whereis(couch_server_sup)).
+
+
+should_have_two_indexes_alive_before_deletion({DbName, _}) ->
+ view_cleanup(DbName),
+ ?_assertEqual(2, count_index_files(DbName)).
+
+should_cleanup_index_file_after_ddoc_deletion({DbName, {FooRev, _}}) ->
+ delete_design_doc(DbName, <<"_design/foo">>, FooRev),
+ view_cleanup(DbName),
+ ?_assertEqual(1, count_index_files(DbName)).
+
+should_cleanup_all_index_files({DbName, {FooRev, BooRev}})->
+ delete_design_doc(DbName, <<"_design/foo">>, FooRev),
+ delete_design_doc(DbName, <<"_design/boo">>, BooRev),
+ view_cleanup(DbName),
+ ?_assertEqual(0, count_index_files(DbName)).
+
+couchdb_1138(DbName) ->
+ ?_test(begin
+ {ok, IndexerPid} = couch_index_server:get_index(
+ couch_mrview_index, DbName, <<"_design/foo">>),
+ ?assert(is_pid(IndexerPid)),
+ ?assert(is_process_alive(IndexerPid)),
+ ?assertEqual(2, count_db_refs(DbName)),
+
+ Rows0 = query_view(DbName, "foo", "bar"),
+ ?assertEqual(3, length(Rows0)),
+ ?assertEqual(2, count_db_refs(DbName)),
+ ?assert(is_process_alive(IndexerPid)),
+
+ create_doc(DbName, "doc1000"),
+ Rows1 = query_view(DbName, "foo", "bar"),
+ ?assertEqual(4, length(Rows1)),
+ ?assertEqual(2, count_db_refs(DbName)),
+ ?assert(is_process_alive(IndexerPid)),
+
+ Ref1 = get_db_ref_counter(DbName),
+ compact_db(DbName),
+ Ref2 = get_db_ref_counter(DbName),
+ ?assertEqual(2, couch_ref_counter:count(Ref2)),
+ ?assertNotEqual(Ref2, Ref1),
+ ?assertNot(is_process_alive(Ref1)),
+ ?assert(is_process_alive(IndexerPid)),
+
+ compact_view_group(DbName, "foo"),
+ ?assertEqual(2, count_db_refs(DbName)),
+ Ref3 = get_db_ref_counter(DbName),
+ ?assertEqual(Ref3, Ref2),
+ ?assert(is_process_alive(IndexerPid)),
+
+ create_doc(DbName, "doc1001"),
+ Rows2 = query_view(DbName, "foo", "bar"),
+ ?assertEqual(5, length(Rows2)),
+ ?assertEqual(2, count_db_refs(DbName)),
+ ?assert(is_process_alive(IndexerPid))
+ end).
+
+couchdb_1309(DbName) ->
+ ?_test(begin
+ {ok, IndexerPid} = couch_index_server:get_index(
+ couch_mrview_index, DbName, <<"_design/foo">>),
+ ?assert(is_pid(IndexerPid)),
+ ?assert(is_process_alive(IndexerPid)),
+ ?assertEqual(2, count_db_refs(DbName)),
+
+ create_doc(DbName, "doc1001"),
+ Rows0 = query_view(DbName, "foo", "bar"),
+ check_rows_value(Rows0, null),
+ ?assertEqual(4, length(Rows0)),
+ ?assertEqual(2, count_db_refs(DbName)),
+ ?assert(is_process_alive(IndexerPid)),
+
+ update_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
+ {ok, NewIndexerPid} = couch_index_server:get_index(
+ couch_mrview_index, DbName, <<"_design/foo">>),
+ ?assert(is_pid(NewIndexerPid)),
+ ?assert(is_process_alive(NewIndexerPid)),
+ ?assertNotEqual(IndexerPid, NewIndexerPid),
+ ?assertEqual(2, count_db_refs(DbName)),
+
+ Rows1 = query_view(DbName, "foo", "bar", ok),
+ ?assertEqual(0, length(Rows1)),
+ Rows2 = query_view(DbName, "foo", "bar"),
+ check_rows_value(Rows2, 1),
+ ?assertEqual(4, length(Rows2)),
+
+ MonRef0 = erlang:monitor(process, IndexerPid),
+ receive
+ {'DOWN', MonRef0, _, _, _} ->
+ ok
+ after ?TIMEOUT ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, "old view group is not dead after ddoc update"}]})
+ end,
+
+ MonRef1 = erlang:monitor(process, NewIndexerPid),
+ ok = couch_server:delete(DbName, [?ADMIN_USER]),
+ receive
+ {'DOWN', MonRef1, _, _, _} ->
+ ok
+ after ?TIMEOUT ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, "new view group did not die after DB deletion"}]})
+ end
+ end).
+
+couchdb_1283() ->
+ ?_test(begin
+ ok = couch_config:set("couchdb", "max_dbs_open", "3", false),
+ ok = couch_config:set("couchdb", "delayed_commits", "false", false),
+
+ {ok, MDb1} = couch_db:create(?tempdb(), [?ADMIN_USER]),
+ DDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/foo">>},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>, {[
+ {<<"foo">>, {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}},
+ {<<"foo2">>, {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}},
+ {<<"foo3">>, {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}},
+ {<<"foo4">>, {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}},
+ {<<"foo5">>, {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}}
+ ]}}
+ ]}),
+ {ok, _} = couch_db:update_doc(MDb1, DDoc, []),
+ ok = populate_db(MDb1, 100, 100),
+ query_view(MDb1#db.name, "foo", "foo"),
+ ok = couch_db:close(MDb1),
+
+ {ok, Db1} = couch_db:create(?tempdb(), [?ADMIN_USER]),
+ ok = couch_db:close(Db1),
+ {ok, Db2} = couch_db:create(?tempdb(), [?ADMIN_USER]),
+ ok = couch_db:close(Db2),
+ {ok, Db3} = couch_db:create(?tempdb(), [?ADMIN_USER]),
+ ok = couch_db:close(Db3),
+
+ Writer1 = spawn_writer(Db1#db.name),
+ Writer2 = spawn_writer(Db2#db.name),
+
+ ?assert(is_process_alive(Writer1)),
+ ?assert(is_process_alive(Writer2)),
+
+ %% Below we do exactly the same as couch_mrview:compact holds inside
+ %% because we need have access to compaction Pid, not a Ref.
+ %% {ok, MonRef} = couch_mrview:compact(MDb1#db.name, <<"_design/foo">>,
+ %% [monitor]),
+ {ok, Pid} = couch_index_server:get_index(
+ couch_mrview_index, MDb1#db.name, <<"_design/foo">>),
+ {ok, CPid} = gen_server:call(Pid, compact),
+ %% By suspending compaction process we ensure that compaction won't get
+ %% finished too early to make get_writer_status assertion fail.
+ erlang:suspend_process(CPid),
+ MonRef = erlang:monitor(process, CPid),
+ Writer3 = spawn_writer(Db3#db.name),
+ ?assert(is_process_alive(Writer3)),
+ ?assertEqual({error, all_dbs_active}, get_writer_status(Writer3)),
+
+ ?assert(is_process_alive(Writer1)),
+ ?assert(is_process_alive(Writer2)),
+ ?assert(is_process_alive(Writer3)),
+
+ %% Resume compaction
+ erlang:resume_process(CPid),
+
+ receive
+ {'DOWN', MonRef, process, _, Reason} ->
+ ?assertEqual(normal, Reason)
+ after ?TIMEOUT ->
+ erlang:error(
+ {assertion_failed,
+ [{module, ?MODULE}, {line, ?LINE},
+ {reason, "Failure compacting view group"}]})
+ end,
+
+ ?assertEqual(ok, writer_try_again(Writer3)),
+ ?assertEqual(ok, get_writer_status(Writer3)),
+
+ ?assert(is_process_alive(Writer1)),
+ ?assert(is_process_alive(Writer2)),
+ ?assert(is_process_alive(Writer3)),
+
+ ?assertEqual(ok, stop_writer(Writer1)),
+ ?assertEqual(ok, stop_writer(Writer2)),
+ ?assertEqual(ok, stop_writer(Writer3))
+ end).
+
+%% Insert a single committed document {_id: DocId, value: 999} into DbName.
+%% Accepts the id as a list or a binary.
+%% (The variable name Doc666 is historical — the COUCHDB-640 test uses the
+%% id "doc666" — the stored value is 999.)
+create_doc(DbName, DocId) when is_list(DocId) ->
+ create_doc(DbName, ?l2b(DocId));
+create_doc(DbName, DocId) when is_binary(DocId) ->
+ {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
+ Doc666 = couch_doc:from_json_obj({[
+ {<<"_id">>, DocId},
+ {<<"value">>, 999}
+ ]}),
+ {ok, _} = couch_db:update_docs(Db, [Doc666]),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db).
+
+create_docs(DbName) ->
+ {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
+ Doc1 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc1">>},
+ {<<"value">>, 1}
+
+ ]}),
+ Doc2 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc2">>},
+ {<<"value">>, 2}
+
+ ]}),
+ Doc3 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc3">>},
+ {<<"value">>, 3}
+
+ ]}),
+ {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db).
+
+%% Fill Db with at least N documents, written in batches of BatchSize, each
+%% carrying a ~1.3KB base64 payload of random bytes (used to grow the db so
+%% compaction takes measurable time).
+%% NOTE(review): crypto:rand_bytes/1 is deprecated in later OTP releases in
+%% favour of crypto:strong_rand_bytes/1 — fine for the OTP this targets.
+populate_db(Db, BatchSize, N) when N > 0 ->
+ Docs = lists:map(
+ fun(_) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, couch_uuids:new()},
+ {<<"value">>, base64:encode(crypto:rand_bytes(1000))}
+ ]})
+ end,
+ lists:seq(1, BatchSize)),
+ {ok, _} = couch_db:update_docs(Db, Docs, []),
+ populate_db(Db, BatchSize, N - length(Docs));
+populate_db(_Db, _, _) ->
+ ok.
+
+create_design_doc(DbName, DDName, ViewName) ->
+ {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
+ DDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, DDName},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>, {[
+ {ViewName, {[
+ {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
+ ]}}
+ ]}}
+ ]}),
+ {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db),
+ Rev.
+
+update_design_doc(DbName, DDName, ViewName) ->
+ {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
+ {ok, Doc} = couch_db:open_doc(Db, DDName, [?ADMIN_USER]),
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ Rev = couch_util:get_value(<<"_rev">>, Props),
+ DDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, DDName},
+ {<<"_rev">>, Rev},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>, {[
+ {ViewName, {[
+ {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
+ ]}}
+ ]}}
+ ]}),
+ {ok, NewRev} = couch_db:update_doc(Db, DDoc, [?ADMIN_USER]),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db),
+ NewRev.
+
+delete_design_doc(DbName, DDName, Rev) ->
+ {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
+ DDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, DDName},
+ {<<"_rev">>, couch_doc:rev_to_str(Rev)},
+ {<<"_deleted">>, true}
+ ]}),
+ {ok, _} = couch_db:update_doc(Db, DDoc, [Rev]),
+ couch_db:close(Db).
+
+%% Build the HTTP base URL for DbName from the running httpd's configured
+%% bind address and actual (possibly ephemeral) port.
+db_url(DbName) ->
+ Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
+ Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName).
+
+%% Query a view over HTTP and return its "rows" list (default []).
+%% Stale is `false' for a fresh query, or an atom (e.g. ok) appended as
+%% "?stale=Atom"; asserts the response status is 200.
+query_view(DbName, DDoc, View) ->
+ query_view(DbName, DDoc, View, false).
+
+query_view(DbName, DDoc, View, Stale) ->
+ {ok, Code, _Headers, Body} = test_request:get(
+ db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View
+ ++ case Stale of
+ false -> [];
+ _ -> "?stale=" ++ atom_to_list(Stale)
+ end),
+ ?assertEqual(200, Code),
+ {Props} = ejson:decode(Body),
+ couch_util:get_value(<<"rows">>, Props, []).
+
+check_rows_value(Rows, Value) ->
+ lists:foreach(
+ fun({Row}) ->
+ ?assertEqual(Value, couch_util:get_value(<<"value">>, Row))
+ end, Rows).
+
+view_cleanup(DbName) ->
+ {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
+ couch_mrview:cleanup(Db),
+ couch_db:close(Db).
+
+get_db_ref_counter(DbName) ->
+ {ok, #db{fd_ref_counter = Ref} = Db} = couch_db:open_int(DbName, []),
+ ok = couch_db:close(Db),
+ Ref.
+
+count_db_refs(DbName) ->
+ Ref = get_db_ref_counter(DbName),
+ % have to sleep a bit to let couchdb cleanup all refs and leave only
+ % active ones. otherwise the related tests will randomly fail due to
+ % count number mismatch
+ timer:sleep(200),
+ couch_ref_counter:count(Ref).
+
+%% Count the mrview index files on disk for DbName by scanning the
+%% configured view_index_dir (no server call involved).
+count_index_files(DbName) ->
+ % wildcard over <view_index_dir>/.<DbName>_design/mrview/*
+ RootDir = couch_config:get("couchdb", "view_index_dir"),
+ length(filelib:wildcard(RootDir ++ "/." ++
+ binary_to_list(DbName) ++ "_design"++"/mrview/*")).
+
+%% True if any view row in Rows has an "id" member equal to DocId1
+%% (given as iodata; normalized to a binary before comparison).
+has_doc(DocId1, Rows) ->
+ DocId = iolist_to_binary(DocId1),
+ lists:any(fun({R}) -> lists:member({<<"id">>, DocId}, R) end, Rows).
+
+backup_db_file(DbName) ->
+ DbDir = couch_config:get("couchdb", "database_dir"),
+ DbFile = filename:join([DbDir, ?b2l(DbName) ++ ".couch"]),
+ {ok, _} = file:copy(DbFile, DbFile ++ ".backup"),
+ ok.
+
+restore_backup_db_file(DbName) ->
+ DbDir = couch_config:get("couchdb", "database_dir"),
+ stop(whereis(couch_server_sup)),
+ DbFile = filename:join([DbDir, ?b2l(DbName) ++ ".couch"]),
+ ok = file:delete(DbFile),
+ ok = file:rename(DbFile ++ ".backup", DbFile),
+ start(),
+ ok.
+
+compact_db(DbName) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ {ok, _} = couch_db:start_compact(Db),
+ ok = couch_db:close(Db),
+ wait_db_compact_done(DbName, 10).
+
+%% Poll the db's compactor_pid until compaction finishes, sleeping ?DELAY ms
+%% between the up-to-N attempts; raises assertion_failed when retries run out.
+wait_db_compact_done(_DbName, 0) ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "DB compaction failed to finish"}]});
+wait_db_compact_done(DbName, N) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ ok = couch_db:close(Db),
+ case is_pid(Db#db.compactor_pid) of
+ false ->
+ ok;
+ true ->
+ ok = timer:sleep(?DELAY),
+ wait_db_compact_done(DbName, N - 1)
+ end.
+
+compact_view_group(DbName, DDocId) when is_list(DDocId) ->
+ compact_view_group(DbName, ?l2b("_design/" ++ DDocId));
+compact_view_group(DbName, DDocId) when is_binary(DDocId) ->
+ ok = couch_mrview:compact(DbName, DDocId),
+ wait_view_compact_done(DbName, DDocId, 10).
+
+%% Poll the view group's _info endpoint until "compact_running" becomes
+%% false, sleeping ?DELAY ms between the up-to-N attempts; raises
+%% assertion_failed when retries run out.
+%% Fixed: the timeout reason previously said "DB compaction failed to
+%% finish" (copy-pasted from wait_db_compact_done/2) although this waiter
+%% tracks *view* compaction.
+wait_view_compact_done(_DbName, _DDocId, 0) ->
+    erlang:error({assertion_failed,
+        [{module, ?MODULE},
+         {line, ?LINE},
+         {reason, "View compaction failed to finish"}]});
+wait_view_compact_done(DbName, DDocId, N) ->
+    {ok, Code, _Headers, Body} = test_request:get(
+        db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"),
+    ?assertEqual(200, Code),
+    {Info} = ejson:decode(Body),
+    {IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
+    CompactRunning = couch_util:get_value(<<"compact_running">>, IndexInfo),
+    case CompactRunning of
+        false ->
+            ok;
+        true ->
+            ok = timer:sleep(?DELAY),
+            wait_view_compact_done(DbName, DDocId, N - 1)
+    end.
+
+spawn_writer(DbName) ->
+ Parent = self(),
+ spawn(fun() ->
+ process_flag(priority, high),
+ writer_loop(DbName, Parent)
+ end).
+
+get_writer_status(Writer) ->
+ Ref = make_ref(),
+ Writer ! {get_status, Ref},
+ receive
+ {db_open, Ref} ->
+ ok;
+ {db_open_error, Error, Ref} ->
+ Error
+ after ?TIMEOUT ->
+ timeout
+ end.
+
+writer_try_again(Writer) ->
+ Ref = make_ref(),
+ Writer ! {try_again, Ref},
+ receive
+ {ok, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end.
+
+stop_writer(Writer) ->
+ Ref = make_ref(),
+ Writer ! {stop, Ref},
+ receive
+ {ok, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout on stopping process"}]})
+ end.
+
+%% Writer process body: try to open DbName once. On success, serve status
+%% queries from the open-db loop (writer_loop_1/2); on failure, remember the
+%% error and serve it from the error loop (writer_loop_2/3), which also
+%% supports a {try_again, Ref} retry back into this function.
+writer_loop(DbName, Parent) ->
+ case couch_db:open_int(DbName, []) of
+ {ok, Db} ->
+ writer_loop_1(Db, Parent);
+ Error ->
+ writer_loop_2(DbName, Parent, Error)
+ end.
+
+writer_loop_1(Db, Parent) ->
+ receive
+ {get_status, Ref} ->
+ Parent ! {db_open, Ref},
+ writer_loop_1(Db, Parent);
+ {stop, Ref} ->
+ ok = couch_db:close(Db),
+ Parent ! {ok, Ref}
+ end.
+
+writer_loop_2(DbName, Parent, Error) ->
+ receive
+ {get_status, Ref} ->
+ Parent ! {db_open_error, Error, Ref},
+ writer_loop_2(DbName, Parent, Error);
+ {try_again, Ref} ->
+ Parent ! {ok, Ref},
+ writer_loop(DbName, Parent)
+ end.
+
+%% Read and return the header term of a couch view file on disk
+%% (used to distinguish legacy #index_header{} from #mrheader{}).
+read_header(File) ->
+ {ok, Fd} = couch_file:open(File),
+ {ok, {_Sig, Header}} = couch_file:read_header(Fd),
+ couch_file:close(Fd),
+ Header.
diff --git a/test/etap/041-uuid-gen-id.ini b/test/couchdb/eunit.ini
index 6886efdb7..50024a375 100644
--- a/test/etap/041-uuid-gen-id.ini
+++ b/test/couchdb/eunit.ini
@@ -15,6 +15,14 @@
; specific language governing permissions and limitations
; under the License.
-[uuids]
-algorithm = utc_id
-utc_id_suffix = bozo
+[couchdb]
+; time to relax!
+uuid = 74696d6520746f2072656c617821
+
+[httpd]
+port = 0
+
+[log]
+; logging is disabled to remove unwanted noise in stdout from tests processing
+level = none
+include_sasl = false
diff --git a/test/etap/fixtures/3b835456c235b1827e012e25666152f3.view b/test/couchdb/fixtures/3b835456c235b1827e012e25666152f3.view
index 9c67648be..9c67648be 100644
--- a/test/etap/fixtures/3b835456c235b1827e012e25666152f3.view
+++ b/test/couchdb/fixtures/3b835456c235b1827e012e25666152f3.view
Binary files differ
diff --git a/src/etap/Makefile.am b/test/couchdb/fixtures/Makefile.am
index beaf65c3b..1273234f9 100644
--- a/src/etap/Makefile.am
+++ b/test/couchdb/fixtures/Makefile.am
@@ -1,28 +1,15 @@
## Licensed under the Apache License, Version 2.0 (the "License"); you may not
-## use this file except in compliance with the License. You may obtain a copy
-## of the License at
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
## License for the specific language governing permissions and limitations under
## the License.
-etapebindir = $(localerlanglibdir)/etap/ebin
-
-etap_file_collection = \
- etap.erl
-
-etapebin_make_generated_file_list = \
- etap.beam
-
-etapebin_DATA = $(etapebin_make_generated_file_list)
-
-EXTRA_DIST = $(etap_file_collection)
-
-CLEANFILES = $(etapebin_make_generated_file_list)
-
-%.beam: %.erl
- $(ERLC) $(ERLC_FLAGS) $<
+noinst_PROGRAMS = test_cfg_register
+test_cfg_register_SOURCES = test_cfg_register.c
+test_cfg_register_CFLAGS = -D_BSD_SOURCE
diff --git a/test/etap/081-config-override.1.ini b/test/couchdb/fixtures/couch_config_tests_1.ini
index 55451dade..55451dade 100644
--- a/test/etap/081-config-override.1.ini
+++ b/test/couchdb/fixtures/couch_config_tests_1.ini
diff --git a/test/etap/081-config-override.2.ini b/test/couchdb/fixtures/couch_config_tests_2.ini
index 5f46357f5..5f46357f5 100644
--- a/test/etap/081-config-override.2.ini
+++ b/test/couchdb/fixtures/couch_config_tests_2.ini
diff --git a/test/etap/121-stats-aggregates.cfg b/test/couchdb/fixtures/couch_stats_aggregates.cfg
index 30e475da8..30e475da8 100644
--- a/test/etap/121-stats-aggregates.cfg
+++ b/test/couchdb/fixtures/couch_stats_aggregates.cfg
diff --git a/test/etap/121-stats-aggregates.ini b/test/couchdb/fixtures/couch_stats_aggregates.ini
index cc5cd2187..cc5cd2187 100644
--- a/test/etap/121-stats-aggregates.ini
+++ b/test/couchdb/fixtures/couch_stats_aggregates.ini
diff --git a/test/couchdb/fixtures/logo.png b/test/couchdb/fixtures/logo.png
new file mode 100644
index 000000000..d21ac025b
--- /dev/null
+++ b/test/couchdb/fixtures/logo.png
Binary files differ
diff --git a/test/etap/172-os-daemon-errors.1.sh b/test/couchdb/fixtures/os_daemon_bad_perm.sh
index 345c8b40b..345c8b40b 100644
--- a/test/etap/172-os-daemon-errors.1.sh
+++ b/test/couchdb/fixtures/os_daemon_bad_perm.sh
diff --git a/test/etap/172-os-daemon-errors.4.sh b/test/couchdb/fixtures/os_daemon_can_reboot.sh
index 5bc10e83f..5bc10e83f 100755
--- a/test/etap/172-os-daemon-errors.4.sh
+++ b/test/couchdb/fixtures/os_daemon_can_reboot.sh
diff --git a/test/etap/171-os-daemons-config.es b/test/couchdb/fixtures/os_daemon_configer.escript.in
index b4a914e61..d2ecfa8ac 100755
--- a/test/etap/171-os-daemons-config.es
+++ b/test/couchdb/fixtures/os_daemon_configer.escript.in
@@ -1,19 +1,18 @@
#! /usr/bin/env escript
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-filename() ->
- list_to_binary(test_util:source_file("test/etap/171-os-daemons-config.es")).
+%% -*- erlang -*-
+%%! -DTEST -pa @abs_top_builddir@/src/ejson
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+%% use this file except in compliance with the License. You may obtain a copy of
+%% the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+%% License for the specific language governing permissions and limitations under
+%% the License.
read() ->
case io:get_line('') of
@@ -42,14 +41,17 @@ log(Mesg, Level) ->
write([<<"log">>, Mesg, {[{<<"level">>, Level}]}]).
test_get_cfg1() ->
- FileName = filename(),
- {[{<<"foo">>, FileName}]} = get_cfg(<<"os_daemons">>).
+ Path = list_to_binary(?FILE),
+ FileName = list_to_binary(filename:basename(?FILE)),
+ {[{FileName, Path}]} = get_cfg(<<"os_daemons">>).
test_get_cfg2() ->
- FileName = filename(),
- FileName = get_cfg(<<"os_daemons">>, <<"foo">>),
+ Path = list_to_binary(?FILE),
+ FileName = list_to_binary(filename:basename(?FILE)),
+ Path = get_cfg(<<"os_daemons">>, FileName),
<<"sequential">> = get_cfg(<<"uuids">>, <<"algorithm">>).
+
test_get_unknown_cfg() ->
{[]} = get_cfg(<<"aal;3p4">>),
null = get_cfg(<<"aal;3p4">>, <<"313234kjhsdfl">>).
@@ -79,7 +81,4 @@ loop({error, _Reason}) ->
init:stop().
main([]) ->
- test_util:init_code_path(),
- couch_config:start_link(test_util:config_files()),
- couch_drv:start_link(),
do_tests().
diff --git a/test/etap/172-os-daemon-errors.2.sh b/test/couchdb/fixtures/os_daemon_die_on_boot.sh
index 256ee7935..256ee7935 100755
--- a/test/etap/172-os-daemon-errors.2.sh
+++ b/test/couchdb/fixtures/os_daemon_die_on_boot.sh
diff --git a/test/etap/172-os-daemon-errors.3.sh b/test/couchdb/fixtures/os_daemon_die_quickly.sh
index f5a13684e..f5a13684e 100755
--- a/test/etap/172-os-daemon-errors.3.sh
+++ b/test/couchdb/fixtures/os_daemon_die_quickly.sh
diff --git a/test/etap/170-os-daemons.es b/test/couchdb/fixtures/os_daemon_looper.escript
index 73974e905..73974e905 100755
--- a/test/etap/170-os-daemons.es
+++ b/test/couchdb/fixtures/os_daemon_looper.escript
diff --git a/test/etap/fixtures/test.couch b/test/couchdb/fixtures/test.couch
index 32c79af32..32c79af32 100644
--- a/test/etap/fixtures/test.couch
+++ b/test/couchdb/fixtures/test.couch
Binary files differ
diff --git a/test/etap/test_cfg_register.c b/test/couchdb/fixtures/test_cfg_register.c
index c910bac48..c910bac48 100644
--- a/test/etap/test_cfg_register.c
+++ b/test/couchdb/fixtures/test_cfg_register.c
diff --git a/test/couchdb/include/couch_eunit.hrl.in b/test/couchdb/include/couch_eunit.hrl.in
new file mode 100644
index 000000000..063b3dbb2
--- /dev/null
+++ b/test/couchdb/include/couch_eunit.hrl.in
@@ -0,0 +1,64 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-include_lib("eunit/include/eunit.hrl").
+
+-define(BUILDDIR, "@abs_top_builddir@").
+-define(SOURCEDIR, "@abs_top_srcdir@").
+-define(CONFIG_CHAIN, [
+ filename:join([?BUILDDIR, "etc", "couchdb", "default_dev.ini"]),
+ filename:join([?BUILDDIR, "etc", "couchdb", "local_dev.ini"]),
+ filename:join([?SOURCEDIR, "test", "couchdb", "eunit.ini"])]).
+-define(FIXTURESDIR,
+ filename:join([?SOURCEDIR, "test", "couchdb", "fixtures"])).
+-define(TEMPDIR,
+ filename:join([?BUILDDIR, "test", "couchdb", "temp"])).
+
+-define(tempfile,
+ fun() ->
+ {A, B, C} = erlang:now(),
+ N = node(),
+ FileName = lists:flatten(io_lib:format("~p-~p.~p.~p", [N, A, B, C])),
+ filename:join([?TEMPDIR, FileName])
+ end).
+-define(tempdb,
+ fun() ->
+ Nums = tuple_to_list(erlang:now()),
+ Prefix = "eunit-test-db",
+ Suffix = lists:concat([integer_to_list(Num) || Num <- Nums]),
+ list_to_binary(Prefix ++ "-" ++ Suffix)
+ end).
+-define(docid,
+ fun() ->
+ {A, B, C} = erlang:now(),
+ lists:flatten(io_lib:format("~p~p~p", [A, B, C]))
+ end).
+
+%% Borrowed from https://github.com/richcarl/eunit/blob/master/include/eunit.hrl#L200-L219
+%% TODO: get rid of this once R14* is no longer supported
+-ifndef(assertNotMatch).
+-define(assertNotMatch(Guard, Expr),
+ begin
+ ((fun () ->
+ __V = (Expr),
+ case __V of
+ Guard -> erlang:error({assertNotMatch_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {expression, (??Expr)},
+ {pattern, (??Guard)},
+ {value, __V}]});
+ _ -> ok
+ end
+ end)())
+ end).
+-endif.
diff --git a/test/etap/190-json-stream-parse.t b/test/couchdb/json_stream_parse_tests.erl
index 49ea58f83..92303b65e 100755..100644
--- a/test/etap/190-json-stream-parse.t
+++ b/test/couchdb/json_stream_parse_tests.erl
@@ -1,4 +1,3 @@
-#!/usr/bin/env escript
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
@@ -11,67 +10,11 @@
% License for the specific language governing permissions and limitations under
% the License.
-main(_) ->
- test_util:init_code_path(),
- etap:plan(99),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag("Test died abnormally: ~p", [Other]),
- etap:bail("Bad return value.")
- end,
- ok.
+-module(json_stream_parse_tests).
-test() ->
- crypto:start(),
- ok = test_raw_json_input(),
- ok = test_1_byte_data_function(),
- ok = test_multiple_bytes_data_function().
+-include("couch_eunit.hrl").
-
-test_raw_json_input() ->
- etap:diag("Tests with raw JSON string as the input."),
- lists:foreach(
- fun({EJson, JsonString, Desc}) ->
- etap:is(
- equiv(EJson, json_stream_parse:to_ejson(JsonString)),
- true,
- Desc)
- end,
- cases()),
- ok.
-
-
-test_1_byte_data_function() ->
- etap:diag("Tests with a 1 byte output data function as the input."),
- lists:foreach(
- fun({EJson, JsonString, Desc}) ->
- DataFun = fun() -> single_byte_data_fun(JsonString) end,
- etap:is(
- equiv(EJson, json_stream_parse:to_ejson(DataFun)),
- true,
- Desc)
- end,
- cases()),
- ok.
-
-
-test_multiple_bytes_data_function() ->
- etap:diag("Tests with a multiple bytes output data function as the input."),
- lists:foreach(
- fun({EJson, JsonString, Desc}) ->
- DataFun = fun() -> multiple_bytes_data_fun(JsonString) end,
- etap:is(
- equiv(EJson, json_stream_parse:to_ejson(DataFun)),
- true,
- Desc)
- end,
- cases()),
- ok.
-
-
-cases() ->
+-define(CASES,
[
{1, "1", "integer numeric literial"},
{3.1416, "3.14160", "float numeric literal"}, % text representation may truncate, trail zeroes
@@ -117,7 +60,35 @@ cases() ->
{[-123, <<"foo">>, {[{<<"bar">>, []}]}, null],
"[-123,\"foo\",{\"bar\":[]},null]",
"complex array literal"}
- ].
+ ]
+).
+
+
+raw_json_input_test_() ->
+ Tests = lists:map(
+ fun({EJson, JsonString, Desc}) ->
+ {Desc,
+ ?_assert(equiv(EJson, json_stream_parse:to_ejson(JsonString)))}
+ end, ?CASES),
+ {"Tests with raw JSON string as the input", Tests}.
+
+one_byte_data_fun_test_() ->
+ Tests = lists:map(
+ fun({EJson, JsonString, Desc}) ->
+ DataFun = fun() -> single_byte_data_fun(JsonString) end,
+ {Desc,
+ ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
+ end, ?CASES),
+ {"Tests with a 1 byte output data function as the input", Tests}.
+
+multiple_bytes_data_fun_test_() ->
+ Tests = lists:map(
+ fun({EJson, JsonString, Desc}) ->
+ DataFun = fun() -> multiple_bytes_data_fun(JsonString) end,
+ {Desc,
+ ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
+ end, ?CASES),
+ {"Tests with a multiple bytes output data function as the input", Tests}.
%% Test for equivalence of Erlang terms.
@@ -139,7 +110,6 @@ equiv(false, false) ->
equiv(null, null) ->
true.
-
%% Object representation and traversal order is unknown.
%% Use the sledgehammer and sort property lists.
equiv_object(Props1, Props2) ->
@@ -152,20 +122,17 @@ equiv_object(Props1, Props2) ->
end,
Pairs).
-
%% Recursively compare tuple elements for equivalence.
equiv_list([], []) ->
true;
equiv_list([V1 | L1], [V2 | L2]) ->
equiv(V1, V2) andalso equiv_list(L1, L2).
-
single_byte_data_fun([]) ->
done;
single_byte_data_fun([H | T]) ->
{<<H>>, fun() -> single_byte_data_fun(T) end}.
-
multiple_bytes_data_fun([]) ->
done;
multiple_bytes_data_fun(L) ->
diff --git a/test/couchdb/run.in b/test/couchdb/run.in
new file mode 100644
index 000000000..5286da37b
--- /dev/null
+++ b/test/couchdb/run.in
@@ -0,0 +1,111 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+%%! -DTEST -env ERL_LIBS @abs_top_builddir@/src:$ERL_LIBS -pa @abs_top_builddir@/test/couchdb/ebin
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+%% use this file except in compliance with the License. You may obtain a copy of
+%% the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+%% License for the specific language governing permissions and limitations under
+%% the License.
+
+-define(BUILDDIR, "@abs_top_builddir@").
+-define(SOURCEDIR, "@abs_top_srcdir@").
+-define(TESTS_EBIN, filename:join([?BUILDDIR, "test", "couchdb", "ebin"])).
+-define(TESTS_TEMP, filename:join([?BUILDDIR, "test", "couchdb", "temp"])).
+
+main([]) ->
+ io:fwrite("Path to test file or directory wasn't specified.~n"),
+ erlang:halt(1);
+main(["-v"]) ->
+ io:fwrite("Path to test file or directory wasn't specified.~n"),
+ erlang:halt(1);
+main(["-v", Path]) ->
+ run(Path, [verbose]);
+main(["-v", _ | _]) ->
+ io:fwrite("Only a single test source path is supported.~n"),
+ erlang:halt(1);
+main([Path]) ->
+ run(Path, []),
+ ok;
+main([_|_]) ->
+ io:fwrite("Only a single test source path is supported.~n"),
+ erlang:halt(1).
+
+
+run(Path, Options) ->
+ ensure_dirs(),
+ Mask = "*_tests.erl",
+ Files = list_files(Path, Mask),
+ init_code_path(),
+ Mods = compile(Files),
+ run_tests(Mods, Options).
+
+ensure_dirs() ->
+ ok = filelib:ensure_dir(filename:join([?TESTS_EBIN, "dummy"])),
+ ok = filelib:ensure_dir(filename:join([?TESTS_TEMP, "dummy"])),
+ ok.
+
+list_files(Path, Mask)->
+ AbsPath = filename:absname(Path),
+ case filelib:is_file(AbsPath) of
+ true ->
+ ok;
+ false ->
+ io:fwrite("File or directory not found: ~p~n", [AbsPath]),
+ erlang:halt(1)
+ end,
+ case filelib:is_dir(AbsPath) of
+ true ->
+ case filelib:wildcard(filename:join([AbsPath, Mask])) of
+ [] ->
+ io:fwrite("No test files were found at ~p by mask ~p ~n",
+ [AbsPath, Mask]),
+ erlang:halt(1);
+ Files ->
+ Files
+ end;
+ false -> [AbsPath]
+ end.
+
+
+compile(Files) ->
+ lists:map(
+ fun(File)->
+ io:fwrite("compile ~p~n", [File]),
+ Opts = [report, verbose, {outdir, ?TESTS_EBIN},
+ {i, filename:join([?BUILDDIR, "test", "couchdb",
+ "include"])},
+ {i, filename:join([?SOURCEDIR, "src"])}],
+ {ok, Mod} = compile:file(File, Opts),
+ Mod
+ end,
+ Files).
+
+
+run_tests(Mods, Options) ->
+ %% disable error_logger to reduce noise in stdout
+ error_logger:tty(false),
+ case eunit:test(Mods, Options) of
+ error -> erlang:halt(1);
+ _ -> ok
+ end.
+
+
+init_code_path() ->
+ Paths = [
+ "couchdb",
+ "ejson",
+ "erlang-oauth",
+ "ibrowse",
+ "mochiweb",
+ "snappy"
+ ],
+ lists:foreach(fun(Name) ->
+ code:add_patha(filename:join([?BUILDDIR, "src", Name]))
+ end, Paths).
diff --git a/test/couchdb/test_request.erl b/test/couchdb/test_request.erl
new file mode 100644
index 000000000..68e495698
--- /dev/null
+++ b/test/couchdb/test_request.erl
@@ -0,0 +1,75 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_request).
+
+-export([get/1, get/2, get/3]).
+-export([put/2, put/3]).
+-export([options/1, options/2, options/3]).
+-export([request/3, request/4]).
+
+get(Url) ->
+ request(get, Url, []).
+
+get(Url, Headers) ->
+ request(get, Url, Headers).
+get(Url, Headers, Opts) ->
+ request(get, Url, Headers, [], Opts).
+
+
+put(Url, Body) ->
+ request(put, Url, [], Body).
+
+put(Url, Headers, Body) ->
+ request(put, Url, Headers, Body).
+
+
+options(Url) ->
+ request(options, Url, []).
+
+options(Url, Headers) ->
+ request(options, Url, Headers).
+
+options(Url, Headers, Opts) ->
+ request(options, Url, Headers, [], Opts).
+
+
+request(Method, Url, Headers) ->
+ request(Method, Url, Headers, []).
+
+request(Method, Url, Headers, Body) ->
+ request(Method, Url, Headers, Body, [], 3).
+
+request(Method, Url, Headers, Body, Opts) ->
+ request(Method, Url, Headers, Body, Opts, 3).
+
+request(_Method, _Url, _Headers, _Body, _Opts, 0) ->
+ {error, request_failed};
+request(Method, Url, Headers, Body, Opts, N) ->
+ case code:is_loaded(ibrowse) of
+ false ->
+ {ok, _} = ibrowse:start();
+ _ ->
+ ok
+ end,
+ case ibrowse:send_req(Url, Headers, Method, Body, Opts) of
+ {ok, Code0, RespHeaders, RespBody0} ->
+ Code = list_to_integer(Code0),
+ RespBody = iolist_to_binary(RespBody0),
+ {ok, Code, RespHeaders, RespBody};
+ {error, {'EXIT', {normal, _}}} ->
+ % Connection closed right after a successful request that
+ % used the same connection.
+ request(Method, Url, Headers, Body, Opts, N - 1);
+ Error ->
+ Error
+ end.
diff --git a/test/etap/test_web.erl b/test/couchdb/test_web.erl
index ed78651f1..1de2cd1c3 100644
--- a/test/etap/test_web.erl
+++ b/test/couchdb/test_web.erl
@@ -13,12 +13,15 @@
-module(test_web).
-behaviour(gen_server).
--export([start_link/0, loop/1, get_port/0, set_assert/1, check_last/0]).
+-include("couch_eunit.hrl").
+
+-export([start_link/0, stop/0, loop/1, get_port/0, set_assert/1, check_last/0]).
-export([init/1, terminate/2, code_change/3]).
-export([handle_call/3, handle_cast/2, handle_info/2]).
-define(SERVER, test_web_server).
-define(HANDLER, test_web_handler).
+-define(DELAY, 500).
start_link() ->
gen_server:start({local, ?HANDLER}, ?MODULE, [], []),
@@ -29,7 +32,7 @@ start_link() ->
]).
loop(Req) ->
- %etap:diag("Handling request: ~p", [Req]),
+ %?debugFmt("Handling request: ~p", [Req]),
case gen_server:call(?HANDLER, {check_request, Req}) of
{ok, RespInfo} ->
{ok, Req:respond(RespInfo)};
@@ -40,12 +43,12 @@ loop(Req) ->
{ok, Resp};
{chunked, {Status, Headers, BodyChunks}} ->
Resp = Req:respond({Status, Headers, chunked}),
- timer:sleep(500),
+ timer:sleep(?DELAY),
lists:foreach(fun(C) -> Resp:write_chunk(C) end, BodyChunks),
Resp:write_chunk([]),
{ok, Resp};
{error, Reason} ->
- etap:diag("Error: ~p", [Reason]),
+ ?debugFmt("Error: ~p", [Reason]),
Body = lists:flatten(io_lib:format("Error: ~p", [Reason])),
{ok, Req:respond({200, [], Body})}
end.
@@ -54,7 +57,7 @@ get_port() ->
mochiweb_socket_server:get(?SERVER, port).
set_assert(Fun) ->
- ok = gen_server:call(?HANDLER, {set_assert, Fun}).
+ ?assertEqual(ok, gen_server:call(?HANDLER, {set_assert, Fun})).
check_last() ->
gen_server:call(?HANDLER, last_status).
@@ -65,12 +68,20 @@ init(_) ->
terminate(_Reason, _State) ->
ok.
+stop() ->
+ gen_server:cast(?SERVER, stop).
+
+
handle_call({check_request, Req}, _From, State) when is_function(State, 1) ->
Resp2 = case (catch State(Req)) of
- {ok, Resp} -> {reply, {ok, Resp}, was_ok};
- {raw, Resp} -> {reply, {raw, Resp}, was_ok};
- {chunked, Resp} -> {reply, {chunked, Resp}, was_ok};
- Error -> {reply, {error, Error}, not_ok}
+ {ok, Resp} ->
+ {reply, {ok, Resp}, was_ok};
+ {raw, Resp} ->
+ {reply, {raw, Resp}, was_ok};
+ {chunked, Resp} ->
+ {reply, {chunked, Resp}, was_ok};
+ Error ->
+ {reply, {error, Error}, not_ok}
end,
Req:cleanup(),
Resp2;
@@ -87,12 +98,14 @@ handle_call({set_assert, _}, _From, State) ->
handle_call(Msg, _From, State) ->
{reply, {ignored, Msg}, State}.
+handle_cast(stop, State) ->
+ {stop, normal, State};
handle_cast(Msg, State) ->
- etap:diag("Ignoring cast message: ~p", [Msg]),
+ ?debugFmt("Ignoring cast message: ~p", [Msg]),
{noreply, State}.
handle_info(Msg, State) ->
- etap:diag("Ignoring info message: ~p", [Msg]),
+ ?debugFmt("Ignoring info message: ~p", [Msg]),
{noreply, State}.
code_change(_OldVsn, State, _Extra) ->
diff --git a/test/etap/002-icu-driver.t b/test/etap/002-icu-driver.t
deleted file mode 100755
index e23353394..000000000
--- a/test/etap/002-icu-driver.t
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env escript
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- couch_config:start_link(test_util:config_files()),
- etap:plan(3),
- etap:is(
- element(1, couch_drv:start_link()),
- ok,
- "Started couch_icu_driver."
- ),
- etap:is(
- couch_util:collate(<<"foo">>, <<"bar">>),
- 1,
- "Can collate stuff"
- ),
- etap:is(
- couch_util:collate(<<"A">>, <<"aa">>),
- -1,
- "Collate's non-ascii style."
- ),
- etap:end_tests().
diff --git a/test/etap/010-file-basics.t b/test/etap/010-file-basics.t
deleted file mode 100755
index fb1b29e00..000000000
--- a/test/etap/010-file-basics.t
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(etap_match(Got, Expected, Desc),
- etap:fun_is(fun(XXXXXX) ->
- case XXXXXX of Expected -> true; _ -> false end
- end, Got, Desc)).
-
-filename() -> test_util:build_file("test/etap/temp.010").
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(19),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail()
- end,
- ok.
-
-test() ->
- etap:is({error, enoent}, couch_file:open("not a real file"),
- "Opening a non-existant file should return an enoent error."),
-
- etap:fun_is(
- fun({ok, _}) -> true; (_) -> false end,
- couch_file:open(filename() ++ ".1", [create, invalid_option]),
- "Invalid flags to open are ignored."
- ),
-
- {ok, Fd} = couch_file:open(filename() ++ ".0", [create, overwrite]),
- etap:ok(is_pid(Fd),
- "Returned file descriptor is a Pid"),
-
- etap:is({ok, 0}, couch_file:bytes(Fd),
- "Newly created files have 0 bytes."),
-
- ?etap_match(couch_file:append_term(Fd, foo), {ok, 0, _},
- "Appending a term returns the previous end of file position."),
-
- {ok, Size} = couch_file:bytes(Fd),
- etap:is_greater(Size, 0,
- "Writing a term increased the file size."),
-
- ?etap_match(couch_file:append_binary(Fd, <<"fancy!">>), {ok, Size, _},
- "Appending a binary returns the current file size."),
-
- etap:is({ok, foo}, couch_file:pread_term(Fd, 0),
- "Reading the first term returns what we wrote: foo"),
-
- etap:is({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Size),
- "Reading back the binary returns what we wrote: <<\"fancy\">>."),
-
- etap:is({ok, couch_compress:compress(foo, snappy)},
- couch_file:pread_binary(Fd, 0),
- "Reading a binary at a term position returns the term as binary."
- ),
-
- {ok, BinPos, _} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>),
- etap:is({ok, foo}, couch_file:pread_term(Fd, BinPos),
- "Reading a term from a written binary term representation succeeds."),
-
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- {ok, BigBinPos, _} = couch_file:append_binary(Fd, BigBin),
- etap:is({ok, BigBin}, couch_file:pread_binary(Fd, BigBinPos),
- "Reading a large term from a written representation succeeds."),
-
- ok = couch_file:write_header(Fd, hello),
- etap:is({ok, hello}, couch_file:read_header(Fd),
- "Reading a header succeeds."),
-
- {ok, BigBinPos2, _} = couch_file:append_binary(Fd, BigBin),
- etap:is({ok, BigBin}, couch_file:pread_binary(Fd, BigBinPos2),
- "Reading a large term from a written representation succeeds 2."),
-
- % append_binary == append_iolist?
- % Possible bug in pread_iolist or iolist() -> append_binary
- {ok, IOLPos, _} = couch_file:append_binary(Fd, ["foo", $m, <<"bam">>]),
- {ok, IoList} = couch_file:pread_iolist(Fd, IOLPos),
- etap:is(<<"foombam">>, iolist_to_binary(IoList),
- "Reading an results in a binary form of the written iolist()"),
-
- % XXX: How does on test fsync?
- etap:is(ok, couch_file:sync(Fd),
- "Syncing does not cause an error."),
-
- etap:is(ok, couch_file:truncate(Fd, Size),
- "Truncating a file succeeds."),
-
- %etap:is(eof, (catch couch_file:pread_binary(Fd, Size)),
- % "Reading data that was truncated fails.")
- etap:skip(fun() -> ok end,
- "No idea how to test reading beyond EOF"),
-
- etap:is({ok, foo}, couch_file:pread_term(Fd, 0),
- "Truncating does not affect data located before the truncation mark."),
-
- etap:is(ok, couch_file:close(Fd),
- "Files close properly."),
- ok.
diff --git a/test/etap/011-file-headers.t b/test/etap/011-file-headers.t
deleted file mode 100755
index a26b03207..000000000
--- a/test/etap/011-file-headers.t
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-%%! -pa ./src/couchdb -sasl errlog_type error -boot start_sasl -noshell
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-filename() -> test_util:build_file("test/etap/temp.011").
-sizeblock() -> 4096. % Need to keep this in sync with couch_file.erl
-
-main(_) ->
- test_util:init_code_path(),
- {S1, S2, S3} = now(),
- random:seed(S1, S2, S3),
-
- etap:plan(18),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail()
- end,
- ok.
-
-test() ->
- {ok, Fd} = couch_file:open(filename(), [create,overwrite]),
-
- etap:is({ok, 0}, couch_file:bytes(Fd),
- "File should be initialized to contain zero bytes."),
-
- etap:is(ok, couch_file:write_header(Fd, {<<"some_data">>, 32}),
- "Writing a header succeeds."),
-
- {ok, Size1} = couch_file:bytes(Fd),
- etap:is_greater(Size1, 0,
- "Writing a header allocates space in the file."),
-
- etap:is({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd),
- "Reading the header returns what we wrote."),
-
- etap:is(ok, couch_file:write_header(Fd, [foo, <<"more">>]),
- "Writing a second header succeeds."),
-
- {ok, Size2} = couch_file:bytes(Fd),
- etap:is_greater(Size2, Size1,
- "Writing a second header allocates more space."),
-
- etap:is({ok, [foo, <<"more">>]}, couch_file:read_header(Fd),
- "Reading the second header does not return the first header."),
-
- % Delete the second header.
- ok = couch_file:truncate(Fd, Size1),
-
- etap:is({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd),
- "Reading the header after a truncation returns a previous header."),
-
- couch_file:write_header(Fd, [foo, <<"more">>]),
- etap:is({ok, Size2}, couch_file:bytes(Fd),
- "Rewriting the same second header returns the same second size."),
-
- couch_file:write_header(Fd, erlang:make_tuple(5000, <<"CouchDB">>)),
- etap:is(
- couch_file:read_header(Fd),
- {ok, erlang:make_tuple(5000, <<"CouchDB">>)},
- "Headers larger than the block size can be saved (COUCHDB-1319)"
- ),
-
- ok = couch_file:close(Fd),
-
- % Now for the fun stuff. Try corrupting the second header and see
- % if we recover properly.
-
- % Destroy the 0x1 byte that marks a header
- check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
- etap:isnt(Expect, couch_file:read_header(CouchFd),
- "Should return a different header before corruption."),
- file:pwrite(RawFd, HeaderPos, <<0>>),
- etap:is(Expect, couch_file:read_header(CouchFd),
- "Corrupting the byte marker should read the previous header.")
- end),
-
- % Corrupt the size.
- check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
- etap:isnt(Expect, couch_file:read_header(CouchFd),
- "Should return a different header before corruption."),
- % +1 for 0x1 byte marker
- file:pwrite(RawFd, HeaderPos+1, <<10/integer>>),
- etap:is(Expect, couch_file:read_header(CouchFd),
- "Corrupting the size should read the previous header.")
- end),
-
- % Corrupt the MD5 signature
- check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
- etap:isnt(Expect, couch_file:read_header(CouchFd),
- "Should return a different header before corruption."),
- % +5 = +1 for 0x1 byte and +4 for term size.
- file:pwrite(RawFd, HeaderPos+5, <<"F01034F88D320B22">>),
- etap:is(Expect, couch_file:read_header(CouchFd),
- "Corrupting the MD5 signature should read the previous header.")
- end),
-
- % Corrupt the data
- check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
- etap:isnt(Expect, couch_file:read_header(CouchFd),
- "Should return a different header before corruption."),
- % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig
- file:pwrite(RawFd, HeaderPos+21, <<"some data goes here!">>),
- etap:is(Expect, couch_file:read_header(CouchFd),
- "Corrupting the header data should read the previous header.")
- end),
-
- ok.
-
-check_header_recovery(CheckFun) ->
- {ok, Fd} = couch_file:open(filename(), [create,overwrite]),
- {ok, RawFd} = file:open(filename(), [read, write, raw, binary]),
-
- {ok, _} = write_random_data(Fd),
- ExpectHeader = {some_atom, <<"a binary">>, 756},
- ok = couch_file:write_header(Fd, ExpectHeader),
-
- {ok, HeaderPos} = write_random_data(Fd),
- ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}),
-
- CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos),
-
- ok = file:close(RawFd),
- ok = couch_file:close(Fd),
- ok.
-
-write_random_data(Fd) ->
- write_random_data(Fd, 100 + random:uniform(1000)).
-
-write_random_data(Fd, 0) ->
- {ok, Bytes} = couch_file:bytes(Fd),
- {ok, (1 + Bytes div sizeblock()) * sizeblock()};
-write_random_data(Fd, N) ->
- Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]],
- Term = lists:nth(random:uniform(4) + 1, Choices),
- {ok, _, _} = couch_file:append_term(Fd, Term),
- write_random_data(Fd, N-1).
-
diff --git a/test/etap/020-btree-basics.t b/test/etap/020-btree-basics.t
deleted file mode 100755
index b0fb2d28c..000000000
--- a/test/etap/020-btree-basics.t
+++ /dev/null
@@ -1,265 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-%%! -pa ./src/couchdb -sasl errlog_type error -boot start_sasl -noshell
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-filename() -> test_util:build_file("test/etap/temp.020").
-rows() -> 250.
-
--record(btree, {
- fd,
- root,
- extract_kv,
- assemble_kv,
- less,
- reduce,
- compression
-}).
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(75),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail()
- end,
- ok.
-
-%% @todo Determine if this number should be greater to see if the btree was
-%% broken into multiple nodes. AKA "How do we appropiately detect if multiple
-%% nodes were created."
-test()->
- Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, rows())],
- etap:ok(test_kvs(Sorted), "Testing sorted keys"),
- etap:ok(test_kvs(lists:reverse(Sorted)), "Testing reversed sorted keys"),
- etap:ok(test_kvs(shuffle(Sorted)), "Testing shuffled keys."),
- ok.
-
-test_kvs(KeyValues) ->
- ReduceFun = fun
- (reduce, KVs) ->
- length(KVs);
- (rereduce, Reds) ->
- lists:sum(Reds)
- end,
-
- Keys = [K || {K, _} <- KeyValues],
-
- {ok, Fd} = couch_file:open(filename(), [create,overwrite]),
- {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}]),
- etap:ok(is_record(Btree, btree), "Created btree is really a btree record"),
- etap:is(Btree#btree.fd, Fd, "Btree#btree.fd is set correctly."),
- etap:is(Btree#btree.root, nil, "Btree#btree.root is set correctly."),
- etap:is(0, couch_btree:size(Btree), "Empty btrees have a 0 size."),
-
- Btree1 = couch_btree:set_options(Btree, [{reduce, ReduceFun}]),
- etap:is(Btree1#btree.reduce, ReduceFun, "Reduce function was set"),
- {ok, _, EmptyRes} = couch_btree:foldl(Btree1, fun(_, X) -> {ok, X+1} end, 0),
- etap:is(EmptyRes, 0, "Folding over an empty btree"),
-
- {ok, Btree2} = couch_btree:add_remove(Btree1, KeyValues, []),
- etap:ok(test_btree(Btree2, KeyValues),
- "Adding all keys at once returns a complete btree."),
-
- etap:is((couch_btree:size(Btree2) > 0), true,
- "Non empty btrees have a size > 0."),
- etap:is((couch_btree:size(Btree2) =< couch_file:bytes(Fd)), true,
- "Btree size is <= file size."),
-
- etap:fun_is(
- fun
- ({ok, {kp_node, _}}) -> true;
- (_) -> false
- end,
- couch_file:pread_term(Fd, element(1, Btree2#btree.root)),
- "Btree root pointer is a kp_node."
- ),
-
- {ok, Btree3} = couch_btree:add_remove(Btree2, [], Keys),
- etap:ok(test_btree(Btree3, []),
- "Removing all keys at once returns an empty btree."),
-
- etap:is(0, couch_btree:size(Btree3),
- "After removing all keys btree size is 0."),
-
- {Btree4, _} = lists:foldl(fun(KV, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- case couch_btree:size(BtAcc2) > PrevSize of
- true ->
- ok;
- false ->
- etap:bail("After inserting a value, btree size did not increase.")
- end,
- {BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree3, couch_btree:size(Btree3)}, KeyValues),
-
- etap:ok(test_btree(Btree4, KeyValues),
- "Adding all keys one at a time returns a complete btree."),
- etap:is((couch_btree:size(Btree4) > 0), true,
- "Non empty btrees have a size > 0."),
-
- {Btree5, _} = lists:foldl(fun({K, _}, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
- case couch_btree:size(BtAcc2) < PrevSize of
- true ->
- ok;
- false ->
- etap:bail("After removing a key, btree size did not decrease.")
- end,
- {BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree4, couch_btree:size(Btree4)}, KeyValues),
- etap:ok(test_btree(Btree5, []),
- "Removing all keys one at a time returns an empty btree."),
- etap:is(0, couch_btree:size(Btree5),
- "After removing all keys, one by one, btree size is 0."),
-
- KeyValuesRev = lists:reverse(KeyValues),
- {Btree6, _} = lists:foldl(fun(KV, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- case couch_btree:size(BtAcc2) > PrevSize of
- true ->
- ok;
- false ->
- etap:is(false, true,
- "After inserting a value, btree size did not increase.")
- end,
- {BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree5, couch_btree:size(Btree5)}, KeyValuesRev),
- etap:ok(test_btree(Btree6, KeyValues),
- "Adding all keys in reverse order returns a complete btree."),
-
- {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
- case Count rem 2 == 0 of
- true-> {Count+1, [X | Left], Right};
- false -> {Count+1, Left, [X | Right]}
- end
- end, {0, [], []}, KeyValues),
-
- etap:ok(test_add_remove(Btree6, Rem2Keys0, Rem2Keys1),
- "Add/Remove every other key."),
-
- etap:ok(test_add_remove(Btree6, Rem2Keys1, Rem2Keys0),
- "Add/Remove opposite every other key."),
-
- Size1 = couch_btree:size(Btree6),
- {ok, Btree7} = couch_btree:add_remove(Btree6, [], [K||{K,_}<-Rem2Keys1]),
- Size2 = couch_btree:size(Btree7),
- etap:is((Size2 < Size1), true, "Btree size decreased"),
- {ok, Btree8} = couch_btree:add_remove(Btree7, [], [K||{K,_}<-Rem2Keys0]),
- Size3 = couch_btree:size(Btree8),
- etap:is((Size3 < Size2), true, "Btree size decreased"),
- etap:is(Size3, 0, "Empty btree has size 0."),
- etap:ok(test_btree(Btree8, []),
- "Removing both halves of every other key returns an empty btree."),
-
- %% Third chunk (close out)
- etap:is(couch_file:close(Fd), ok, "closing out"),
- true.
-
-test_btree(Btree, KeyValues) ->
- ok = test_key_access(Btree, KeyValues),
- ok = test_lookup_access(Btree, KeyValues),
- ok = test_final_reductions(Btree, KeyValues),
- ok = test_traversal_callbacks(Btree, KeyValues),
- true.
-
-test_add_remove(Btree, OutKeyValues, RemainingKeyValues) ->
- Btree2 = lists:foldl(fun({K, _}, BtAcc) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
- BtAcc2
- end, Btree, OutKeyValues),
- true = test_btree(Btree2, RemainingKeyValues),
-
- Btree3 = lists:foldl(fun(KV, BtAcc) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- BtAcc2
- end, Btree2, OutKeyValues),
- true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues).
-
-test_key_access(Btree, List) ->
- FoldFun = fun(Element, {[HAcc|TAcc], Count}) ->
- case Element == HAcc of
- true -> {ok, {TAcc, Count + 1}};
- _ -> {ok, {TAcc, Count + 1}}
- end
- end,
- Length = length(List),
- Sorted = lists:sort(List),
- {ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}),
- {ok, _, {[], Length}} = couch_btree:fold(Btree, FoldFun, {Sorted, 0}, [{dir, rev}]),
- ok.
-
-test_lookup_access(Btree, KeyValues) ->
- FoldFun = fun({Key, Value}, {Key, Value}) -> {stop, true} end,
- lists:foreach(fun({Key, Value}) ->
- [{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]),
- {ok, _, true} = couch_btree:foldl(Btree, FoldFun, {Key, Value}, [{start_key, Key}])
- end, KeyValues).
-
-test_final_reductions(Btree, KeyValues) ->
- KVLen = length(KeyValues),
- FoldLFun = fun(_X, LeadingReds, Acc) ->
- CountToStart = KVLen div 3 + Acc,
- CountToStart = couch_btree:final_reduce(Btree, LeadingReds),
- {ok, Acc+1}
- end,
- FoldRFun = fun(_X, LeadingReds, Acc) ->
- CountToEnd = KVLen - KVLen div 3 + Acc,
- CountToEnd = couch_btree:final_reduce(Btree, LeadingReds),
- {ok, Acc+1}
- end,
- {LStartKey, _} = case KVLen of
- 0 -> {nil, nil};
- _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues))
- end,
- {RStartKey, _} = case KVLen of
- 0 -> {nil, nil};
- _ -> lists:nth(KVLen div 3, lists:sort(KeyValues))
- end,
- {ok, _, FoldLRed} = couch_btree:foldl(Btree, FoldLFun, 0, [{start_key, LStartKey}]),
- {ok, _, FoldRRed} = couch_btree:fold(Btree, FoldRFun, 0, [{dir, rev}, {start_key, RStartKey}]),
- KVLen = FoldLRed + FoldRRed,
- ok.
-
-test_traversal_callbacks(Btree, _KeyValues) ->
- FoldFun =
- fun
- (visit, _GroupedKey, _Unreduced, Acc) ->
- {ok, Acc andalso false};
- (traverse, _LK, _Red, Acc) ->
- {skip, Acc andalso true}
- end,
- % With 250 items the root is a kp. Always skipping should reduce to true.
- {ok, _, true} = couch_btree:fold(Btree, FoldFun, true, [{dir, fwd}]),
- ok.
-
-shuffle(List) ->
- randomize(round(math:log(length(List)) + 0.5), List).
-
-randomize(1, List) ->
- randomize(List);
-randomize(T, List) ->
- lists:foldl(fun(_E, Acc) ->
- randomize(Acc)
- end, randomize(List), lists:seq(1, (T - 1))).
-
-randomize(List) ->
- D = lists:map(fun(A) ->
- {random:uniform(), A}
- end, List),
- {_, D1} = lists:unzip(lists:keysort(1, D)),
- D1.
diff --git a/test/etap/021-btree-reductions.t b/test/etap/021-btree-reductions.t
deleted file mode 100755
index e80ac2ded..000000000
--- a/test/etap/021-btree-reductions.t
+++ /dev/null
@@ -1,237 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-%%! -pa ./src/couchdb -sasl errlog_type error -boot start_sasl -noshell
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-filename() -> "./test/etap/temp.021".
-rows() -> 1000.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(20),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail()
- end,
- ok.
-
-test()->
- ReduceFun = fun
- (reduce, KVs) -> length(KVs);
- (rereduce, Reds) -> lists:sum(Reds)
- end,
-
- {ok, Fd} = couch_file:open(filename(), [create,overwrite]),
- {ok, Btree} = couch_btree:open(nil, Fd, [{reduce, ReduceFun}]),
-
- % Create a list, of {"even", Value} or {"odd", Value} pairs.
- {_, EvenOddKVs} = lists:foldl(fun(Idx, {Key, Acc}) ->
- case Key of
- "even" -> {"odd", [{{Key, Idx}, 1} | Acc]};
- _ -> {"even", [{{Key, Idx}, 1} | Acc]}
- end
- end, {"odd", []}, lists:seq(1, rows())),
-
- {ok, Btree2} = couch_btree:add_remove(Btree, EvenOddKVs, []),
-
- GroupFun = fun({K1, _}, {K2, _}) -> K1 == K2 end,
- FoldFun = fun(GroupedKey, Unreduced, Acc) ->
- {ok, [{GroupedKey, couch_btree:final_reduce(Btree2, Unreduced)} | Acc]}
- end,
-
- {SK1, EK1} = {{"even", -1}, {"even", foo}},
- {SK2, EK2} = {{"odd", -1}, {"odd", foo}},
-
- etap:fun_is(
- fun
- ({ok, [{{"odd", _}, 500}, {{"even", _}, 500}]}) ->
- true;
- (_) ->
- false
- end,
- couch_btree:fold_reduce(Btree2, FoldFun, [], [{key_group_fun, GroupFun}]),
- "Reduction works with no specified direction, startkey, or endkey."
- ),
-
- etap:fun_is(
- fun
- ({ok, [{{"odd", _}, 500}, {{"even", _}, 500}]}) ->
- true;
- (_) ->
- false
- end,
- couch_btree:fold_reduce(Btree2, FoldFun, [], [{key_group_fun, GroupFun}, {dir, fwd}]),
- "Reducing forward works with no startkey or endkey."
- ),
-
- etap:fun_is(
- fun
- ({ok, [{{"even", _}, 500}, {{"odd", _}, 500}]}) ->
- true;
- (_) ->
- false
- end,
- couch_btree:fold_reduce(Btree2, FoldFun, [], [{key_group_fun, GroupFun}, {dir, rev}]),
- "Reducing backwards works with no startkey or endkey."
- ),
-
- etap:fun_is(
- fun
- ({ok, [{{"odd", _}, 500}, {{"even", _}, 500}]}) ->
- true;
- (_) ->
- false
- end,
- couch_btree:fold_reduce(Btree2, FoldFun, [], [{dir, fwd}, {key_group_fun, GroupFun}, {start_key, SK1}, {end_key, EK2}]),
- "Reducing works over the entire range with startkey and endkey set."
- ),
-
- etap:fun_is(
- fun
- ({ok, [{{"even", _}, 500}]}) -> true;
- (_) -> false
- end,
- couch_btree:fold_reduce(Btree2, FoldFun, [], [{dir, fwd}, {key_group_fun, GroupFun}, {start_key, SK1}, {end_key, EK1}]),
- "Reducing forward over first half works with a startkey and endkey."
- ),
-
- etap:fun_is(
- fun
- ({ok, [{{"odd", _}, 500}]}) -> true;
- (_) -> false
- end,
- couch_btree:fold_reduce(Btree2, FoldFun, [], [{dir, fwd}, {key_group_fun, GroupFun}, {start_key, SK2}, {end_key, EK2}]),
- "Reducing forward over second half works with second startkey and endkey"
- ),
-
- etap:fun_is(
- fun
- ({ok, [{{"odd", _}, 500}]}) -> true;
- (_) -> false
- end,
- couch_btree:fold_reduce(Btree2, FoldFun, [], [{dir, rev}, {key_group_fun, GroupFun}, {start_key, EK2}, {end_key, SK2}]),
- "Reducing in reverse works after swapping the startkey and endkey."
- ),
-
- etap:fun_is(
- fun
- ({ok, [{{"even", _}, 500}, {{"odd", _}, 500}]}) ->
- true;
- (_) ->
- false
- end,
- couch_btree:fold_reduce(Btree2, FoldFun, [], [{dir, rev}, {key_group_fun, GroupFun}, {start_key, EK2}, {end_key, SK1}]),
- "Reducing in reverse results in reversed accumulator."
- ),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, fwd}, {key_group_fun, GroupFun},
- {start_key, {"even", 0}}, {end_key, {"odd", rows() + 1}}
- ]),
- {ok, [{{"odd", 1}, 500}, {{"even", 2}, 500}]},
- "Right fold reduce value for whole range with inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, fwd}, {key_group_fun, GroupFun},
- {start_key, {"even", 0}}, {end_key_gt, {"odd", 999}}
- ]),
- {ok, [{{"odd", 1}, 499}, {{"even", 2}, 500}]},
- "Right fold reduce value for whole range without inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, rev}, {key_group_fun, GroupFun},
- {start_key, {"odd", 999}}, {end_key, {"even", 2}}
- ]),
- {ok, [{{"even", 1000}, 500}, {{"odd", 999}, 500}]},
- "Right fold reduce value for whole reversed range with inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, rev}, {key_group_fun, GroupFun},
- {start_key, {"odd", 999}}, {end_key_gt, {"even", 2}}
- ]),
- {ok, [{{"even", 1000}, 499}, {{"odd", 999}, 500}]},
- "Right fold reduce value for whole reversed range without inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, fwd}, {key_group_fun, GroupFun},
- {start_key, {"even", 0}}, {end_key, {"odd", 499}}
- ]),
- {ok, [{{"odd", 1}, 250}, {{"even", 2}, 500}]},
- "Right fold reduce value for first half with inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, fwd}, {key_group_fun, GroupFun},
- {start_key, {"even", 0}}, {end_key_gt, {"odd", 499}}
- ]),
- {ok, [{{"odd", 1}, 249}, {{"even", 2}, 500}]},
- "Right fold reduce value for first half without inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, rev}, {key_group_fun, GroupFun},
- {start_key, {"odd", 999}}, {end_key, {"even", 500}}
- ]),
- {ok, [{{"even", 1000}, 251}, {{"odd", 999}, 500}]},
- "Right fold reduce value for first half reversed with inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, rev}, {key_group_fun, GroupFun},
- {start_key, {"odd", 999}}, {end_key_gt, {"even", 500}}
- ]),
- {ok, [{{"even", 1000}, 250}, {{"odd", 999}, 500}]},
- "Right fold reduce value for first half reversed without inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, fwd}, {key_group_fun, GroupFun},
- {start_key, {"even", 500}}, {end_key, {"odd", 999}}
- ]),
- {ok, [{{"odd", 1}, 500}, {{"even", 500}, 251}]},
- "Right fold reduce value for second half with inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, fwd}, {key_group_fun, GroupFun},
- {start_key, {"even", 500}}, {end_key_gt, {"odd", 999}}
- ]),
- {ok, [{{"odd", 1}, 499}, {{"even", 500}, 251}]},
- "Right fold reduce value for second half without inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, rev}, {key_group_fun, GroupFun},
- {start_key, {"odd", 501}}, {end_key, {"even", 2}}
- ]),
- {ok, [{{"even", 1000}, 500}, {{"odd", 501}, 251}]},
- "Right fold reduce value for second half reversed with inclusive end key"),
-
- etap:is(
- couch_btree:fold_reduce(Btree2, FoldFun, [], [
- {dir, rev}, {key_group_fun, GroupFun},
- {start_key, {"odd", 501}}, {end_key_gt, {"even", 2}}
- ]),
- {ok, [{{"even", 1000}, 499}, {{"odd", 501}, 251}]},
- "Right fold reduce value for second half reversed without inclusive end key"),
-
- couch_file:close(Fd).
diff --git a/test/etap/030-doc-from-json.t b/test/etap/030-doc-from-json.t
deleted file mode 100755
index b0c393efe..000000000
--- a/test/etap/030-doc-from-json.t
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-%%! -pa ./src/couchdb -pa ./src/mochiweb -sasl errlog_type false -noshell
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% XXX: Figure out how to -include("couch_db.hrl")
--record(doc, {id= <<"">>, revs={0, []}, body={[]},
- atts=[], deleted=false, meta=[]}).
--record(att, {name, type, att_len, disk_len, md5= <<>>, revpos=0, data,
- encoding=identity}).
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(26),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail()
- end,
- ok.
-
-test() ->
- couch_config:start_link(test_util:config_files()),
- couch_config:set("attachments", "compression_level", "0", false),
- ok = test_from_json_success(),
- ok = test_from_json_errors(),
- ok.
-
-test_from_json_success() ->
- Cases = [
- {
- {[]},
- #doc{},
- "Return an empty document for an empty JSON object."
- },
- {
- {[{<<"_id">>, <<"zing!">>}]},
- #doc{id= <<"zing!">>},
- "Parses document ids."
- },
- {
- {[{<<"_id">>, <<"_design/foo">>}]},
- #doc{id= <<"_design/foo">>},
- "_design/document ids."
- },
- {
- {[{<<"_id">>, <<"_local/bam">>}]},
- #doc{id= <<"_local/bam">>},
- "_local/document ids."
- },
- {
- {[{<<"_rev">>, <<"4-230234">>}]},
- #doc{revs={4, [<<"230234">>]}},
- "_rev stored in revs."
- },
- {
- {[{<<"soap">>, 35}]},
- #doc{body={[{<<"soap">>, 35}]}},
- "Non underscore prefixed fields stored in body."
- },
- {
- {[{<<"_attachments">>, {[
- {<<"my_attachment.fu">>, {[
- {<<"stub">>, true},
- {<<"content_type">>, <<"application/awesome">>},
- {<<"length">>, 45}
- ]}},
- {<<"noahs_private_key.gpg">>, {[
- {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>},
- {<<"content_type">>, <<"application/pgp-signature">>}
- ]}}
- ]}}]},
- #doc{atts=[
- #att{
- name = <<"my_attachment.fu">>,
- data = stub,
- type = <<"application/awesome">>,
- att_len = 45,
- disk_len = 45,
- revpos = nil
- },
- #att{
- name = <<"noahs_private_key.gpg">>,
- data = <<"I have a pet fish!">>,
- type = <<"application/pgp-signature">>,
- att_len = 18,
- disk_len = 18,
- revpos = 0
- }
- ]},
- "Attachments are parsed correctly."
- },
- {
- {[{<<"_deleted">>, true}]},
- #doc{deleted=true},
- "_deleted controls the deleted field."
- },
- {
- {[{<<"_deleted">>, false}]},
- #doc{},
- "{\"_deleted\": false} is ok."
- },
- {
- {[
- {<<"_revisions">>, {[
- {<<"start">>, 4},
- {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}
- ]}},
- {<<"_rev">>, <<"6-something">>}
- ]},
- #doc{revs={4, [<<"foo1">>, <<"phi3">>, <<"omega">>]}},
- "_revisions attribute are preferred to _rev."
- },
- {
- {[{<<"_revs_info">>, dropping}]},
- #doc{},
- "Drops _revs_info."
- },
- {
- {[{<<"_local_seq">>, dropping}]},
- #doc{},
- "Drops _local_seq."
- },
- {
- {[{<<"_conflicts">>, dropping}]},
- #doc{},
- "Drops _conflicts."
- },
- {
- {[{<<"_deleted_conflicts">>, dropping}]},
- #doc{},
- "Drops _deleted_conflicts."
- }
- ],
-
- lists:foreach(fun({EJson, Expect, Mesg}) ->
- etap:is(couch_doc:from_json_obj(EJson), Expect, Mesg)
- end, Cases),
- ok.
-
-test_from_json_errors() ->
- Cases = [
- {
- [],
- {bad_request, "Document must be a JSON object"},
- "arrays are invalid"
- },
- {
- 4,
- {bad_request, "Document must be a JSON object"},
- "integers are invalid"
- },
- {
- true,
- {bad_request, "Document must be a JSON object"},
- "literals are invalid"
- },
- {
- {[{<<"_id">>, {[{<<"foo">>, 5}]}}]},
- {bad_request, <<"Document id must be a string">>},
- "Document id must be a string."
- },
- {
- {[{<<"_id">>, <<"_random">>}]},
- {bad_request,
- <<"Only reserved document ids may start with underscore.">>},
- "Disallow arbitrary underscore prefixed docids."
- },
- {
- {[{<<"_rev">>, 5}]},
- {bad_request, <<"Invalid rev format">>},
- "_rev must be a string"
- },
- {
- {[{<<"_rev">>, "foobar"}]},
- {bad_request, <<"Invalid rev format">>},
- "_rev must be %d-%s"
- },
- {
- {[{<<"_rev">>, "foo-bar"}]},
- "Error if _rev's integer expection is broken."
- },
- {
- {[{<<"_revisions">>, {[{<<"start">>, true}]}}]},
- {doc_validation, "_revisions.start isn't an integer."},
- "_revisions.start must be an integer."
- },
- {
- {[{<<"_revisions">>, {[
- {<<"start">>, 0},
- {<<"ids">>, 5}
- ]}}]},
- {doc_validation, "_revisions.ids isn't a array."},
- "_revions.ids must be a list."
- },
- {
- {[{<<"_revisions">>, {[
- {<<"start">>, 0},
- {<<"ids">>, [5]}
- ]}}]},
- {doc_validation, "RevId isn't a string"},
- "Revision ids must be strings."
- },
- {
- {[{<<"_something">>, 5}]},
- {doc_validation, <<"Bad special document member: _something">>},
- "Underscore prefix fields are reserved."
- }
- ],
-
- lists:foreach(fun
- ({EJson, Expect, Mesg}) ->
- Error = (catch couch_doc:from_json_obj(EJson)),
- etap:is(Error, Expect, Mesg);
- ({EJson, Mesg}) ->
- try
- couch_doc:from_json_obj(EJson),
- etap:ok(false, "Conversion failed to raise an exception.")
- catch
- _:_ -> etap:ok(true, Mesg)
- end
- end, Cases),
- ok.
diff --git a/test/etap/031-doc-to-json.t b/test/etap/031-doc-to-json.t
deleted file mode 100755
index ce950f959..000000000
--- a/test/etap/031-doc-to-json.t
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-%%! -pa ./src/couchdb -pa ./src/mochiweb -sasl errlog_type false -noshell
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% XXX: Figure out how to -include("couch_db.hrl")
--record(doc, {id= <<"">>, revs={0, []}, body={[]},
- atts=[], deleted=false, meta=[]}).
--record(att, {name, type, att_len, disk_len, md5= <<>>, revpos=0, data,
- encoding=identity}).
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(12),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail()
- end,
- ok.
-
-test() ->
- couch_config:start_link(test_util:config_files()),
- couch_config:set("attachments", "compression_level", "0", false),
- ok = test_to_json_success(),
- ok.
-
-test_to_json_success() ->
- Cases = [
- {
- #doc{},
- {[{<<"_id">>, <<"">>}]},
- "Empty docs are {\"_id\": \"\"}"
- },
- {
- #doc{id= <<"foo">>},
- {[{<<"_id">>, <<"foo">>}]},
- "_id is added."
- },
- {
- #doc{revs={5, ["foo"]}},
- {[{<<"_id">>, <<>>}, {<<"_rev">>, <<"5-foo">>}]},
- "_rev is added."
- },
- {
- [revs],
- #doc{revs={5, [<<"first">>, <<"second">>]}},
- {[
- {<<"_id">>, <<>>},
- {<<"_rev">>, <<"5-first">>},
- {<<"_revisions">>, {[
- {<<"start">>, 5},
- {<<"ids">>, [<<"first">>, <<"second">>]}
- ]}}
- ]},
- "_revisions include with revs option"
- },
- {
- #doc{body={[{<<"foo">>, <<"bar">>}]}},
- {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}]},
- "Arbitrary fields are added."
- },
- {
- #doc{deleted=true, body={[{<<"foo">>, <<"bar">>}]}},
- {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}, {<<"_deleted">>, true}]},
- "Deleted docs no longer drop body members."
- },
- {
- #doc{meta=[
- {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]}
- ]},
- {[
- {<<"_id">>, <<>>},
- {<<"_revs_info">>, [
- {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]},
- {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]}
- ]}
- ]},
- "_revs_info field is added correctly."
- },
- {
- #doc{meta=[{local_seq, 5}]},
- {[{<<"_id">>, <<>>}, {<<"_local_seq">>, 5}]},
- "_local_seq is added as an integer."
- },
- {
- #doc{meta=[{conflicts, [{3, <<"yep">>}, {1, <<"snow">>}]}]},
- {[
- {<<"_id">>, <<>>},
- {<<"_conflicts">>, [<<"3-yep">>, <<"1-snow">>]}
- ]},
- "_conflicts is added as an array of strings."
- },
- {
- #doc{meta=[{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]},
- {[
- {<<"_id">>, <<>>},
- {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]}
- ]},
- "_deleted_conflicsts is added as an array of strings."
- },
- {
- #doc{atts=[
- #att{
- name = <<"big.xml">>,
- type = <<"xml/sucks">>,
- data = fun() -> ok end,
- revpos = 1,
- att_len = 400,
- disk_len = 400
- },
- #att{
- name = <<"fast.json">>,
- type = <<"json/ftw">>,
- data = <<"{\"so\": \"there!\"}">>,
- revpos = 1,
- att_len = 16,
- disk_len = 16
- }
- ]},
- {[
- {<<"_id">>, <<>>},
- {<<"_attachments">>, {[
- {<<"big.xml">>, {[
- {<<"content_type">>, <<"xml/sucks">>},
- {<<"revpos">>, 1},
- {<<"length">>, 400},
- {<<"stub">>, true}
- ]}},
- {<<"fast.json">>, {[
- {<<"content_type">>, <<"json/ftw">>},
- {<<"revpos">>, 1},
- {<<"length">>, 16},
- {<<"stub">>, true}
- ]}}
- ]}}
- ]},
- "Attachments attached as stubs only include a length."
- },
- {
- [attachments],
- #doc{atts=[
- #att{
- name = <<"stuff.txt">>,
- type = <<"text/plain">>,
- data = fun() -> <<"diet pepsi">> end,
- revpos = 1,
- att_len = 10,
- disk_len = 10
- },
- #att{
- name = <<"food.now">>,
- type = <<"application/food">>,
- revpos = 1,
- data = <<"sammich">>
- }
- ]},
- {[
- {<<"_id">>, <<>>},
- {<<"_attachments">>, {[
- {<<"stuff.txt">>, {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"revpos">>, 1},
- {<<"data">>, <<"ZGlldCBwZXBzaQ==">>}
- ]}},
- {<<"food.now">>, {[
- {<<"content_type">>, <<"application/food">>},
- {<<"revpos">>, 1},
- {<<"data">>, <<"c2FtbWljaA==">>}
- ]}}
- ]}}
- ]},
- "Attachments included inline with attachments option."
- }
- ],
-
- lists:foreach(fun
- ({Doc, EJson, Mesg}) ->
- etap:is(couch_doc:to_json_obj(Doc, []), EJson, Mesg);
- ({Options, Doc, EJson, Mesg}) ->
- etap:is(couch_doc:to_json_obj(Doc, Options), EJson, Mesg)
- end, Cases),
- ok.
-
diff --git a/test/etap/040-util.t b/test/etap/040-util.t
deleted file mode 100755
index d57a32ed2..000000000
--- a/test/etap/040-util.t
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- application:start(crypto),
-
- etap:plan(14),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- % to_existing_atom
- etap:is(true, couch_util:to_existing_atom(true), "An atom is an atom."),
- etap:is(foo, couch_util:to_existing_atom(<<"foo">>),
- "A binary foo is the atom foo."),
- etap:is(foobarbaz, couch_util:to_existing_atom("foobarbaz"),
- "A list of atoms is one munged atom."),
-
- % implode
- etap:is([1, 38, 2, 38, 3], couch_util:implode([1,2,3],"&"),
- "use & as separator in list."),
-
- % trim
- Strings = [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"],
- etap:ok(lists:all(fun(S) -> couch_util:trim(S) == "foo" end, Strings),
- "everything here trimmed should be foo."),
-
- % abs_pathname
- {ok, Cwd} = file:get_cwd(),
- etap:is(Cwd ++ "/foo", couch_util:abs_pathname("./foo"),
- "foo is in this directory."),
-
- % should_flush
- etap:ok(not couch_util:should_flush(),
- "Not using enough memory to flush."),
- AcquireMem = fun() ->
- _IntsToAGazillion = lists:seq(1, 200000),
- _LotsOfData = lists:map(
- fun(Int) -> {Int, <<"foobar">>} end,
- lists:seq(1, 500000)),
- etap:ok(couch_util:should_flush(),
- "Allocation 200K tuples puts us above the memory threshold.")
- end,
- AcquireMem(),
-
- etap:ok(not couch_util:should_flush(),
- "Checking to flush invokes GC."),
-
- % verify
- etap:is(true, couch_util:verify("It4Vooya", "It4Vooya"),
- "String comparison."),
- etap:is(false, couch_util:verify("It4VooyaX", "It4Vooya"),
- "String comparison (unequal lengths)."),
- etap:is(true, couch_util:verify(<<"ahBase3r">>, <<"ahBase3r">>),
- "Binary comparison."),
- etap:is(false, couch_util:verify(<<"ahBase3rX">>, <<"ahBase3r">>),
- "Binary comparison (unequal lengths)."),
- etap:is(false, couch_util:verify(nil, <<"ahBase3r">>),
- "Binary comparison with atom."),
-
- ok.
diff --git a/test/etap/041-uuid-gen-seq.ini b/test/etap/041-uuid-gen-seq.ini
deleted file mode 100644
index 94cebc6f5..000000000
--- a/test/etap/041-uuid-gen-seq.ini
+++ /dev/null
@@ -1,19 +0,0 @@
-; Licensed to the Apache Software Foundation (ASF) under one
-; or more contributor license agreements. See the NOTICE file
-; distributed with this work for additional information
-; regarding copyright ownership. The ASF licenses this file
-; to you under the Apache License, Version 2.0 (the
-; "License"); you may not use this file except in compliance
-; with the License. You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing,
-; software distributed under the License is distributed on an
-; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-; KIND, either express or implied. See the License for the
-; specific language governing permissions and limitations
-; under the License.
-
-[uuids]
-algorithm = sequential
diff --git a/test/etap/041-uuid-gen-utc.ini b/test/etap/041-uuid-gen-utc.ini
deleted file mode 100644
index c2b838314..000000000
--- a/test/etap/041-uuid-gen-utc.ini
+++ /dev/null
@@ -1,19 +0,0 @@
-; Licensed to the Apache Software Foundation (ASF) under one
-; or more contributor license agreements. See the NOTICE file
-; distributed with this work for additional information
-; regarding copyright ownership. The ASF licenses this file
-; to you under the Apache License, Version 2.0 (the
-; "License"); you may not use this file except in compliance
-; with the License. You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing,
-; software distributed under the License is distributed on an
-; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-; KIND, either express or implied. See the License for the
-; specific language governing permissions and limitations
-; under the License.
-
-[uuids]
-algorithm = utc_random
diff --git a/test/etap/041-uuid-gen.t b/test/etap/041-uuid-gen.t
deleted file mode 100755
index 72349698e..000000000
--- a/test/etap/041-uuid-gen.t
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-default_config() ->
- test_util:build_file("etc/couchdb/default_dev.ini").
-
-seq_alg_config() ->
- test_util:source_file("test/etap/041-uuid-gen-seq.ini").
-
-utc_alg_config() ->
- test_util:source_file("test/etap/041-uuid-gen-utc.ini").
-
-utc_id_alg_config() ->
- test_util:source_file("test/etap/041-uuid-gen-id.ini").
-
-% Run tests and wait for the gen_servers to shutdown
-run_test(IniFiles, Test) ->
- {ok, Pid} = couch_config:start_link(IniFiles),
- erlang:monitor(process, Pid),
- couch_uuids:start(),
- Test(),
- couch_uuids:stop(),
- couch_config:stop(),
- receive
- {'DOWN', _, _, Pid, _} -> ok;
- _Other -> etap:diag("OTHER: ~p~n", [_Other])
- after
- 1000 -> throw({timeout_error, config_stop})
- end.
-
-main(_) ->
- test_util:init_code_path(),
- application:start(crypto),
- etap:plan(9),
-
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
-
- TestUnique = fun() ->
- etap:is(
- test_unique(10000, couch_uuids:new()),
- true,
- "Can generate 10K unique IDs"
- )
- end,
- run_test([default_config()], TestUnique),
- run_test([default_config(), seq_alg_config()], TestUnique),
- run_test([default_config(), utc_alg_config()], TestUnique),
- run_test([default_config(), utc_id_alg_config()], TestUnique),
-
- TestMonotonic = fun () ->
- etap:is(
- couch_uuids:new() < couch_uuids:new(),
- true,
- "should produce monotonically increasing ids"
- )
- end,
- run_test([default_config(), seq_alg_config()], TestMonotonic),
- run_test([default_config(), utc_alg_config()], TestMonotonic),
- run_test([default_config(), utc_id_alg_config()], TestMonotonic),
-
- % Pretty sure that the average of a uniform distribution is the
- % midpoint of the range. Thus, to exceed a threshold, we need
- % approximately Total / (Range/2 + RangeMin) samples.
- %
- % In our case this works out to be 8194. (0xFFF000 / 0x7FF)
- % These tests just fudge the limits for a good generator at 25%
- % in either direction. Technically it should be possible to generate
- % bounds that will show if your random number generator is not
- % sufficiently random but I hated statistics in school.
- TestRollOver = fun() ->
- UUID = binary_to_list(couch_uuids:new()),
- Prefix = element(1, lists:split(26, UUID)),
- N = gen_until_pref_change(Prefix,0),
- etap:diag("N is: ~p~n",[N]),
- etap:is(
- N >= 5000 andalso N =< 11000,
- true,
- "should roll over every so often."
- )
- end,
- run_test([default_config(), seq_alg_config()], TestRollOver),
-
- TestSuffix = fun() ->
- UUID = binary_to_list(couch_uuids:new()),
- Suffix = get_suffix(UUID),
- etap:is(
- test_same_suffix(100, Suffix),
- true,
- "utc_id ids should have the same suffix."
- )
- end,
- run_test([default_config(), utc_id_alg_config()], TestSuffix).
-
-test_unique(0, _) ->
- true;
-test_unique(N, UUID) ->
- case couch_uuids:new() of
- UUID ->
- etap:diag("N: ~p~n", [N]),
- false;
- Else -> test_unique(N-1, Else)
- end.
-
-get_prefix(UUID) ->
- element(1, lists:split(26, binary_to_list(UUID))).
-
-gen_until_pref_change(_, Count) when Count > 8251 ->
- Count;
-gen_until_pref_change(Prefix, N) ->
- case get_prefix(couch_uuids:new()) of
- Prefix -> gen_until_pref_change(Prefix, N+1);
- _ -> N
- end.
-
-get_suffix(UUID) when is_binary(UUID)->
- get_suffix(binary_to_list(UUID));
-get_suffix(UUID) ->
- element(2, lists:split(14, UUID)).
-
-test_same_suffix(0, _) ->
- true;
-test_same_suffix(N, Suffix) ->
- case get_suffix(couch_uuids:new()) of
- Suffix -> test_same_suffix(N-1, Suffix);
- _ -> false
- end.
diff --git a/test/etap/042-work-queue.t b/test/etap/042-work-queue.t
deleted file mode 100755
index 8594a6f87..000000000
--- a/test/etap/042-work-queue.t
+++ /dev/null
@@ -1,500 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(155),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- ok = crypto:start(),
- test_single_consumer_max_item_count(),
- test_single_consumer_max_size(),
- test_single_consumer_max_item_count_and_size(),
- test_multiple_consumers(),
- ok.
-
-
-test_single_consumer_max_item_count() ->
- etap:diag("Spawning a queue with 3 max items, 1 producer and 1 consumer"),
-
- {ok, Q} = couch_work_queue:new([{max_items, 3}]),
- Producer = spawn_producer(Q),
- Consumer = spawn_consumer(Q),
-
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
-
- consume(Consumer, 1),
- etap:is(ping(Consumer), timeout,
- "Consumer blocked when attempting to dequeue 1 item from empty queue"),
-
- Item1 = produce(Producer, 10),
- etap:is(ping(Producer), ok, "Producer not blocked"),
-
- etap:is(ping(Consumer), ok, "Consumer unblocked"),
- etap:is(last_consumer_items(Consumer), {ok, [Item1]},
- "Consumer received the right item"),
-
- Item2 = produce(Producer, 20),
- etap:is(ping(Producer), ok, "Producer not blocked with non full queue"),
- etap:is(couch_work_queue:item_count(Q), 1, "Queue item count is 1"),
-
- Item3 = produce(Producer, 15),
- etap:is(ping(Producer), ok, "Producer not blocked with non full queue"),
- etap:is(couch_work_queue:item_count(Q), 2, "Queue item count is 2"),
-
- Item4 = produce(Producer, 3),
- etap:is(couch_work_queue:item_count(Q), 3, "Queue item count is 3"),
- etap:is(ping(Producer), timeout, "Producer blocked with full queue"),
-
- consume(Consumer, 2),
- etap:is(ping(Consumer), ok,
- "Consumer not blocked when attempting to dequeue 2 items from queue"),
- etap:is(last_consumer_items(Consumer), {ok, [Item2, Item3]},
- "Consumer received the right items"),
- etap:is(couch_work_queue:item_count(Q), 1, "Queue item count is 1"),
-
- consume(Consumer, 2),
- etap:is(ping(Consumer), ok,
- "Consumer not blocked when attempting to dequeue 2 items from queue"),
- etap:is(last_consumer_items(Consumer), {ok, [Item4]},
- "Consumer received the right item"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
-
- consume(Consumer, 100),
- etap:is(ping(Consumer), timeout,
- "Consumer blocked when attempting to dequeue 100 items from empty queue"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
-
- Item5 = produce(Producer, 11),
- etap:is(ping(Producer), ok, "Producer not blocked with empty queue"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
-
- Item6 = produce(Producer, 19),
- etap:is(ping(Producer), ok, "Producer not blocked with non full queue"),
- etap:is(couch_work_queue:item_count(Q), 1, "Queue item count is 1"),
-
- Item7 = produce(Producer, 2),
- etap:is(ping(Producer), ok, "Producer not blocked with non full queue"),
- etap:is(couch_work_queue:item_count(Q), 2, "Queue item count is 2"),
-
- Item8 = produce(Producer, 33),
- etap:is(ping(Producer), timeout, "Producer blocked with full queue"),
- etap:is(couch_work_queue:item_count(Q), 3, "Queue item count is 3"),
-
- etap:is(ping(Consumer), ok, "Consumer unblocked"),
- etap:is(last_consumer_items(Consumer), {ok, [Item5]},
- "Consumer received the first queued item"),
- etap:is(couch_work_queue:item_count(Q), 3, "Queue item count is 3"),
-
- consume(Consumer, all),
- etap:is(ping(Consumer), ok,
- "Consumer not blocked when attempting to dequeue all items from queue"),
- etap:is(last_consumer_items(Consumer), {ok, [Item6, Item7, Item8]},
- "Consumer received all queued items"),
-
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
-
- etap:is(close_queue(Q), ok, "Closed queue"),
- consume(Consumer, 1),
- etap:is(last_consumer_items(Consumer), closed, "Consumer got closed queue"),
- etap:is(couch_work_queue:item_count(Q), closed, "Queue closed"),
- etap:is(couch_work_queue:size(Q), closed, "Queue closed"),
-
- stop(Producer, "producer"),
- stop(Consumer, "consumer").
-
-
-
-test_single_consumer_max_size() ->
- etap:diag("Spawning a queue with max size of 160 bytes, "
- "1 producer and 1 consumer"),
-
- {ok, Q} = couch_work_queue:new([{max_size, 160}]),
- Producer = spawn_producer(Q),
- Consumer = spawn_consumer(Q),
-
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- consume(Consumer, 1),
- etap:is(ping(Consumer), timeout,
- "Consumer blocked when attempting to dequeue 1 item from empty queue"),
-
- Item1 = produce(Producer, 50),
- etap:is(ping(Producer), ok, "Producer not blocked"),
-
- etap:is(ping(Consumer), ok, "Consumer unblocked"),
- etap:is(last_consumer_items(Consumer), {ok, [Item1]},
- "Consumer received the right item"),
-
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- Item2 = produce(Producer, 50),
- etap:is(ping(Producer), ok, "Producer not blocked"),
- etap:is(couch_work_queue:item_count(Q), 1, "Queue item count is 1"),
- etap:is(couch_work_queue:size(Q), 50, "Queue size is 50 bytes"),
-
- Item3 = produce(Producer, 50),
- etap:is(ping(Producer), ok, "Producer not blocked"),
- etap:is(couch_work_queue:item_count(Q), 2, "Queue item count is 2"),
- etap:is(couch_work_queue:size(Q), 100, "Queue size is 100 bytes"),
-
- Item4 = produce(Producer, 61),
- etap:is(ping(Producer), timeout, "Producer blocked"),
- etap:is(couch_work_queue:item_count(Q), 3, "Queue item count is 3"),
- etap:is(couch_work_queue:size(Q), 161, "Queue size is 161 bytes"),
-
- consume(Consumer, 1),
- etap:is(ping(Consumer), ok,
- "Consumer not blocked when attempting to dequeue 1 item from full queue"),
- etap:is(last_consumer_items(Consumer), {ok, [Item2]},
- "Consumer received the right item"),
- etap:is(couch_work_queue:item_count(Q), 2, "Queue item count is 2"),
- etap:is(couch_work_queue:size(Q), 111, "Queue size is 111 bytes"),
-
- Item5 = produce(Producer, 20),
- etap:is(ping(Producer), ok, "Producer not blocked"),
- etap:is(couch_work_queue:item_count(Q), 3, "Queue item count is 3"),
- etap:is(couch_work_queue:size(Q), 131, "Queue size is 131 bytes"),
-
- Item6 = produce(Producer, 40),
- etap:is(ping(Producer), timeout, "Producer blocked"),
- etap:is(couch_work_queue:item_count(Q), 4, "Queue item count is 4"),
- etap:is(couch_work_queue:size(Q), 171, "Queue size is 171 bytes"),
-
- etap:is(close_queue(Q), timeout,
- "Timeout when trying to close non-empty queue"),
-
- consume(Consumer, 2),
- etap:is(ping(Consumer), ok,
- "Consumer not blocked when attempting to dequeue 2 items from full queue"),
- etap:is(last_consumer_items(Consumer), {ok, [Item3, Item4]},
- "Consumer received the right items"),
- etap:is(couch_work_queue:item_count(Q), 2, "Queue item count is 2"),
- etap:is(couch_work_queue:size(Q), 60, "Queue size is 60 bytes"),
-
- etap:is(close_queue(Q), timeout,
- "Timeout when trying to close non-empty queue"),
-
- consume(Consumer, all),
- etap:is(ping(Consumer), ok,
- "Consumer not blocked when attempting to dequeue all items from queue"),
- etap:is(last_consumer_items(Consumer), {ok, [Item5, Item6]},
- "Consumer received the right items"),
-
- etap:is(couch_work_queue:item_count(Q), closed, "Queue closed"),
- etap:is(couch_work_queue:size(Q), closed, "Queue closed"),
-
- consume(Consumer, all),
- etap:is(last_consumer_items(Consumer), closed, "Consumer got closed queue"),
-
- stop(Producer, "producer"),
- stop(Consumer, "consumer").
-
-
-test_single_consumer_max_item_count_and_size() ->
- etap:diag("Spawning a queue with 3 max items, max size of 200 bytes, "
- "1 producer and 1 consumer"),
-
- {ok, Q} = couch_work_queue:new([{max_items, 3}, {max_size, 200}]),
- Producer = spawn_producer(Q),
- Consumer = spawn_consumer(Q),
-
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- Item1 = produce(Producer, 100),
- etap:is(ping(Producer), ok, "Producer not blocked"),
- etap:is(couch_work_queue:item_count(Q), 1, "Queue item count is 1"),
- etap:is(couch_work_queue:size(Q), 100, "Queue size is 100 bytes"),
-
- Item2 = produce(Producer, 110),
- etap:is(ping(Producer), timeout,
- "Producer blocked when queue size >= max_size"),
- etap:is(couch_work_queue:item_count(Q), 2, "Queue item count is 2"),
- etap:is(couch_work_queue:size(Q), 210, "Queue size is 210 bytes"),
-
- consume(Consumer, all),
- etap:is(ping(Consumer), ok,
- "Consumer not blocked when attempting to dequeue all items from queue"),
- etap:is(last_consumer_items(Consumer), {ok, [Item1, Item2]},
- "Consumer received the right items"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- etap:is(ping(Producer), ok, "Producer not blocked anymore"),
-
- Item3 = produce(Producer, 10),
- etap:is(ping(Producer), ok, "Producer not blocked"),
- etap:is(couch_work_queue:item_count(Q), 1, "Queue item count is 1"),
- etap:is(couch_work_queue:size(Q), 10, "Queue size is 10 bytes"),
-
- Item4 = produce(Producer, 4),
- etap:is(ping(Producer), ok, "Producer not blocked"),
- etap:is(couch_work_queue:item_count(Q), 2, "Queue item count is 2"),
- etap:is(couch_work_queue:size(Q), 14, "Queue size is 14 bytes"),
-
- Item5 = produce(Producer, 2),
- etap:is(ping(Producer), timeout,
- "Producer blocked when queue item count = max_items"),
- etap:is(couch_work_queue:item_count(Q), 3, "Queue item count is 3"),
- etap:is(couch_work_queue:size(Q), 16, "Queue size is 16 bytes"),
-
- consume(Consumer, 1),
- etap:is(ping(Consumer), ok,
- "Consumer not blocked when attempting to dequeue 1 item from queue"),
- etap:is(last_consumer_items(Consumer), {ok, [Item3]},
- "Consumer received 1 item"),
- etap:is(couch_work_queue:item_count(Q), 2, "Queue item count is 2"),
- etap:is(couch_work_queue:size(Q), 6, "Queue size is 6 bytes"),
-
- etap:is(close_queue(Q), timeout,
- "Timeout when trying to close non-empty queue"),
-
- consume(Consumer, 1),
- etap:is(ping(Consumer), ok,
- "Consumer not blocked when attempting to dequeue 1 item from queue"),
- etap:is(last_consumer_items(Consumer), {ok, [Item4]},
- "Consumer received 1 item"),
- etap:is(couch_work_queue:item_count(Q), 1, "Queue item count is 1"),
- etap:is(couch_work_queue:size(Q), 2, "Queue size is 2 bytes"),
-
- Item6 = produce(Producer, 50),
- etap:is(ping(Producer), ok,
- "Producer not blocked when queue is not full and already received"
- " a close request"),
- etap:is(couch_work_queue:item_count(Q), 2, "Queue item count is 2"),
- etap:is(couch_work_queue:size(Q), 52, "Queue size is 52 bytes"),
-
- consume(Consumer, all),
- etap:is(ping(Consumer), ok,
- "Consumer not blocked when attempting to dequeue all items from queue"),
- etap:is(last_consumer_items(Consumer), {ok, [Item5, Item6]},
- "Consumer received all queued items"),
-
- etap:is(couch_work_queue:item_count(Q), closed, "Queue closed"),
- etap:is(couch_work_queue:size(Q), closed, "Queue closed"),
-
- consume(Consumer, 1),
- etap:is(last_consumer_items(Consumer), closed, "Consumer got closed queue"),
-
- stop(Producer, "producer"),
- stop(Consumer, "consumer").
-
-
-test_multiple_consumers() ->
- etap:diag("Spawning a queue with 3 max items, max size of 200 bytes, "
- "1 producer and 3 consumers"),
-
- {ok, Q} = couch_work_queue:new(
- [{max_items, 3}, {max_size, 200}, {multi_workers, true}]),
- Producer = spawn_producer(Q),
- Consumer1 = spawn_consumer(Q),
- Consumer2 = spawn_consumer(Q),
- Consumer3 = spawn_consumer(Q),
-
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- consume(Consumer1, 1),
- etap:is(ping(Consumer1), timeout,
- "Consumer 1 blocked when attempting to dequeue 1 item from empty queue"),
- consume(Consumer2, 2),
- etap:is(ping(Consumer2), timeout,
- "Consumer 2 blocked when attempting to dequeue 2 items from empty queue"),
- consume(Consumer3, 1),
- etap:is(ping(Consumer3), timeout,
- "Consumer 3 blocked when attempting to dequeue 1 item from empty queue"),
-
- Item1 = produce(Producer, 50),
- etap:is(ping(Producer), ok, "Producer not blocked"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- Item2 = produce(Producer, 50),
- etap:is(ping(Producer), ok, "Producer not blocked"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- Item3 = produce(Producer, 50),
- etap:is(ping(Producer), ok, "Producer not blocked"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- etap:is(ping(Consumer1), ok, "Consumer 1 unblocked"),
- etap:is(last_consumer_items(Consumer1), {ok, [Item1]},
- "Consumer 1 received 1 item"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- etap:is(ping(Consumer2), ok, "Consumer 2 unblocked"),
- etap:is(last_consumer_items(Consumer2), {ok, [Item2]},
- "Consumer 2 received 1 item"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- etap:is(ping(Consumer3), ok, "Consumer 3 unblocked"),
- etap:is(last_consumer_items(Consumer3), {ok, [Item3]},
- "Consumer 3 received 1 item"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- consume(Consumer1, 1),
- etap:is(ping(Consumer1), timeout,
- "Consumer 1 blocked when attempting to dequeue 1 item from empty queue"),
- consume(Consumer2, 2),
- etap:is(ping(Consumer2), timeout,
- "Consumer 2 blocked when attempting to dequeue 1 item from empty queue"),
- consume(Consumer3, 1),
- etap:is(ping(Consumer3), timeout,
- "Consumer 3 blocked when attempting to dequeue 1 item from empty queue"),
-
- Item4 = produce(Producer, 50),
- etap:is(ping(Producer), ok, "Producer not blocked"),
- etap:is(couch_work_queue:item_count(Q), 0, "Queue item count is 0"),
- etap:is(couch_work_queue:size(Q), 0, "Queue size is 0 bytes"),
-
- etap:is(close_queue(Q), ok, "Closed queue"),
-
- etap:is(ping(Consumer1), ok, "Consumer 1 unblocked"),
- etap:is(last_consumer_items(Consumer1), {ok, [Item4]},
- "Consumer 1 received 1 item"),
-
- etap:is(couch_work_queue:item_count(Q), closed, "Queue closed"),
- etap:is(couch_work_queue:size(Q), closed, "Queue closed"),
-
- etap:is(ping(Consumer2), ok, "Consumer 2 unblocked"),
- etap:is(last_consumer_items(Consumer2), closed,
- "Consumer 2 received 'closed' atom"),
-
- etap:is(ping(Consumer3), ok, "Consumer 3 unblocked"),
- etap:is(last_consumer_items(Consumer3), closed,
- "Consumer 3 received 'closed' atom"),
-
- stop(Producer, "producer"),
- stop(Consumer1, "consumer 1"),
- stop(Consumer2, "consumer 2"),
- stop(Consumer3, "consumer 3").
-
-
-close_queue(Q) ->
- ok = couch_work_queue:close(Q),
- MonRef = erlang:monitor(process, Q),
- receive
- {'DOWN', MonRef, process, Q, _Reason} ->
- etap:diag("Queue closed")
- after 3000 ->
- erlang:demonitor(MonRef),
- timeout
- end.
-
-
-spawn_consumer(Q) ->
- Parent = self(),
- spawn(fun() -> consumer_loop(Parent, Q, nil) end).
-
-
-consumer_loop(Parent, Q, PrevItem) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref};
- {ping, Ref} ->
- Parent ! {pong, Ref},
- consumer_loop(Parent, Q, PrevItem);
- {last_item, Ref} ->
- Parent ! {item, Ref, PrevItem},
- consumer_loop(Parent, Q, PrevItem);
- {consume, N} ->
- Result = couch_work_queue:dequeue(Q, N),
- consumer_loop(Parent, Q, Result)
- end.
-
-
-spawn_producer(Q) ->
- Parent = self(),
- spawn(fun() -> producer_loop(Parent, Q) end).
-
-
-producer_loop(Parent, Q) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref};
- {ping, Ref} ->
- Parent ! {pong, Ref},
- producer_loop(Parent, Q);
- {produce, Ref, Size} ->
- Item = crypto:rand_bytes(Size),
- Parent ! {item, Ref, Item},
- ok = couch_work_queue:queue(Q, Item),
- producer_loop(Parent, Q)
- end.
-
-
-consume(Consumer, N) ->
- Consumer ! {consume, N}.
-
-
-last_consumer_items(Consumer) ->
- Ref = make_ref(),
- Consumer ! {last_item, Ref},
- receive
- {item, Ref, Items} ->
- Items
- after 3000 ->
- timeout
- end.
-
-
-produce(Producer, Size) ->
- Ref = make_ref(),
- Producer ! {produce, Ref, Size},
- receive
- {item, Ref, Item} ->
- Item
- after 3000 ->
- etap:bail("Timeout asking producer to produce an item")
- end.
-
-
-ping(Pid) ->
- Ref = make_ref(),
- Pid ! {ping, Ref},
- receive
- {pong, Ref} ->
- ok
- after 3000 ->
- timeout
- end.
-
-
-stop(Pid, Name) ->
- Ref = make_ref(),
- Pid ! {stop, Ref},
- receive
- {ok, Ref} ->
- etap:diag("Stopped " ++ Name)
- after 3000 ->
- etap:bail("Timeout stopping " ++ Name)
- end.
diff --git a/test/etap/043-find-in-binary.t b/test/etap/043-find-in-binary.t
deleted file mode 100755
index dca1d9c72..000000000
--- a/test/etap/043-find-in-binary.t
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(length(cases())),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- lists:foreach(fun({Needle, Haystack, Result}) ->
- try
- Msg = io_lib:format("Looking for ~s in ~s", [Needle, Haystack]),
- etap:is(couch_util:find_in_binary(Needle, Haystack), Result, Msg)
- catch _T:_R ->
- etap:diag("~p", [{_T, _R}])
- end
- end, cases()),
- ok.
-
-
-cases() ->
- [
- {<<"foo">>, <<"foobar">>, {exact, 0}},
- {<<"foo">>, <<"foofoo">>, {exact, 0}},
- {<<"foo">>, <<"barfoo">>, {exact, 3}},
- {<<"foo">>, <<"barfo">>, {partial, 3}},
- {<<"f">>, <<"fobarfff">>, {exact, 0}},
- {<<"f">>, <<"obarfff">>, {exact, 4}},
- {<<"f">>, <<"obarggf">>, {exact, 6}},
- {<<"f">>, <<"f">>, {exact, 0}},
- {<<"f">>, <<"g">>, not_found},
- {<<"foo">>, <<"f">>, {partial, 0}},
- {<<"foo">>, <<"g">>, not_found},
- {<<"foo">>, <<"">>, not_found},
- {<<"fofo">>, <<"foofo">>, {partial, 3}},
- {<<"foo">>, <<"gfobarfo">>, {partial, 6}},
- {<<"foo">>, <<"gfobarf">>, {partial, 6}},
- {<<"foo">>, <<"gfobar">>, not_found},
- {<<"fog">>, <<"gbarfogquiz">>, {exact, 4}},
- {<<"ggg">>, <<"ggg">>, {exact, 0}},
- {<<"ggg">>, <<"ggggg">>, {exact, 0}},
- {<<"ggg">>, <<"bggg">>, {exact, 1}},
- {<<"ggg">>, <<"bbgg">>, {partial, 2}},
- {<<"ggg">>, <<"bbbg">>, {partial, 3}},
- {<<"ggg">>, <<"bgbggbggg">>, {exact, 6}},
- {<<"ggg">>, <<"bgbggb">>, not_found}
- ].
diff --git a/test/etap/050-stream.t b/test/etap/050-stream.t
deleted file mode 100755
index 0251f002b..000000000
--- a/test/etap/050-stream.t
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(13),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-read_all(Fd, PosList) ->
- Data = couch_stream:foldl(Fd, PosList, fun(Bin, Acc) -> [Bin, Acc] end, []),
- iolist_to_binary(Data).
-
-test() ->
- {ok, Fd} = couch_file:open("test/etap/temp.050", [create,overwrite]),
- {ok, Stream} = couch_stream:open(Fd),
-
- etap:is(ok, couch_stream:write(Stream, <<"food">>),
- "Writing to streams works."),
-
- etap:is(ok, couch_stream:write(Stream, <<"foob">>),
- "Consecutive writing to streams works."),
-
- etap:is(ok, couch_stream:write(Stream, <<>>),
- "Writing an empty binary does nothing."),
-
- {Ptrs, Length, _, _, _} = couch_stream:close(Stream),
- etap:is(Ptrs, [{0, 8}], "Close returns the file pointers."),
- etap:is(Length, 8, "Close also returns the number of bytes written."),
- etap:is(<<"foodfoob">>, read_all(Fd, Ptrs), "Returned pointers are valid."),
-
- % Remember where we expect the pointer to be.
- {ok, ExpPtr} = couch_file:bytes(Fd),
- {ok, Stream2} = couch_stream:open(Fd),
- OneBits = <<1:(8*10)>>,
- etap:is(ok, couch_stream:write(Stream2, OneBits),
- "Successfully wrote 79 zero bits and 1 one bit."),
-
- ZeroBits = <<0:(8*10)>>,
- etap:is(ok, couch_stream:write(Stream2, ZeroBits),
- "Successfully wrote 80 0 bits."),
-
- {Ptrs2, Length2, _, _, _} = couch_stream:close(Stream2),
- etap:is(Ptrs2, [{ExpPtr, 20}], "Closing stream returns the file pointers."),
- etap:is(Length2, 20, "Length written is 160 bytes."),
-
- AllBits = iolist_to_binary([OneBits,ZeroBits]),
- etap:is(AllBits, read_all(Fd, Ptrs2), "Returned pointers are valid."),
-
- % Stream more the 4K chunk size.
- {ok, ExpPtr2} = couch_file:bytes(Fd),
- {ok, Stream3} = couch_stream:open(Fd, [{buffer_size, 4096}]),
- lists:foldl(fun(_, Acc) ->
- Data = <<"a1b2c">>,
- couch_stream:write(Stream3, Data),
- [Data | Acc]
- end, [], lists:seq(1, 1024)),
- {Ptrs3, Length3, _, _, _} = couch_stream:close(Stream3),
-
- % 4095 because of 5 * 4096 rem 5 (last write before exceeding threshold)
- % + 5 puts us over the threshold
- % + 4 bytes for the term_to_binary adding a length header
- % + 1 byte every 4K for tail append headers
- SecondPtr = ExpPtr2 + 4095 + 5 + 4 + 1,
- etap:is(Ptrs3, [{ExpPtr2, 4100}, {SecondPtr, 1020}], "Pointers every 4K bytes."),
- etap:is(Length3, 5120, "Wrote the expected 5K bytes."),
-
- couch_file:close(Fd),
- ok.
diff --git a/test/etap/060-kt-merging.t b/test/etap/060-kt-merging.t
deleted file mode 100755
index efbdbf695..000000000
--- a/test/etap/060-kt-merging.t
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(16),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- One = {1, {"1","foo",[]}},
-
- etap:is(
- {[One], no_conflicts},
- couch_key_tree:merge([], One, 10),
- "The empty tree is the identity for merge."
- ),
- etap:is(
- {[One], no_conflicts},
- couch_key_tree:merge([One], One, 10),
- "Merging is reflexive."
- ),
-
- TwoSibs = [{1, {"1","foo",[]}},
- {1, {"2","foo",[]}}],
-
- etap:is(
- {TwoSibs, no_conflicts},
- couch_key_tree:merge(TwoSibs, One, 10),
- "Merging a prefix of a tree with the tree yields the tree."
- ),
-
- Three = {1, {"3","foo",[]}},
- ThreeSibs = [{1, {"1","foo",[]}},
- {1, {"2","foo",[]}},
- {1, {"3","foo",[]}}],
-
- etap:is(
- {ThreeSibs, conflicts},
- couch_key_tree:merge(TwoSibs, Three, 10),
- "Merging a third unrelated branch leads to a conflict."
- ),
-
-
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
-
- etap:is(
- {[TwoChild], no_conflicts},
- couch_key_tree:merge([TwoChild], TwoChild, 10),
- "Merging two children is still reflexive."
- ),
-
- TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
- {"1b", "bar", []}]}},
- etap:is(
- {[TwoChildSibs], no_conflicts},
- couch_key_tree:merge([TwoChildSibs], TwoChildSibs, 10),
- "Merging a tree to itself is itself."),
-
- TwoChildPlusSibs =
- {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]},
- {"1b", "bar", []}]}},
-
- etap:is(
- {[TwoChildPlusSibs], no_conflicts},
- couch_key_tree:merge([TwoChild], TwoChildSibs, 10),
- "Merging tree of uneven length at node 2."),
-
- Stemmed1b = {2, {"1a", "bar", []}},
- etap:is(
- {[TwoChildSibs], no_conflicts},
- couch_key_tree:merge([TwoChildSibs], Stemmed1b, 10),
- "Merging a tree with a stem."
- ),
-
- TwoChildSibs2 = {1, {"1","foo", [{"1a", "bar", []},
- {"1b", "bar", [{"1bb", "boo", []}]}]}},
- Stemmed1bb = {3, {"1bb", "boo", []}},
- etap:is(
- {[TwoChildSibs2], no_conflicts},
- couch_key_tree:merge([TwoChildSibs2], Stemmed1bb, 10),
- "Merging a stem at a deeper level."
- ),
-
- StemmedTwoChildSibs2 = [{2,{"1a", "bar", []}},
- {2,{"1b", "bar", [{"1bb", "boo", []}]}}],
-
- etap:is(
- {StemmedTwoChildSibs2, no_conflicts},
- couch_key_tree:merge(StemmedTwoChildSibs2, Stemmed1bb, 10),
- "Merging a stem at a deeper level against paths at deeper levels."
- ),
-
- Stemmed1aa = {3, {"1aa", "bar", []}},
- etap:is(
- {[TwoChild], no_conflicts},
- couch_key_tree:merge([TwoChild], Stemmed1aa, 10),
- "Merging a single tree with a deeper stem."
- ),
-
- Stemmed1a = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
- etap:is(
- {[TwoChild], no_conflicts},
- couch_key_tree:merge([TwoChild], Stemmed1a, 10),
- "Merging a larger stem."
- ),
-
- etap:is(
- {[Stemmed1a], no_conflicts},
- couch_key_tree:merge([Stemmed1a], Stemmed1aa, 10),
- "More merging."
- ),
-
- OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
- Expect1 = [OneChild, Stemmed1aa],
- etap:is(
- {Expect1, conflicts},
- couch_key_tree:merge([OneChild], Stemmed1aa, 10),
- "Merging should create conflicts."
- ),
-
- etap:is(
- {[TwoChild], no_conflicts},
- couch_key_tree:merge(Expect1, TwoChild, 10),
- "Merge should have no conflicts."
- ),
-
- %% this test is based on couch-902-test-case2.py
- %% foo has conflicts from replication at depth two
- %% foo3 is the current value
- Foo = {1, {"foo",
- "val1",
- [{"foo2","val2",[]},
- {"foo3", "val3", []}
- ]}},
- %% foo now has an attachment added, which leads to foo4 and val4
- %% off foo3
- Bar = {1, {"foo",
- [],
- [{"foo3",
- [],
- [{"foo4","val4",[]}
- ]}]}},
- %% this is what the merge returns
- %% note that it ignore the conflicting branch as there's no match
- FooBar = {1, {"foo",
- "val1",
- [{"foo2","val2",[]},
- {"foo3", "val3", [{"foo4","val4",[]}]}
- ]}},
-
- etap:is(
- {[FooBar], no_conflicts},
- couch_key_tree:merge([Foo],Bar,10),
- "Merging trees with conflicts ought to behave."
- ),
-
- ok.
diff --git a/test/etap/061-kt-missing-leaves.t b/test/etap/061-kt-missing-leaves.t
deleted file mode 100755
index d60b4db8d..000000000
--- a/test/etap/061-kt-missing-leaves.t
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(4),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- Stemmed2 = [{2, {"1aa", "bar", []}}],
-
- etap:is(
- [],
- couch_key_tree:find_missing(TwoChildSibs, [{0,"1"}, {1,"1a"}]),
- "Look for missing keys."
- ),
-
- etap:is(
- [{0, "10"}, {100, "x"}],
- couch_key_tree:find_missing(
- TwoChildSibs,
- [{0,"1"}, {0, "10"}, {1,"1a"}, {100, "x"}]
- ),
- "Look for missing keys."
- ),
-
- etap:is(
- [{0, "1"}, {100, "x"}],
- couch_key_tree:find_missing(
- Stemmed1,
- [{0,"1"}, {1,"1a"}, {100, "x"}]
- ),
- "Look for missing keys."
- ),
- etap:is(
- [{0, "1"}, {1,"1a"}, {100, "x"}],
- couch_key_tree:find_missing(
- Stemmed2,
- [{0,"1"}, {1,"1a"}, {100, "x"}]
- ),
- "Look for missing keys."
- ),
-
- ok.
diff --git a/test/etap/062-kt-remove-leaves.t b/test/etap/062-kt-remove-leaves.t
deleted file mode 100755
index 745a00be7..000000000
--- a/test/etap/062-kt-remove-leaves.t
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(6),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}],
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
-
- etap:is(
- {TwoChildSibs, []},
- couch_key_tree:remove_leafs(TwoChildSibs, []),
- "Removing no leaves has no effect on the tree."
- ),
-
- etap:is(
- {TwoChildSibs, []},
- couch_key_tree:remove_leafs(TwoChildSibs, [{0, "1"}]),
- "Removing a non-existant branch has no effect."
- ),
-
- etap:is(
- {OneChild, [{1, "1b"}]},
- couch_key_tree:remove_leafs(TwoChildSibs, [{1, "1b"}]),
- "Removing a leaf removes the leaf."
- ),
-
- etap:is(
- {[], [{1, "1b"},{1, "1a"}]},
- couch_key_tree:remove_leafs(TwoChildSibs, [{1, "1a"}, {1, "1b"}]),
- "Removing all leaves returns an empty tree."
- ),
-
- etap:is(
- {Stemmed, []},
- couch_key_tree:remove_leafs(Stemmed, [{1, "1a"}]),
- "Removing a non-existant node has no effect."
- ),
-
- etap:is(
- {[], [{2, "1aa"}]},
- couch_key_tree:remove_leafs(Stemmed, [{2, "1aa"}]),
- "Removing the last leaf returns an empty tree."
- ),
-
- ok.
diff --git a/test/etap/063-kt-get-leaves.t b/test/etap/063-kt-get-leaves.t
deleted file mode 100755
index 6d4e8007d..000000000
--- a/test/etap/063-kt-get-leaves.t
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(11),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
-
- etap:is(
- {[{"foo", {0, ["1"]}}],[]},
- couch_key_tree:get(TwoChildSibs, [{0, "1"}]),
- "extract a subtree."
- ),
-
- etap:is(
- {[{"bar", {1, ["1a", "1"]}}],[]},
- couch_key_tree:get(TwoChildSibs, [{1, "1a"}]),
- "extract a subtree."
- ),
-
- etap:is(
- {[],[{0,"x"}]},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}]),
- "gather up the leaves."
- ),
-
- etap:is(
- {[{"bar", {1, ["1a","1"]}}],[]},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}]),
- "gather up the leaves."
- ),
-
- etap:is(
- {[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}]),
- "gather up the leaves."
- ),
-
- etap:is(
- {[{0,[{"1", "foo"}]}],[]},
- couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}]),
- "retrieve full key paths."
- ),
-
- etap:is(
- {[{1,[{"1a", "bar"},{"1", "foo"}]}],[]},
- couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}]),
- "retrieve full key paths."
- ),
-
- etap:is(
- [{2, [{"1aa", "bar"},{"1a", "bar"}]}],
- couch_key_tree:get_all_leafs_full(Stemmed),
- "retrieve all leaves."
- ),
-
- etap:is(
- [{1, [{"1a", "bar"},{"1", "foo"}]}, {1, [{"1b", "bar"},{"1", "foo"}]}],
- couch_key_tree:get_all_leafs_full(TwoChildSibs),
- "retrieve all the leaves."
- ),
-
- etap:is(
- [{"bar", {2, ["1aa","1a"]}}],
- couch_key_tree:get_all_leafs(Stemmed),
- "retrieve all leaves."
- ),
-
- etap:is(
- [{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}],
- couch_key_tree:get_all_leafs(TwoChildSibs),
- "retrieve all the leaves."
- ),
-
- ok.
diff --git a/test/etap/064-kt-counting.t b/test/etap/064-kt-counting.t
deleted file mode 100755
index f182d2870..000000000
--- a/test/etap/064-kt-counting.t
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(4),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- EmptyTree = [],
- One = [{0, {"1","foo",[]}}],
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- Stemmed = [{2, {"1bb", "boo", []}}],
-
- etap:is(0, couch_key_tree:count_leafs(EmptyTree),
- "Empty trees have no leaves."),
-
- etap:is(1, couch_key_tree:count_leafs(One),
- "Single node trees have a single leaf."),
-
- etap:is(2, couch_key_tree:count_leafs(TwoChildSibs),
- "Two children siblings counted as two leaves."),
-
- etap:is(1, couch_key_tree:count_leafs(Stemmed),
- "Stemming does not affect leaf counting."),
-
- ok.
diff --git a/test/etap/065-kt-stemming.t b/test/etap/065-kt-stemming.t
deleted file mode 100755
index 6e781c1d3..000000000
--- a/test/etap/065-kt-stemming.t
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(3),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
- Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- Stemmed2 = [{2, {"1aa", "bar", []}}],
-
- etap:is(TwoChild, couch_key_tree:stem(TwoChild, 3),
- "Stemming more levels than what exists does nothing."),
-
- etap:is(Stemmed1, couch_key_tree:stem(TwoChild, 2),
- "Stemming with a depth of two returns the deepest two nodes."),
-
- etap:is(Stemmed2, couch_key_tree:stem(TwoChild, 1),
- "Stemming to a depth of one returns the deepest node."),
-
- ok.
diff --git a/test/etap/070-couch-db.t b/test/etap/070-couch-db.t
deleted file mode 100755
index 787d6c6ac..000000000
--- a/test/etap/070-couch-db.t
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(4),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
-
- couch_server_sup:start_link(test_util:config_files()),
-
- couch_db:create(<<"etap-test-db">>, []),
- {ok, AllDbs} = couch_server:all_databases(),
- etap:ok(lists:member(<<"etap-test-db">>, AllDbs), "Database was created."),
-
- couch_server:delete(<<"etap-test-db">>, []),
- {ok, AllDbs2} = couch_server:all_databases(),
- etap:ok(not lists:member(<<"etap-test-db">>, AllDbs2),
- "Database was deleted."),
-
- gen_server:call(couch_server, {set_max_dbs_open, 3}),
- MkDbName = fun(Int) -> list_to_binary("lru-" ++ integer_to_list(Int)) end,
-
- lists:foreach(fun(Int) ->
- {ok, TestDbs} = couch_server:all_databases(),
- ok = case lists:member(MkDbName(Int), TestDbs) of
- true -> couch_server:delete(MkDbName(Int), []);
- _ -> ok
- end,
- {ok, Db} = couch_db:create(MkDbName(Int), []),
- ok = couch_db:close(Db)
- end, lists:seq(1, 6)),
-
- {ok, AllDbs3} = couch_server:all_databases(),
- NumCreated = lists:foldl(fun(Int, Acc) ->
- true = lists:member(MkDbName(Int), AllDbs3),
- Acc+1
- end, 0, lists:seq(1, 6)),
- etap:is(6, NumCreated, "Created all databases."),
-
- lists:foreach(fun(Int) ->
- ok = couch_server:delete(MkDbName(Int), [])
- end, lists:seq(1, 6)),
-
- {ok, AllDbs4} = couch_server:all_databases(),
- NumDeleted = lists:foldl(fun(Int, Acc) ->
- false = lists:member(MkDbName(Int), AllDbs4),
- Acc+1
- end, 0, lists:seq(1, 6)),
- etap:is(6, NumDeleted, "Deleted all databases."),
-
- ok.
diff --git a/test/etap/072-cleanup.t b/test/etap/072-cleanup.t
deleted file mode 100755
index 9cbcdfa3c..000000000
--- a/test/etap/072-cleanup.t
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(TEST_DB, <<"etap-test-db">>).
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--define(ADMIN_USER, #user_ctx{roles=[<<"_admin">>]}).
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(7),
- try test() of
- ok ->
- etap:end_tests()
- catch
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- timer:sleep(1000),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
-
- {ok, _} = couch_server_sup:start_link(test_util:config_files()),
- couch_server:delete(?TEST_DB, []),
- timer:sleep(1000),
-
- couch_db:create(?TEST_DB, []),
-
- {ok, AllDbs} = couch_server:all_databases(),
- etap:ok(lists:member(?TEST_DB, AllDbs), "Database was created."),
-
- FooRev = create_design_doc(<<"_design/foo">>, <<"bar">>),
- query_view("foo", "bar"),
-
- BoozRev = create_design_doc(<<"_design/booz">>, <<"baz">>),
- query_view("booz", "baz"),
-
- {ok, _Db} = couch_db:open(?TEST_DB, [{user_ctx, ?ADMIN_USER}]),
- view_cleanup(),
- etap:is(count_index_files(), 2,
- "Two index files before any deletions."),
-
- delete_design_doc(<<"_design/foo">>, FooRev),
- view_cleanup(),
- etap:is(count_index_files(), 1,
- "One index file after first deletion and cleanup."),
-
- delete_design_doc(<<"_design/booz">>, BoozRev),
- view_cleanup(),
- etap:is(count_index_files(), 0,
- "No index files after second deletion and cleanup."),
-
- couch_server:delete(?TEST_DB, []),
- {ok, AllDbs2} = couch_server:all_databases(),
- etap:ok(not lists:member(?TEST_DB, AllDbs2),
- "Database was deleted."),
- ok.
-
-create_design_doc(DDName, ViewName) ->
- {ok, Db} = couch_db:open(?TEST_DB, [{user_ctx, ?ADMIN_USER}]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {ViewName, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
- ]}}
- ]}}
- ]}),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:ensure_full_commit(Db),
- couch_db:close(Db),
- Rev.
-
-delete_design_doc(DDName, Rev) ->
- {ok, Db} = couch_db:open(?TEST_DB, [{user_ctx, ?ADMIN_USER}]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"_deleted">>, true}
- ]}),
- {ok, _} = couch_db:update_doc(Db, DDoc, [Rev]),
- couch_db:close(Db).
-
-db_url() ->
- Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "http://" ++ Addr ++ ":" ++ Port ++ "/" ++
- binary_to_list(?TEST_DB).
-
-query_view(DDoc, View) ->
- {ok, Code, _Headers, _Body} = test_util:request(
- db_url() ++ "/_design/" ++ DDoc ++ "/_view/" ++ View, [], get),
- etap:is(Code, 200, "Built view index for " ++ DDoc ++ "."),
- ok.
-
-view_cleanup() ->
- {ok, Db} = couch_db:open(?TEST_DB, [{user_ctx, ?ADMIN_USER}]),
- couch_mrview:cleanup(Db),
- couch_db:close(Db).
-
-count_index_files() ->
- % call server to fetch the index files
- RootDir = couch_config:get("couchdb", "view_index_dir"),
- length(filelib:wildcard(RootDir ++ "/." ++
- binary_to_list(?TEST_DB) ++ "_design"++"/mrview/*")).
diff --git a/test/etap/073-changes.t b/test/etap/073-changes.t
deleted file mode 100755
index d632c2f9d..000000000
--- a/test/etap/073-changes.t
+++ /dev/null
@@ -1,558 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Verify that compacting databases that are being used as the source or
-% target of a replication doesn't affect the replication and that the
-% replication doesn't hold their reference counters forever.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--record(changes_args, {
- feed = "normal",
- dir = fwd,
- since = 0,
- limit = 1000000000000000,
- style = main_only,
- heartbeat,
- timeout,
- filter = "",
- filter_fun,
- filter_args = [],
- include_docs = false,
- doc_options = [],
- conflicts = false,
- db_open_options = []
-}).
-
--record(row, {
- id,
- seq,
- deleted = false
-}).
-
-
-test_db_name() -> <<"couch_test_changes">>.
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(43),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
-
- test_by_doc_ids(),
- test_by_doc_ids_with_since(),
- test_by_doc_ids_continuous(),
- test_design_docs_only(),
- test_heartbeat(),
-
- couch_server_sup:stop(),
- ok.
-
-
-test_by_doc_ids() ->
- {ok, Db} = create_db(test_db_name()),
-
- {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
- {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
- {ok, Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
- {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
- {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
- {ok, _Rev3_2} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev3}]}),
- {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
- {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
- {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
-
- etap:diag("Folding changes in ascending order with _doc_ids filter"),
- ChangesArgs = #changes_args{
- filter = "_doc_ids"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- Consumer = spawn_consumer(test_db_name(), ChangesArgs, Req),
-
- {Rows, LastSeq} = wait_finished(Consumer),
- {ok, Db2} = couch_db:open_int(test_db_name(), []),
- UpSeq = couch_db:get_update_seq(Db2),
- couch_db:close(Db2),
- etap:is(length(Rows), 2, "Received 2 changes rows"),
- etap:is(LastSeq, UpSeq, "LastSeq is same as database update seq number"),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- etap:is(Id1, <<"doc4">>, "First row is for doc doc4"),
- etap:is(Seq1, 4, "First row has seq 4"),
- etap:is(Id2, <<"doc3">>, "Second row is for doc doc3"),
- etap:is(Seq2, 6, "Second row has seq 6"),
-
- stop(Consumer),
- etap:diag("Folding changes in descending order with _doc_ids filter"),
- ChangesArgs2 = #changes_args{
- filter = "_doc_ids",
- dir = rev
- },
- Consumer2 = spawn_consumer(test_db_name(), ChangesArgs2, Req),
-
- {Rows2, LastSeq2} = wait_finished(Consumer2),
- etap:is(length(Rows2), 2, "Received 2 changes rows"),
- etap:is(LastSeq2, 4, "LastSeq is 4"),
- [#row{seq = Seq1_2, id = Id1_2}, #row{seq = Seq2_2, id = Id2_2}] = Rows2,
- etap:is(Id1_2, <<"doc3">>, "First row is for doc doc3"),
- etap:is(Seq1_2, 6, "First row has seq 4"),
- etap:is(Id2_2, <<"doc4">>, "Second row is for doc doc4"),
- etap:is(Seq2_2, 4, "Second row has seq 6"),
-
- stop(Consumer2),
- delete_db(Db).
-
-
-test_by_doc_ids_with_since() ->
- {ok, Db} = create_db(test_db_name()),
-
- {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
- {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
- {ok, Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
- {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
- {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
- {ok, Rev3_2} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev3}]}),
- {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
- {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
- {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
-
- ChangesArgs = #changes_args{
- filter = "_doc_ids",
- since = 5
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- Consumer = spawn_consumer(test_db_name(), ChangesArgs, Req),
-
- {Rows, LastSeq} = wait_finished(Consumer),
- {ok, Db2} = couch_db:open_int(test_db_name(), []),
- UpSeq = couch_db:get_update_seq(Db2),
- couch_db:close(Db2),
- etap:is(LastSeq, UpSeq, "LastSeq is same as database update seq number"),
- etap:is(length(Rows), 1, "Received 1 changes rows"),
- [#row{seq = Seq1, id = Id1}] = Rows,
- etap:is(Id1, <<"doc3">>, "First row is for doc doc3"),
- etap:is(Seq1, 6, "First row has seq 6"),
-
- stop(Consumer),
-
- ChangesArgs2 = #changes_args{
- filter = "_doc_ids",
- since = 6
- },
- Consumer2 = spawn_consumer(test_db_name(), ChangesArgs2, Req),
-
- {Rows2, LastSeq2} = wait_finished(Consumer2),
- {ok, Db3} = couch_db:open_int(test_db_name(), []),
- UpSeq2 = couch_db:get_update_seq(Db3),
- couch_db:close(Db3),
- etap:is(LastSeq2, UpSeq2, "LastSeq is same as database update seq number"),
- etap:is(length(Rows2), 0, "Received 0 change rows"),
-
- stop(Consumer2),
-
- {ok, _Rev3_3} = save_doc(
- Db,
- {[{<<"_id">>, <<"doc3">>}, {<<"_deleted">>, true}, {<<"_rev">>, Rev3_2}]}),
-
- ChangesArgs3 = #changes_args{
- filter = "_doc_ids",
- since = 9
- },
- Consumer3 = spawn_consumer(test_db_name(), ChangesArgs3, Req),
-
- {Rows3, LastSeq3} = wait_finished(Consumer3),
- {ok, Db4} = couch_db:open_int(test_db_name(), []),
- UpSeq3 = couch_db:get_update_seq(Db4),
- couch_db:close(Db4),
- etap:is(LastSeq3, UpSeq3, "LastSeq is same as database update seq number"),
- etap:is(length(Rows3), 1, "Received 1 changes rows"),
- etap:is(
- [#row{seq = LastSeq3, id = <<"doc3">>, deleted = true}],
- Rows3,
- "Received row with doc3 deleted"),
-
- stop(Consumer3),
-
- delete_db(Db).
-
-
-test_by_doc_ids_continuous() ->
- {ok, Db} = create_db(test_db_name()),
-
- {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
- {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
- {ok, Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
- {ok, Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
- {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
- {ok, Rev3_2} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev3}]}),
- {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
- {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
- {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
-
- ChangesArgs = #changes_args{
- filter = "_doc_ids",
- feed = "continuous"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- Consumer = spawn_consumer(test_db_name(), ChangesArgs, Req),
-
- pause(Consumer),
- Rows = get_rows(Consumer),
-
- etap:is(length(Rows), 2, "Received 2 changes rows"),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- etap:is(Id1, <<"doc4">>, "First row is for doc doc4"),
- etap:is(Seq1, 4, "First row has seq 4"),
- etap:is(Id2, <<"doc3">>, "Second row is for doc doc3"),
- etap:is(Seq2, 6, "Second row has seq 6"),
-
- clear_rows(Consumer),
- {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
- {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
- unpause(Consumer),
- pause(Consumer),
- etap:is(get_rows(Consumer), [], "No new rows"),
-
- {ok, Rev4_2} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, {<<"_rev">>, Rev4}]}),
- {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
- {ok, _Rev4_3} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, {<<"_rev">>, Rev4_2}]}),
- {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
- {ok, Rev3_3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev3_2}]}),
- unpause(Consumer),
- pause(Consumer),
-
- NewRows = get_rows(Consumer),
- etap:is(length(NewRows), 2, "Received 2 new rows"),
- [Row14, Row16] = NewRows,
- etap:is(Row14#row.seq, 14, "First row has seq 14"),
- etap:is(Row14#row.id, <<"doc4">>, "First row is for doc doc4"),
- etap:is(Row16#row.seq, 16, "Second row has seq 16"),
- etap:is(Row16#row.id, <<"doc3">>, "Second row is for doc doc3"),
-
- clear_rows(Consumer),
- {ok, _Rev3_4} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev3_3}]}),
- unpause(Consumer),
- pause(Consumer),
- etap:is(get_rows(Consumer), [#row{seq = 17, id = <<"doc3">>}],
- "Got row for seq 17, doc doc3"),
-
- unpause(Consumer),
- stop(Consumer),
- delete_db(Db).
-
-
-test_design_docs_only() ->
- {ok, Db} = create_db(test_db_name()),
-
- {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
- {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
- {ok, Rev3} = save_doc(Db, {[{<<"_id">>, <<"_design/foo">>}]}),
-
- ChangesArgs = #changes_args{
- filter = "_design"
- },
- Consumer = spawn_consumer(test_db_name(), ChangesArgs, {json_req, null}),
-
- {Rows, LastSeq} = wait_finished(Consumer),
- {ok, Db2} = couch_db:open_int(test_db_name(), []),
- UpSeq = couch_db:get_update_seq(Db2),
- couch_db:close(Db2),
-
- etap:is(LastSeq, UpSeq, "LastSeq is same as database update seq number"),
- etap:is(length(Rows), 1, "Received 1 changes rows"),
- etap:is(Rows, [#row{seq = 3, id = <<"_design/foo">>}], "Received row with ddoc"),
-
- stop(Consumer),
-
- {ok, Db3} = couch_db:open_int(
- test_db_name(), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
- {ok, _Rev3_2} = save_doc(
- Db3,
- {[{<<"_id">>, <<"_design/foo">>}, {<<"_rev">>, Rev3},
- {<<"_deleted">>, true}]}),
-
- Consumer2 = spawn_consumer(test_db_name(), ChangesArgs, {json_req, null}),
-
- {Rows2, LastSeq2} = wait_finished(Consumer2),
- UpSeq2 = UpSeq + 1,
- couch_db:close(Db3),
-
- etap:is(LastSeq2, UpSeq2, "LastSeq is same as database update seq number"),
- etap:is(length(Rows2), 1, "Received 1 changes rows"),
- etap:is(
- Rows2,
- [#row{seq = 4, id = <<"_design/foo">>, deleted = true}],
- "Received row with deleted ddoc"),
-
- stop(Consumer2),
- delete_db(Db).
-
-test_heartbeat() ->
- {ok, Db} = create_db(test_db_name()),
-
- {ok, _} = save_doc(Db, {[
- {<<"_id">>, <<"_design/foo">>},
- {<<"language">>, <<"javascript">>},
- {<<"filters">>, {[
- {<<"foo">>, <<"function(doc) { if ((doc._id == 'doc10') ||
- (doc._id == 'doc11') ||
- (doc._id == 'doc12')) {
- return true;
- } else {
- return false;
- }}">>
- }]}}
- ]}),
-
- ChangesArgs = #changes_args{
- filter = "foo/foo",
- feed = "continuous",
- timeout = 10000,
- heartbeat = 1000
- },
- Consumer = spawn_consumer(test_db_name(), ChangesArgs, {json_req, null}),
-
- {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
- timer:sleep(200),
- {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
- timer:sleep(200),
- {ok, _Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
- timer:sleep(200),
- {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
- timer:sleep(200),
- {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
- timer:sleep(200),
- {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
- timer:sleep(200),
- {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
- timer:sleep(200),
- {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
- timer:sleep(200),
- {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
- Heartbeats = get_heartbeats(Consumer),
- etap:is(Heartbeats, 2, "Received 2 heartbeats now"),
- {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
- timer:sleep(200),
- {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
- timer:sleep(200),
- {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
- Heartbeats2 = get_heartbeats(Consumer),
- etap:is(Heartbeats2, 3, "Received 3 heartbeats now"),
- Rows = get_rows(Consumer),
- etap:is(length(Rows), 3, "Received 3 changes rows"),
-
- {ok, _Rev13} = save_doc(Db, {[{<<"_id">>, <<"doc13">>}]}),
- timer:sleep(200),
- {ok, _Rev14} = save_doc(Db, {[{<<"_id">>, <<"doc14">>}]}),
- timer:sleep(200),
- Heartbeats3 = get_heartbeats(Consumer),
- etap:is(Heartbeats3, 6, "Received 6 heartbeats now"),
- stop(Consumer),
- couch_db:close(Db),
- delete_db(Db).
-
-
-save_doc(Db, Json) ->
- Doc = couch_doc:from_json_obj(Json),
- {ok, Rev} = couch_db:update_doc(Db, Doc, []),
- {ok, couch_doc:rev_to_str(Rev)}.
-
-
-get_rows(Consumer) ->
- Ref = make_ref(),
- Consumer ! {get_rows, Ref},
- receive
- {rows, Ref, Rows} ->
- Rows
- after 3000 ->
- etap:bail("Timeout getting rows from consumer")
- end.
-
-get_heartbeats(Consumer) ->
- Ref = make_ref(),
- Consumer ! {get_heartbeats, Ref},
- receive
- {hearthbeats, Ref, HeartBeats} ->
- HeartBeats
- after 3000 ->
- etap:bail("Timeout getting heartbeats from consumer")
- end.
-
-
-clear_rows(Consumer) ->
- Ref = make_ref(),
- Consumer ! {reset, Ref},
- receive
- {ok, Ref} ->
- ok
- after 3000 ->
- etap:bail("Timeout clearing consumer rows")
- end.
-
-
-stop(Consumer) ->
- Ref = make_ref(),
- Consumer ! {stop, Ref},
- receive
- {ok, Ref} ->
- ok
- after 3000 ->
- etap:bail("Timeout stopping consumer")
- end.
-
-
-pause(Consumer) ->
- Ref = make_ref(),
- Consumer ! {pause, Ref},
- receive
- {paused, Ref} ->
- ok
- after 3000 ->
- etap:bail("Timeout pausing consumer")
- end.
-
-
-unpause(Consumer) ->
- Ref = make_ref(),
- Consumer ! {continue, Ref},
- receive
- {ok, Ref} ->
- ok
- after 3000 ->
- etap:bail("Timeout unpausing consumer")
- end.
-
-
-wait_finished(_Consumer) ->
- receive
- {consumer_finished, Rows, LastSeq} ->
- {Rows, LastSeq}
- after 30000 ->
- etap:bail("Timeout waiting for consumer to finish")
- end.
-
-
-spawn_consumer(DbName, ChangesArgs0, Req) ->
- Parent = self(),
- spawn(fun() ->
- put(heartbeat_count, 0),
- Callback = fun({change, {Change}, _}, _, Acc) ->
- Id = couch_util:get_value(<<"id">>, Change),
- Seq = couch_util:get_value(<<"seq">>, Change),
- Del = couch_util:get_value(<<"deleted">>, Change, false),
- [#row{id = Id, seq = Seq, deleted = Del} | Acc];
- ({stop, LastSeq}, _, Acc) ->
- Parent ! {consumer_finished, lists:reverse(Acc), LastSeq},
- stop_loop(Parent, Acc);
- (timeout, _, Acc) ->
- put(heartbeat_count, get(heartbeat_count) + 1),
- maybe_pause(Parent, Acc);
- (_, _, Acc) ->
- maybe_pause(Parent, Acc)
- end,
- {ok, Db} = couch_db:open_int(DbName, []),
- ChangesArgs = case (ChangesArgs0#changes_args.timeout =:= undefined)
- andalso (ChangesArgs0#changes_args.heartbeat =:= undefined) of
- true ->
- ChangesArgs0#changes_args{timeout = 10, heartbeat = 10};
- false ->
- ChangesArgs0
- end,
- FeedFun = couch_changes:handle_changes(ChangesArgs, Req, Db),
- try
- FeedFun({Callback, []})
- catch throw:{stop, _} ->
- ok
- end,
- catch couch_db:close(Db)
- end).
-
-
-maybe_pause(Parent, Acc) ->
- receive
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- maybe_pause(Parent, Acc);
- {get_heartbeats, Ref} ->
- Parent ! {hearthbeats, Ref, get(heartbeat_count)},
- maybe_pause(Parent, Acc);
- {reset, Ref} ->
- Parent ! {ok, Ref},
- maybe_pause(Parent, []);
- {pause, Ref} ->
- Parent ! {paused, Ref},
- pause_loop(Parent, Acc);
- {stop, Ref} ->
- Parent ! {ok, Ref},
- throw({stop, Acc})
- after 0 ->
- Acc
- end.
-
-
-pause_loop(Parent, Acc) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref},
- throw({stop, Acc});
- {reset, Ref} ->
- Parent ! {ok, Ref},
- pause_loop(Parent, []);
- {continue, Ref} ->
- Parent ! {ok, Ref},
- Acc;
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- pause_loop(Parent, Acc)
- end.
-
-
-stop_loop(Parent, Acc) ->
- receive
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- stop_loop(Parent, Acc);
- {stop, Ref} ->
- Parent ! {ok, Ref},
- Acc
- end.
-
-
-create_db(DbName) ->
- couch_db:create(
- DbName,
- [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
-
-
-delete_db(Db) ->
- ok = couch_server:delete(
- couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
diff --git a/test/etap/074-doc-update-conflicts.t b/test/etap/074-doc-update-conflicts.t
deleted file mode 100755
index 09d063313..000000000
--- a/test/etap/074-doc-update-conflicts.t
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--define(i2l(I), integer_to_list(I)).
-
-test_db_name() -> <<"couch_test_update_conflicts">>.
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(35),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- couch_config:set("couchdb", "delayed_commits", "true", false),
-
- lists:foreach(
- fun(NumClients) -> test_concurrent_doc_update(NumClients) end,
- [100, 500, 1000, 2000, 5000]),
-
- test_bulk_delete_create(),
-
- couch_server_sup:stop(),
- ok.
-
-
-% Verify that if multiple clients try to update the same document
-% simultaneously, only one of them will get success response and all
-% the other ones will get a conflict error. Also validate that the
-% client which got the success response got its document version
-% persisted into the database.
-test_concurrent_doc_update(NumClients) ->
- {ok, Db} = create_db(test_db_name()),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"foobar">>},
- {<<"value">>, 0}
- ]}),
- {ok, Rev} = couch_db:update_doc(Db, Doc, []),
- ok = couch_db:close(Db),
- RevStr = couch_doc:rev_to_str(Rev),
- etap:diag("Created first revision of test document"),
-
- etap:diag("Spawning " ++ ?i2l(NumClients) ++
- " clients to update the document"),
- Clients = lists:map(
- fun(Value) ->
- ClientDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"foobar">>},
- {<<"_rev">>, RevStr},
- {<<"value">>, Value}
- ]}),
- Pid = spawn_client(ClientDoc),
- {Value, Pid, erlang:monitor(process, Pid)}
- end,
- lists:seq(1, NumClients)),
-
- lists:foreach(fun({_, Pid, _}) -> Pid ! go end, Clients),
- etap:diag("Waiting for clients to finish"),
-
- {NumConflicts, SavedValue} = lists:foldl(
- fun({Value, Pid, MonRef}, {AccConflicts, AccValue}) ->
- receive
- {'DOWN', MonRef, process, Pid, {ok, _NewRev}} ->
- {AccConflicts, Value};
- {'DOWN', MonRef, process, Pid, conflict} ->
- {AccConflicts + 1, AccValue};
- {'DOWN', MonRef, process, Pid, Error} ->
- etap:bail("Client " ++ ?i2l(Value) ++
- " got update error: " ++ couch_util:to_list(Error))
- after 60000 ->
- etap:bail("Timeout waiting for client " ++ ?i2l(Value) ++ " to die")
- end
- end,
- {0, nil},
- Clients),
-
- etap:diag("Verifying client results"),
- etap:is(
- NumConflicts,
- NumClients - 1,
- "Got " ++ ?i2l(NumClients - 1) ++ " client conflicts"),
-
- {ok, Db2} = couch_db:open_int(test_db_name(), []),
- {ok, Leaves} = couch_db:open_doc_revs(Db2, <<"foobar">>, all, []),
- ok = couch_db:close(Db2),
- etap:is(length(Leaves), 1, "Only one document revision was persisted"),
- [{ok, Doc2}] = Leaves,
- {JsonDoc} = couch_doc:to_json_obj(Doc2, []),
- etap:is(
- couch_util:get_value(<<"value">>, JsonDoc),
- SavedValue,
- "Persisted doc has the right value"),
-
- ok = timer:sleep(1000),
- etap:diag("Restarting the server"),
- couch_server_sup:stop(),
- ok = timer:sleep(1000),
- couch_server_sup:start_link(test_util:config_files()),
-
- {ok, Db3} = couch_db:open_int(test_db_name(), []),
- {ok, Leaves2} = couch_db:open_doc_revs(Db3, <<"foobar">>, all, []),
- ok = couch_db:close(Db3),
- etap:is(length(Leaves2), 1, "Only one document revision was persisted"),
- [{ok, Doc3}] = Leaves,
- etap:is(Doc3, Doc2, "Got same document after server restart"),
-
- delete_db(Db3).
-
-
-% COUCHDB-188
-test_bulk_delete_create() ->
- {ok, Db} = create_db(test_db_name()),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"foobar">>},
- {<<"value">>, 0}
- ]}),
- {ok, Rev} = couch_db:update_doc(Db, Doc, []),
-
- DeletedDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"foobar">>},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"_deleted">>, true}
- ]}),
- NewDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"foobar">>},
- {<<"value">>, 666}
- ]}),
-
- {ok, Results} = couch_db:update_docs(Db, [DeletedDoc, NewDoc], []),
- ok = couch_db:close(Db),
-
- etap:is(length([ok || {ok, _} <- Results]), 2,
- "Deleted and non-deleted versions got an ok reply"),
-
- [{ok, Rev1}, {ok, Rev2}] = Results,
- {ok, Db2} = couch_db:open_int(test_db_name(), []),
-
- {ok, [{ok, Doc1}]} = couch_db:open_doc_revs(
- Db2, <<"foobar">>, [Rev1], [conflicts, deleted_conflicts]),
- {ok, [{ok, Doc2}]} = couch_db:open_doc_revs(
- Db2, <<"foobar">>, [Rev2], [conflicts, deleted_conflicts]),
- ok = couch_db:close(Db2),
-
- {Doc1Props} = couch_doc:to_json_obj(Doc1, []),
- {Doc2Props} = couch_doc:to_json_obj(Doc2, []),
-
- etap:is(couch_util:get_value(<<"_deleted">>, Doc1Props), true,
- "Document was deleted"),
- etap:is(couch_util:get_value(<<"_deleted">>, Doc2Props), undefined,
- "New document not flagged as deleted"),
- etap:is(couch_util:get_value(<<"value">>, Doc2Props), 666,
- "New leaf revision has the right value"),
- etap:is(couch_util:get_value(<<"_conflicts">>, Doc1Props), undefined,
- "Deleted document has no conflicts"),
- etap:is(couch_util:get_value(<<"_deleted_conflicts">>, Doc1Props), undefined,
- "Deleted document has no deleted conflicts"),
- etap:is(couch_util:get_value(<<"_conflicts">>, Doc2Props), undefined,
- "New leaf revision doesn't have conflicts"),
- etap:is(couch_util:get_value(<<"_deleted_conflicts">>, Doc2Props), undefined,
- "New leaf revision doesn't have deleted conflicts"),
-
- etap:is(element(1, Rev1), 2, "Deleted revision has position 2"),
- etap:is(element(1, Rev2), 1, "New leaf revision has position 1"),
-
- delete_db(Db2).
-
-
-spawn_client(Doc) ->
- spawn(fun() ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- receive go -> ok end,
- erlang:yield(),
- Result = try
- couch_db:update_doc(Db, Doc, [])
- catch _:Error ->
- Error
- end,
- ok = couch_db:close(Db),
- exit(Result)
- end).
-
-
-create_db(DbName) ->
- couch_db:create(
- DbName,
- [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
-
-
-delete_db(Db) ->
- ok = couch_server:delete(
- couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
diff --git a/test/etap/075-auth-cache.t b/test/etap/075-auth-cache.t
deleted file mode 100755
index 623884b0e..000000000
--- a/test/etap/075-auth-cache.t
+++ /dev/null
@@ -1,276 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--record(db, {
- main_pid = nil,
- update_pid = nil,
- compactor_pid = nil,
- instance_start_time, % number of microsecs since jan 1 1970 as a binary string
- fd,
- updater_fd,
- fd_ref_counter,
- header,
- committed_update_seq,
- fulldocinfo_by_id_btree,
- docinfo_by_seq_btree,
- local_docs_btree,
- update_seq,
- name,
- filepath,
- validate_doc_funs = [],
- security = [],
- security_ptr = nil,
- user_ctx = #user_ctx{},
- waiting_delayed_commit = nil,
- revs_limit = 1000,
- fsync_options = [],
- options = [],
- compression,
- before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
- after_doc_read = nil % nil | fun(Doc, Db) -> NewDoc
-}).
-
-auth_db_name() -> <<"couch_test_auth_db">>.
-auth_db_2_name() -> <<"couch_test_auth_db_2">>.
-salt() -> <<"SALT">>.
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(19),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- OrigName = couch_config:get("couch_httpd_auth", "authentication_db"),
- couch_config:set(
- "couch_httpd_auth", "authentication_db",
- binary_to_list(auth_db_name()), false),
- delete_db(auth_db_name()),
- delete_db(auth_db_2_name()),
-
- test_auth_db_crash(),
-
- couch_config:set("couch_httpd_auth", "authentication_db", OrigName, false),
- delete_db(auth_db_name()),
- delete_db(auth_db_2_name()),
- couch_server_sup:stop(),
- ok.
-
-
-test_auth_db_crash() ->
- Creds0 = couch_auth_cache:get_user_creds("joe"),
- etap:is(Creds0, nil, "Got nil when getting joe's credentials"),
-
- etap:diag("Adding first version of Joe's user doc"),
- PasswordHash1 = hash_password("pass1"),
- {ok, Rev1} = update_user_doc(auth_db_name(), "joe", "pass1"),
-
- Creds1 = couch_auth_cache:get_user_creds("joe"),
- etap:is(is_list(Creds1), true, "Got joe's credentials from cache"),
- etap:is(couch_util:get_value(<<"password_sha">>, Creds1), PasswordHash1,
- "Cached credentials have the right password"),
-
- etap:diag("Updating Joe's user doc password"),
- PasswordHash2 = hash_password("pass2"),
- {ok, _Rev2} = update_user_doc(auth_db_name(), "joe", "pass2", Rev1),
-
- Creds2 = couch_auth_cache:get_user_creds("joe"),
- etap:is(is_list(Creds2), true, "Got joe's credentials from cache"),
- etap:is(couch_util:get_value(<<"password_sha">>, Creds2), PasswordHash2,
- "Cached credentials have the new password"),
-
- etap:diag("Shutting down the auth database process"),
- shutdown_db(auth_db_name()),
-
- {ok, UpdateRev} = get_doc_rev(auth_db_name(), "joe"),
- PasswordHash3 = hash_password("pass3"),
- {ok, _Rev3} = update_user_doc(auth_db_name(), "joe", "pass3", UpdateRev),
-
- etap:is(get_user_doc_password_sha(auth_db_name(), "joe"),
- PasswordHash3,
- "Latest Joe's doc revision has the new password hash"),
-
- Creds3 = couch_auth_cache:get_user_creds("joe"),
- etap:is(is_list(Creds3), true, "Got joe's credentials from cache"),
- etap:is(couch_util:get_value(<<"password_sha">>, Creds3), PasswordHash3,
- "Cached credentials have the new password"),
-
- etap:diag("Deleting Joe's user doc"),
- delete_user_doc(auth_db_name(), "joe"),
- Creds4 = couch_auth_cache:get_user_creds("joe"),
- etap:is(nil, Creds4,
- "Joe's credentials not found in cache after user doc was deleted"),
-
- etap:diag("Adding new user doc for Joe"),
- PasswordHash5 = hash_password("pass5"),
- {ok, _NewRev1} = update_user_doc(auth_db_name(), "joe", "pass5"),
-
- Creds5 = couch_auth_cache:get_user_creds("joe"),
- etap:is(is_list(Creds5), true, "Got joe's credentials from cache"),
- etap:is(couch_util:get_value(<<"password_sha">>, Creds5), PasswordHash5,
- "Cached credentials have the right password"),
-
- full_commit(auth_db_name()),
-
- etap:diag("Changing the auth database"),
- couch_config:set(
- "couch_httpd_auth", "authentication_db",
- binary_to_list(auth_db_2_name()), false),
- ok = timer:sleep(500),
-
- Creds6 = couch_auth_cache:get_user_creds("joe"),
- etap:is(nil, Creds6,
- "Joe's credentials not found in cache after auth database changed"),
-
- etap:diag("Adding first version of Joe's user doc to new auth database"),
- PasswordHash7 = hash_password("pass7"),
- {ok, _} = update_user_doc(auth_db_2_name(), "joe", "pass7"),
-
- Creds7 = couch_auth_cache:get_user_creds("joe"),
- etap:is(is_list(Creds7), true, "Got joe's credentials from cache"),
- etap:is(couch_util:get_value(<<"password_sha">>, Creds7), PasswordHash7,
- "Cached credentials have the right password"),
-
- etap:diag("Shutting down the auth database process"),
- shutdown_db(auth_db_2_name()),
-
- {ok, UpdateRev2} = get_doc_rev(auth_db_2_name(), "joe"),
- PasswordHash8 = hash_password("pass8"),
- {ok, _Rev8} = update_user_doc(auth_db_2_name(), "joe", "pass8", UpdateRev2),
-
- etap:is(get_user_doc_password_sha(auth_db_2_name(), "joe"),
- PasswordHash8,
- "Latest Joe's doc revision has the new password hash"),
-
- Creds8 = couch_auth_cache:get_user_creds("joe"),
- etap:is(is_list(Creds8), true, "Got joe's credentials from cache"),
- etap:is(couch_util:get_value(<<"password_sha">>, Creds8), PasswordHash8,
- "Cached credentials have the new password"),
-
- etap:diag("Changing the auth database again"),
- couch_config:set(
- "couch_httpd_auth", "authentication_db",
- binary_to_list(auth_db_name()), false),
- ok = timer:sleep(500),
-
- Creds9 = couch_auth_cache:get_user_creds("joe"),
- etap:is(Creds9, Creds5,
- "Got same credentials as before the firt auth database change"),
- etap:is(couch_util:get_value(<<"password_sha">>, Creds9), PasswordHash5,
- "Cached credentials have the right password"),
- ok.
-
-
-update_user_doc(DbName, UserName, Password) ->
- update_user_doc(DbName, UserName, Password, nil).
-
-update_user_doc(DbName, UserName, Password, Rev) ->
- User = iolist_to_binary(UserName),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"org.couchdb.user:", User/binary>>},
- {<<"name">>, User},
- {<<"type">>, <<"user">>},
- {<<"salt">>, salt()},
- {<<"password_sha">>, hash_password(Password)},
- {<<"roles">>, []}
- ] ++ case Rev of
- nil -> [];
- _ -> [{<<"_rev">>, Rev}]
- end}),
- {ok, AuthDb} = open_auth_db(DbName),
- {ok, NewRev} = couch_db:update_doc(AuthDb, Doc, []),
- ok = couch_db:close(AuthDb),
- {ok, couch_doc:rev_to_str(NewRev)}.
-
-
-hash_password(Password) ->
- list_to_binary(
- couch_util:to_hex(crypto:sha(iolist_to_binary([Password, salt()])))).
-
-
-shutdown_db(DbName) ->
- {ok, AuthDb} = open_auth_db(DbName),
- ok = couch_db:close(AuthDb),
- couch_util:shutdown_sync(AuthDb#db.main_pid),
- ok = timer:sleep(1000).
-
-
-get_doc_rev(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = open_auth_db(DbName),
- UpdateRev =
- case couch_db:open_doc(AuthDb, DocId, []) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- couch_util:get_value(<<"_rev">>, Props);
- {not_found, missing} ->
- nil
- end,
- ok = couch_db:close(AuthDb),
- {ok, UpdateRev}.
-
-
-get_user_doc_password_sha(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = open_auth_db(DbName),
- {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
- ok = couch_db:close(AuthDb),
- {Props} = couch_doc:to_json_obj(Doc, []),
- couch_util:get_value(<<"password_sha">>, Props).
-
-
-delete_user_doc(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = open_auth_db(DbName),
- {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
- {Props} = couch_doc:to_json_obj(Doc, []),
- DeletedDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)},
- {<<"_deleted">>, true}
- ]}),
- {ok, _} = couch_db:update_doc(AuthDb, DeletedDoc, []),
- ok = couch_db:close(AuthDb).
-
-
-full_commit(DbName) ->
- {ok, AuthDb} = open_auth_db(DbName),
- {ok, _} = couch_db:ensure_full_commit(AuthDb),
- ok = couch_db:close(AuthDb).
-
-
-open_auth_db(DbName) ->
- couch_db:open_int(
- DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
-
-
-delete_db(Name) ->
- couch_server:delete(
- Name, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
diff --git a/test/etap/076-file-compression.t b/test/etap/076-file-compression.t
deleted file mode 100755
index 292923004..000000000
--- a/test/etap/076-file-compression.t
+++ /dev/null
@@ -1,186 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
-test_db_name() -> <<"couch_test_file_compression">>.
-ddoc_id() -> <<"_design/test">>.
-num_docs() -> 5000.
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(10),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- couch_config:set("couchdb", "file_compression", "none", false),
-
- create_database(),
- compact_db(),
- compact_view(),
- DbDiskSize1 = db_disk_size(),
- ViewDiskSize1 = view_disk_size(),
-
- couch_config:set("couchdb", "file_compression", "snappy", false),
- compact_db(),
- compact_view(),
- DbDiskSize2 = db_disk_size(),
- ViewDiskSize2 = view_disk_size(),
-
- etap:is(DbDiskSize2 < DbDiskSize1, true, "Database disk size decreased"),
- etap:is(ViewDiskSize2 < ViewDiskSize1, true, "Index disk size decreased"),
-
- couch_config:set("couchdb", "file_compression", "deflate_9", false),
- compact_db(),
- compact_view(),
- DbDiskSize3 = db_disk_size(),
- ViewDiskSize3 = view_disk_size(),
-
- etap:is(DbDiskSize3 < DbDiskSize2, true, "Database disk size decreased again"),
- etap:is(ViewDiskSize3 < ViewDiskSize2, true, "Index disk size decreased again"),
-
- couch_config:set("couchdb", "file_compression", "deflate_1", false),
- compact_db(),
- compact_view(),
- DbDiskSize4 = db_disk_size(),
- ViewDiskSize4 = view_disk_size(),
-
- etap:is(DbDiskSize4 > DbDiskSize3, true, "Database disk size increased"),
- etap:is(ViewDiskSize4 > ViewDiskSize3, true, "Index disk size increased"),
-
- couch_config:set("couchdb", "file_compression", "snappy", false),
- compact_db(),
- compact_view(),
- DbDiskSize5 = db_disk_size(),
- ViewDiskSize5 = view_disk_size(),
-
- etap:is(DbDiskSize5 > DbDiskSize4, true, "Database disk size increased again"),
- etap:is(ViewDiskSize5 > ViewDiskSize4, true, "Index disk size increased again"),
-
- couch_config:set("couchdb", "file_compression", "none", false),
- compact_db(),
- compact_view(),
- DbDiskSize6 = db_disk_size(),
- ViewDiskSize6 = view_disk_size(),
-
- etap:is(DbDiskSize6 > DbDiskSize5, true, "Database disk size increased again"),
- etap:is(ViewDiskSize6 > ViewDiskSize5, true, "Index disk size increased again"),
-
- delete_db(),
- couch_server_sup:stop(),
- ok.
-
-
-create_database() ->
- {ok, Db} = couch_db:create(
- test_db_name(),
- [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]),
- ok = populate_db(Db, num_docs()),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ddoc_id()},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc.string); }">>}
- ]}}
- ]}
- }
- ]}),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
- refresh_index(),
- ok = couch_db:close(Db).
-
-
-populate_db(_Db, NumDocs) when NumDocs =< 0 ->
- ok;
-populate_db(Db, NumDocs) ->
- Docs = lists:map(
- fun(_) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, couch_uuids:random()},
- {<<"string">>, list_to_binary(lists:duplicate(1000, $X))}
- ]})
- end,
- lists:seq(1, 500)),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- populate_db(Db, NumDocs - 500).
-
-
-refresh_index() ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- {ok, DDoc} = couch_db:open_doc(Db, ddoc_id(), [ejson_body]),
- couch_mrview:query_view(Db, DDoc, <<"view1">>, [{stale, false}]),
- ok = couch_db:close(Db).
-
-
-compact_db() ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- {ok, CompactPid} = couch_db:start_compact(Db),
- MonRef = erlang:monitor(process, CompactPid),
- receive
- {'DOWN', MonRef, process, CompactPid, normal} ->
- ok;
- {'DOWN', MonRef, process, CompactPid, Reason} ->
- etap:bail("Error compacting database: " ++ couch_util:to_list(Reason))
- after 120000 ->
- etap:bail("Timeout waiting for database compaction")
- end,
- ok = couch_db:close(Db).
-
-
-compact_view() ->
- {ok, MonRef} = couch_mrview:compact(test_db_name(), ddoc_id(), [monitor]),
- receive
- {'DOWN', MonRef, process, _CompactPid, normal} ->
- ok;
- {'DOWN', MonRef, process, _CompactPid, Reason} ->
- etap:bail("Error compacting view group: " ++ couch_util:to_list(Reason))
- after 120000 ->
- etap:bail("Timeout waiting for view group compaction")
- end.
-
-
-db_disk_size() ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- {ok, Info} = couch_db:get_db_info(Db),
- ok = couch_db:close(Db),
- couch_util:get_value(disk_size, Info).
-
-
-view_disk_size() ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- {ok, DDoc} = couch_db:open_doc(Db, ddoc_id(), [ejson_body]),
- {ok, Info} = couch_mrview:get_info(Db, DDoc),
- ok = couch_db:close(Db),
- couch_util:get_value(disk_size, Info).
-
-
-delete_db() ->
- ok = couch_server:delete(
- test_db_name(), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
diff --git a/test/etap/077-couch-db-fast-db-delete-create.t b/test/etap/077-couch-db-fast-db-delete-create.t
deleted file mode 100644
index 202669879..000000000
--- a/test/etap/077-couch-db-fast-db-delete-create.t
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
-
- test_util:init_code_path(),
-
- etap:plan(unknown),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- Msg = io_lib:format("Test died abnormally: ~p", [Other]),
- etap:diag(Msg),
- etap:bail(Msg)
- end,
- ok.
-
-loop(0) ->
- ok;
-loop(N) ->
- ok = cycle(),
- loop(N - 1).
-
-cycle() ->
- ok = couch_server:delete(<<"etap-test-db">>, []),
- {ok, _Db} = couch_db:create(<<"etap-test-db">>, []),
- ok.
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
-
- {ok, _Db} = couch_db:create(<<"etap-test-db">>, []),
-
- ok = loop(1),
- ok = loop(10),
- ok = loop(100),
- ok = loop(1000),
-
- % for more thorough testing:
- % ok = loop(10000),
- % ok = loop(100000),
- % ok = loop(1000000),
- % ok = loop(10000000),
-
- ok = couch_server:delete(<<"etap-test-db">>, []),
-
- etap:is(true, true, "lots of creating and deleting of a database"),
- ok.
diff --git a/test/etap/080-config-get-set.t b/test/etap/080-config-get-set.t
deleted file mode 100755
index 94a9cba57..000000000
--- a/test/etap/080-config-get-set.t
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-default_config() ->
- test_util:build_file("etc/couchdb/default_dev.ini").
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(12),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- % start couch_config with default
- couch_config:start_link([default_config()]),
-
-
- % Check that we can get values
-
-
- etap:fun_is(
- fun(List) -> length(List) > 0 end,
- couch_config:all(),
- "Data was loaded from the INI file."
- ),
-
- etap:fun_is(
- fun(List) -> length(List) > 0 end,
- couch_config:get("daemons"),
- "There are settings in the [daemons] section of the INI file."
- ),
-
- etap:is(
- couch_config:get("httpd_design_handlers", "_view"),
- "{couch_mrview_http, handle_view_req}",
- "The {httpd_design_handlers, view} is the expected default."
- ),
-
- etap:is(
- couch_config:get("httpd", "foo", "bar"),
- "bar",
- "Returns the default when key doesn't exist in config."
- ),
-
- etap:is(
- couch_config:get("httpd", "foo"),
- undefined,
- "The default default is the atom 'undefined'."
- ),
-
- etap:is(
- couch_config:get("httpd", "port", "bar"),
- "5984",
- "Only returns the default when the config setting does not exist."
- ),
-
-
- % Check that setting values works.
-
-
- ok = couch_config:set("log", "level", "severe", false),
-
- etap:is(
- couch_config:get("log", "level"),
- "severe",
- "Non persisted changes take effect."
- ),
-
- etap:is(
- couch_config:get("new_section", "bizzle"),
- undefined,
- "Section 'new_section' does not exist."
- ),
-
- ok = couch_config:set("new_section", "bizzle", "bang", false),
-
- etap:is(
- couch_config:get("new_section", "bizzle"),
- "bang",
- "New section 'new_section' was created for a new key/value pair."
- ),
-
-
- % Check that deleting works
-
-
- ok = couch_config:delete("new_section", "bizzle", false),
- etap:is(
- couch_config:get("new_section", "bizzle"),
- undefined,
- "Deleting sets the value to \"\""
- ),
-
-
- % Check ge/set/delete binary strings
-
- ok = couch_config:set(<<"foo">>, <<"bar">>, <<"baz">>, false),
- etap:is(
- couch_config:get(<<"foo">>, <<"bar">>),
- <<"baz">>,
- "Can get and set with binary section and key values."
- ),
- ok = couch_config:delete(<<"foo">>, <<"bar">>, false),
- etap:is(
- couch_config:get(<<"foo">>, <<"bar">>),
- undefined,
- "Deleting with binary section/key pairs sets the value to \"\""
- ),
-
- ok.
diff --git a/test/etap/081-config-override.t b/test/etap/081-config-override.t
deleted file mode 100755
index 01f8b4c23..000000000
--- a/test/etap/081-config-override.t
+++ /dev/null
@@ -1,212 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-default_config() ->
- test_util:build_file("etc/couchdb/default_dev.ini").
-
-local_config_1() ->
- test_util:source_file("test/etap/081-config-override.1.ini").
-
-local_config_2() ->
- test_util:source_file("test/etap/081-config-override.2.ini").
-
-local_config_write() ->
- test_util:build_file("test/etap/temp.081").
-
-% Run tests and wait for the config gen_server to shutdown.
-run_tests(IniFiles, Tests) ->
- {ok, Pid} = couch_config:start_link(IniFiles),
- erlang:monitor(process, Pid),
- Tests(),
- couch_config:stop(),
- receive
- {'DOWN', _, _, Pid, _} -> ok;
- _Other -> etap:diag("OTHER: ~p~n", [_Other])
- after
- 1000 -> throw({timeout_error, config_stop})
- end.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(17),
-
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
-
- CheckStartStop = fun() -> ok end,
- run_tests([default_config()], CheckStartStop),
-
- CheckDefaults = fun() ->
- etap:is(
- couch_config:get("couchdb", "max_dbs_open"),
- "100",
- "{couchdb, max_dbs_open} is 100 by defualt."
- ),
-
- etap:is(
- couch_config:get("httpd","port"),
- "5984",
- "{httpd, port} is 5984 by default"
- ),
-
- etap:is(
- couch_config:get("fizbang", "unicode"),
- undefined,
- "{fizbang, unicode} is undefined by default"
- )
- end,
-
- run_tests([default_config()], CheckDefaults),
-
-
- % Check that subsequent files override values appropriately
-
- CheckOverride = fun() ->
- etap:is(
- couch_config:get("couchdb", "max_dbs_open"),
- "10",
- "{couchdb, max_dbs_open} was overriden with the value 10"
- ),
-
- etap:is(
- couch_config:get("httpd", "port"),
- "4895",
- "{httpd, port} was overriden with the value 4895"
- )
- end,
-
- run_tests([default_config(), local_config_1()], CheckOverride),
-
-
- % Check that overrides can create new sections
-
- CheckOverride2 = fun() ->
- etap:is(
- couch_config:get("httpd", "port"),
- "80",
- "{httpd, port} is overriden with the value 80"
- ),
-
- etap:is(
- couch_config:get("fizbang", "unicode"),
- "normalized",
- "{fizbang, unicode} was created by override INI file"
- )
- end,
-
- run_tests([default_config(), local_config_2()], CheckOverride2),
-
-
- % Check that values can be overriden multiple times
-
- CheckOverride3 = fun() ->
- etap:is(
- couch_config:get("httpd", "port"),
- "80",
- "{httpd, port} value was taken from the last specified INI file."
- )
- end,
-
- run_tests(
- [default_config(), local_config_1(), local_config_2()],
- CheckOverride3
- ),
-
- % Check persistence to last file.
-
- % Empty the file in case it exists.
- {ok, Fd} = file:open(local_config_write(), write),
- ok = file:truncate(Fd),
- ok = file:close(Fd),
-
- % Open and write a value
- CheckCanWrite = fun() ->
- etap:is(
- couch_config:get("httpd", "port"),
- "5984",
- "{httpd, port} is still 5984 by default"
- ),
-
- etap:is(
- couch_config:set("httpd", "port", "8080"),
- ok,
- "Writing {httpd, port} is kosher."
- ),
-
- etap:is(
- couch_config:get("httpd", "port"),
- "8080",
- "{httpd, port} was updated to 8080 successfully."
- ),
-
- etap:is(
- couch_config:delete("httpd", "bind_address"),
- ok,
- "Deleting {httpd, bind_address} succeeds"
- ),
-
- etap:is(
- couch_config:get("httpd", "bind_address"),
- undefined,
- "{httpd, bind_address} was actually deleted."
- )
- end,
-
- run_tests([default_config(), local_config_write()], CheckCanWrite),
-
- % Open and check where we don't expect persistence.
-
- CheckDidntWrite = fun() ->
- etap:is(
- couch_config:get("httpd", "port"),
- "5984",
- "{httpd, port} was not persisted to the primary INI file."
- ),
-
- etap:is(
- couch_config:get("httpd", "bind_address"),
- "127.0.0.1",
- "{httpd, bind_address} was not deleted form the primary INI file."
- )
- end,
-
- run_tests([default_config()], CheckDidntWrite),
-
- % Open and check we have only the persistence we expect.
- CheckDidWrite = fun() ->
- etap:is(
- couch_config:get("httpd", "port"),
- "8080",
- "{httpd, port} is still 8080 after reopening the config."
- ),
-
- etap:is(
- couch_config:get("httpd", "bind_address"),
- undefined,
- "{httpd, bind_address} is still \"\" after reopening."
- )
- end,
-
- run_tests([local_config_write()], CheckDidWrite),
-
- ok.
diff --git a/test/etap/082-config-register.t b/test/etap/082-config-register.t
deleted file mode 100755
index 191ba8f8b..000000000
--- a/test/etap/082-config-register.t
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-default_config() ->
- test_util:build_file("etc/couchdb/default_dev.ini").
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(5),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_config:start_link([default_config()]),
-
- etap:is(
- couch_config:get("httpd", "port"),
- "5984",
- "{httpd, port} is 5984 by default."
- ),
-
- ok = couch_config:set("httpd", "port", "4895", false),
-
- etap:is(
- couch_config:get("httpd", "port"),
- "4895",
- "{httpd, port} changed to 4895"
- ),
-
- SentinelFunc = fun() ->
- % Ping/Pong to make sure we wait for this
- % process to die
- receive {ping, From} -> From ! pong end
- end,
- SentinelPid = spawn(SentinelFunc),
-
- couch_config:register(
- fun("httpd", "port", Value) ->
- etap:is(Value, "8080", "Registered function got notification.")
- end,
- SentinelPid
- ),
-
- ok = couch_config:set("httpd", "port", "8080", false),
-
- % Implicitly checking that we *don't* call the function
- etap:is(
- couch_config:get("httpd", "bind_address"),
- "127.0.0.1",
- "{httpd, bind_address} is not '0.0.0.0'"
- ),
- ok = couch_config:set("httpd", "bind_address", "0.0.0.0", false),
-
- % Ping-Pong kill process
- SentinelPid ! {ping, self()},
- receive
- _Any -> ok
- after 1000 ->
- throw({timeout_error, registered_pid})
- end,
-
- ok = couch_config:set("httpd", "port", "80", false),
- etap:is(
- couch_config:get("httpd", "port"),
- "80",
- "Implicitly test that the function got de-registered"
- ),
-
- % test passing of Persist flag
- couch_config:register(
- fun("httpd", _, _, Persist) ->
- etap:is(Persist, false)
- end),
- ok = couch_config:set("httpd", "port", "80", false),
-
- ok.
diff --git a/test/etap/083-config-no-files.t b/test/etap/083-config-no-files.t
deleted file mode 100755
index 0ce38e667..000000000
--- a/test/etap/083-config-no-files.t
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(3),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_config:start_link([]),
-
- etap:fun_is(
- fun(KVPairs) -> length(KVPairs) == 0 end,
- couch_config:all(),
- "No INI files specified returns 0 key/value pairs."
- ),
-
- ok = couch_config:set("httpd", "port", "80", false),
-
- etap:is(
- couch_config:get("httpd", "port"),
- "80",
- "Created a new non-persisted k/v pair."
- ),
-
- ok = couch_config:set("httpd", "bind_address", "127.0.0.1"),
- etap:is(
- couch_config:get("httpd", "bind_address"),
- "127.0.0.1",
- "Asking for a persistent key/value pair doesn't choke."
- ),
-
- ok.
diff --git a/test/etap/090-task-status.t b/test/etap/090-task-status.t
deleted file mode 100755
index 23115bdaa..000000000
--- a/test/etap/090-task-status.t
+++ /dev/null
@@ -1,279 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(28),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-get_task_prop(Pid, Prop) ->
- From = list_to_binary(pid_to_list(Pid)),
- Element = lists:foldl(
- fun(PropList,Acc) ->
- case couch_util:get_value(pid,PropList) of
- From ->
- [PropList | Acc];
- _ ->
- []
- end
- end,
- [], couch_task_status:all()
- ),
- case couch_util:get_value(Prop, hd(Element), nil) of
- nil ->
- etap:bail("Could not get property '" ++ couch_util:to_list(Prop) ++
- "' for task " ++ pid_to_list(Pid));
- Value ->
- Value
- end.
-
-
-loop() ->
- receive
- {add, Props, From} ->
- Resp = couch_task_status:add_task(Props),
- From ! {ok, self(), Resp},
- loop();
- {update, Props, From} ->
- Resp = couch_task_status:update(Props),
- From ! {ok, self(), Resp},
- loop();
- {update_frequency, Msecs, From} ->
- Resp = couch_task_status:set_update_frequency(Msecs),
- From ! {ok, self(), Resp},
- loop();
- {done, From} ->
- From ! {ok, self(), ok}
- end.
-
-call(Pid, Command) ->
- Pid ! {Command, self()},
- wait(Pid).
-
-call(Pid, Command, Arg) ->
- Pid ! {Command, Arg, self()},
- wait(Pid).
-
-wait(Pid) ->
- receive
- {ok, Pid, Msg} -> Msg
- after 1000 ->
- throw(timeout_error)
- end.
-
-test() ->
- {ok, TaskStatusPid} = couch_task_status:start_link(),
-
- TaskUpdater = fun() -> loop() end,
- % create three updaters
- Pid1 = spawn(TaskUpdater),
- Pid2 = spawn(TaskUpdater),
- Pid3 = spawn(TaskUpdater),
-
- ok = call(Pid1, add, [{type, replication}, {progress, 0}]),
- etap:is(
- length(couch_task_status:all()),
- 1,
- "Started a task"
- ),
- Task1StartTime = get_task_prop(Pid1, started_on),
- etap:is(
- is_integer(Task1StartTime),
- true,
- "Task start time is defined."
- ),
- etap:is(
- get_task_prop(Pid1, updated_on),
- Task1StartTime,
- "Task's start time is the same as the update time before an update."
- ),
-
- etap:is(
- call(Pid1, add, [{type, compaction}, {progress, 0}]),
- {add_task_error, already_registered},
- "Unable to register multiple tasks for a single Pid."
- ),
-
- etap:is(
- get_task_prop(Pid1, type),
- replication,
- "Task type is 'replication'."
- ),
- etap:is(
- get_task_prop(Pid1, progress),
- 0,
- "Task progress is 0."
- ),
-
- ok = timer:sleep(1000),
- call(Pid1, update, [{progress, 25}]),
- etap:is(
- get_task_prop(Pid1, progress),
- 25,
- "Task progress is 25."
- ),
- etap:is(
- get_task_prop(Pid1, updated_on) > Task1StartTime,
- true,
- "Task's last update time has increased after an update."
- ),
-
- call(Pid2, add, [{type, compaction}, {progress, 0}]),
- etap:is(
- length(couch_task_status:all()),
- 2,
- "Started a second task."
- ),
- Task2StartTime = get_task_prop(Pid2, started_on),
- etap:is(
- is_integer(Task2StartTime),
- true,
- "Second task's start time is defined."
- ),
- etap:is(
- get_task_prop(Pid2, updated_on),
- Task2StartTime,
- "Second task's start time is the same as the update time before an update."
- ),
-
- etap:is(
- get_task_prop(Pid2, type),
- compaction,
- "Second task's type is 'compaction'."
- ),
- etap:is(
- get_task_prop(Pid2, progress),
- 0,
- "Second task's progress is 0."
- ),
-
- ok = timer:sleep(1000),
- call(Pid2, update, [{progress, 33}]),
- etap:is(
- get_task_prop(Pid2, progress),
- 33,
- "Second task's progress updated to 33."
- ),
- etap:is(
- get_task_prop(Pid2, updated_on) > Task2StartTime,
- true,
- "Second task's last update time has increased after an update."
- ),
-
- call(Pid3, add, [{type, indexer}, {progress, 0}]),
- etap:is(
- length(couch_task_status:all()),
- 3,
- "Registered a third task."
- ),
- Task3StartTime = get_task_prop(Pid3, started_on),
- etap:is(
- is_integer(Task3StartTime),
- true,
- "Third task's start time is defined."
- ),
- etap:is(
- get_task_prop(Pid3, updated_on),
- Task3StartTime,
- "Third task's start time is the same as the update time before an update."
- ),
-
- etap:is(
- get_task_prop(Pid3, type),
- indexer,
- "Third task's type is 'indexer'."
- ),
- etap:is(
- get_task_prop(Pid3, progress),
- 0,
- "Third task's progress is 0."
- ),
-
- ok = timer:sleep(1000),
- call(Pid3, update, [{progress, 50}]),
- etap:is(
- get_task_prop(Pid3, progress),
- 50,
- "Third task's progress updated to 50."
- ),
- etap:is(
- get_task_prop(Pid3, updated_on) > Task3StartTime,
- true,
- "Third task's last update time has increased after an update."
- ),
-
- call(Pid3, update_frequency, 500),
- call(Pid3, update, [{progress, 66}]),
- etap:is(
- get_task_prop(Pid3, progress),
- 66,
- "Third task's progress updated to 66."
- ),
-
- call(Pid3, update, [{progress, 67}]),
- etap:is(
- get_task_prop(Pid3, progress),
- 66,
- "Task update dropped because of frequency limit."
- ),
-
- call(Pid3, update_frequency, 0),
- call(Pid3, update, [{progress, 77}]),
- etap:is(
- get_task_prop(Pid3, progress),
- 77,
- "Task updated after reseting frequency limit."
- ),
-
-
- call(Pid1, done),
- etap:is(
- length(couch_task_status:all()),
- 2,
- "First task finished."
- ),
-
- call(Pid2, done),
- etap:is(
- length(couch_task_status:all()),
- 1,
- "Second task finished."
- ),
-
- call(Pid3, done),
- etap:is(
- length(couch_task_status:all()),
- 0,
- "Third task finished."
- ),
-
- erlang:monitor(process, TaskStatusPid),
- couch_task_status:stop(),
- receive
- {'DOWN', _, _, TaskStatusPid, _} ->
- ok
- after
- 1000 ->
- throw(timeout_error)
- end,
-
- ok.
diff --git a/test/etap/100-ref-counter.t b/test/etap/100-ref-counter.t
deleted file mode 100755
index 8f996d04d..000000000
--- a/test/etap/100-ref-counter.t
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(8),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-loop() ->
- receive
- close -> ok
- end.
-
-wait() ->
- receive
- {'DOWN', _, _, _, _} -> ok
- after 1000 ->
- throw(timeout_error)
- end.
-
-test() ->
- {ok, RefCtr} = couch_ref_counter:start([]),
-
- etap:is(
- couch_ref_counter:count(RefCtr),
- 1,
- "A ref_counter is initialized with the calling process as a referer."
- ),
-
- ChildPid1 = spawn(fun() -> loop() end),
-
- % This is largely implicit in that nothing else breaks
- % as ok is just returned from gen_server:cast()
- etap:is(
- couch_ref_counter:drop(RefCtr, ChildPid1),
- ok,
- "Dropping an unknown Pid is ignored."
- ),
-
- couch_ref_counter:add(RefCtr, ChildPid1),
- etap:is(
- couch_ref_counter:count(RefCtr),
- 2,
- "Adding a Pid to the ref_counter increases it's count."
- ),
-
- couch_ref_counter:add(RefCtr, ChildPid1),
- etap:is(
- couch_ref_counter:count(RefCtr),
- 2,
- "Readding the same Pid maintains the count but increments it's refs."
- ),
-
- couch_ref_counter:drop(RefCtr, ChildPid1),
- etap:is(
- couch_ref_counter:count(RefCtr),
- 2,
- "Droping the doubly added Pid only removes a ref, not a referer."
- ),
-
- couch_ref_counter:drop(RefCtr, ChildPid1),
- etap:is(
- couch_ref_counter:count(RefCtr),
- 1,
- "Dropping the second ref drops the referer."
- ),
-
- couch_ref_counter:add(RefCtr, ChildPid1),
- etap:is(
- couch_ref_counter:count(RefCtr),
- 2,
- "Sanity checking that the Pid was re-added."
- ),
-
- erlang:monitor(process, ChildPid1),
- ChildPid1 ! close,
- wait(),
-
- CheckFun = fun
- (Iter, nil) ->
- case couch_ref_counter:count(RefCtr) of
- 1 -> Iter;
- _ -> nil
- end;
- (_, Acc) ->
- Acc
- end,
- Result = lists:foldl(CheckFun, nil, lists:seq(1, 10000)),
- etap:isnt(
- Result,
- nil,
- "The referer count was decremented automatically on process exit."
- ),
-
- ok.
diff --git a/test/etap/120-stats-collect.t b/test/etap/120-stats-collect.t
deleted file mode 100755
index a30f9ac5d..000000000
--- a/test/etap/120-stats-collect.t
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(11),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail()
- end,
- ok.
-
-test() ->
- couch_stats_collector:start(),
- ok = test_counters(),
- ok = test_abs_values(),
- ok = test_proc_counting(),
- ok = test_all(),
- ok.
-
-test_counters() ->
- AddCount = fun() -> couch_stats_collector:increment(foo) end,
- RemCount = fun() -> couch_stats_collector:decrement(foo) end,
- repeat(AddCount, 100),
- repeat(RemCount, 25),
- repeat(AddCount, 10),
- repeat(RemCount, 5),
- etap:is(
- couch_stats_collector:get(foo),
- 80,
- "Incrememnt tracks correctly."
- ),
-
- repeat(RemCount, 80),
- etap:is(
- couch_stats_collector:get(foo),
- 0,
- "Decremented to zaro."
- ),
- ok.
-
-test_abs_values() ->
- lists:map(fun(Val) ->
- couch_stats_collector:record(bar, Val)
- end, lists:seq(1, 15)),
- etap:is(
- couch_stats_collector:get(bar),
- lists:seq(1, 15),
- "Absolute values are recorded correctly."
- ),
-
- couch_stats_collector:clear(bar),
- etap:is(
- couch_stats_collector:get(bar),
- nil,
- "Absolute values are cleared correctly."
- ),
- ok.
-
-test_proc_counting() ->
- Self = self(),
- OnePid = spawn(fun() ->
- couch_stats_collector:track_process_count(hoopla),
- Self ! reporting,
- receive sepuku -> ok end
- end),
- R1 = erlang:monitor(process, OnePid),
- receive reporting -> ok end,
- etap:is(
- couch_stats_collector:get(hoopla),
- 1,
- "track_process_count increments the counter."
- ),
-
- TwicePid = spawn(fun() ->
- couch_stats_collector:track_process_count(hoopla),
- couch_stats_collector:track_process_count(hoopla),
- Self ! reporting,
- receive sepuku -> ok end
- end),
- R2 = erlang:monitor(process, TwicePid),
- receive reporting -> ok end,
- etap:is(
- couch_stats_collector:get(hoopla),
- 3,
- "track_process_count allows more than one incrememnt per Pid"
- ),
-
- OnePid ! sepuku,
- receive {'DOWN', R1, _, _, _} -> ok end,
- timer:sleep(250),
- etap:is(
- couch_stats_collector:get(hoopla),
- 2,
- "Process count is decremented when process exits."
- ),
-
- TwicePid ! sepuku,
- receive {'DOWN', R2, _, _, _} -> ok end,
- timer:sleep(250),
- etap:is(
- couch_stats_collector:get(hoopla),
- 0,
- "Process count is decremented for each call to track_process_count."
- ),
- ok.
-
-test_all() ->
- couch_stats_collector:record(bar, 0.0),
- couch_stats_collector:record(bar, 1.0),
- etap:is(
- lists:sort(couch_stats_collector:all()),
- [ {bar,[1.0,0.0]}, {foo,0}, { hoopla,0} ],
- "all/0 returns all counters and absolute values."
- ),
-
- etap:is(
- lists:sort(couch_stats_collector:all(incremental)),
- [ {foo, 0}, {hoopla, 0} ],
- "all/1 returns only the specified type."
- ),
-
- couch_stats_collector:record(zing, 90),
- etap:is(
- lists:sort(couch_stats_collector:all(absolute)),
- [ {bar,[1.0,0.0]}, {zing,"Z"} ],
- "all/1 returns only the specified type."
- ),
- ok.
-
-repeat(_, 0) ->
- ok;
-repeat(Fun, Count) ->
- Fun(),
- repeat(Fun, Count-1).
diff --git a/test/etap/121-stats-aggregates.t b/test/etap/121-stats-aggregates.t
deleted file mode 100755
index d678aa9d4..000000000
--- a/test/etap/121-stats-aggregates.t
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-ini_file() ->
- test_util:source_file("test/etap/121-stats-aggregates.ini").
-
-cfg_file() ->
- test_util:source_file("test/etap/121-stats-aggregates.cfg").
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(17),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail()
- end,
- ok.
-
-test() ->
- couch_config:start_link([ini_file()]),
- couch_stats_collector:start(),
- couch_stats_aggregator:start(cfg_file()),
- ok = test_all_empty(),
- ok = test_get_empty(),
- ok = test_count_stats(),
- ok = test_abs_stats(),
- ok.
-
-test_all_empty() ->
- {Aggs} = couch_stats_aggregator:all(),
-
- etap:is(length(Aggs), 2, "There are only two aggregate types in testing."),
- etap:is(
- couch_util:get_value(testing, Aggs),
- {[{stuff, make_agg(<<"yay description">>,
- null, null, null, null, null)}]},
- "{testing, stuff} is empty at start."
- ),
- etap:is(
- couch_util:get_value(number, Aggs),
- {[{'11', make_agg(<<"randomosity">>,
- null, null, null, null, null)}]},
- "{number, '11'} is empty at start."
- ),
- ok.
-
-test_get_empty() ->
- etap:is(
- couch_stats_aggregator:get_json({testing, stuff}),
- make_agg(<<"yay description">>, null, null, null, null, null),
- "Getting {testing, stuff} returns an empty aggregate."
- ),
- etap:is(
- couch_stats_aggregator:get_json({number, '11'}),
- make_agg(<<"randomosity">>, null, null, null, null, null),
- "Getting {number, '11'} returns an empty aggregate."
- ),
- ok.
-
-test_count_stats() ->
- lists:foreach(fun(_) ->
- couch_stats_collector:increment({testing, stuff})
- end, lists:seq(1, 100)),
- couch_stats_aggregator:collect_sample(),
- etap:is(
- couch_stats_aggregator:get_json({testing, stuff}),
- make_agg(<<"yay description">>, 100, 100, null, 100, 100),
- "COUNT: Adding values changes the stats."
- ),
- etap:is(
- couch_stats_aggregator:get_json({testing, stuff}, 1),
- make_agg(<<"yay description">>, 100, 100, null, 100, 100),
- "COUNT: Adding values changes stats for all times."
- ),
-
- timer:sleep(500),
- couch_stats_aggregator:collect_sample(),
- etap:is(
- couch_stats_aggregator:get_json({testing, stuff}),
- make_agg(<<"yay description">>, 100, 50, 70.711, 0, 100),
- "COUNT: Removing values changes stats."
- ),
- etap:is(
- couch_stats_aggregator:get_json({testing, stuff}, 1),
- make_agg(<<"yay description">>, 100, 50, 70.711, 0, 100),
- "COUNT: Removing values changes stats for all times."
- ),
-
- timer:sleep(600),
- couch_stats_aggregator:collect_sample(),
- etap:is(
- couch_stats_aggregator:get_json({testing, stuff}),
- make_agg(<<"yay description">>, 100, 33.333, 57.735, 0, 100),
- "COUNT: Letting time passes doesn't remove data from time 0 aggregates"
- ),
- etap:is(
- couch_stats_aggregator:get_json({testing, stuff}, 1),
- make_agg(<<"yay description">>, 0, 0, 0, 0, 0),
- "COUNT: Letting time pass removes data from other time aggregates."
- ),
- ok.
-
-test_abs_stats() ->
- lists:foreach(fun(X) ->
- couch_stats_collector:record({number, 11}, X)
- end, lists:seq(0, 10)),
- couch_stats_aggregator:collect_sample(),
- etap:is(
- couch_stats_aggregator:get_json({number, 11}),
- make_agg(<<"randomosity">>, 5, 5, null, 5, 5),
- "ABS: Adding values changes the stats."
- ),
- etap:is(
- couch_stats_aggregator:get_json({number, 11}, 1),
- make_agg(<<"randomosity">>, 5, 5, null, 5, 5),
- "ABS: Adding values changes stats for all times."
- ),
-
- timer:sleep(500),
- couch_stats_collector:record({number, 11}, 15),
- couch_stats_aggregator:collect_sample(),
- etap:is(
- couch_stats_aggregator:get_json({number, 11}),
- make_agg(<<"randomosity">>, 20, 10, 7.071, 5, 15),
- "ABS: New values changes stats"
- ),
- etap:is(
- couch_stats_aggregator:get_json({number, 11}, 1),
- make_agg(<<"randomosity">>, 20, 10, 7.071, 5, 15),
- "ABS: Removing values changes stats for all times."
- ),
-
- timer:sleep(600),
- couch_stats_aggregator:collect_sample(),
- etap:is(
- couch_stats_aggregator:get_json({number, 11}),
- make_agg(<<"randomosity">>, 20, 10, 7.071, 5, 15),
- "ABS: Letting time passes doesn't remove data from time 0 aggregates"
- ),
- etap:is(
- couch_stats_aggregator:get_json({number, 11}, 1),
- make_agg(<<"randomosity">>, 15, 15, null, 15, 15),
- "ABS: Letting time pass removes data from other time aggregates."
- ),
- ok.
-
-make_agg(Desc, Sum, Mean, StdDev, Min, Max) ->
- {[
- {description, Desc},
- {current, Sum},
- {sum, Sum},
- {mean, Mean},
- {stddev, StdDev},
- {min, Min},
- {max, Max}
- ]}.
diff --git a/test/etap/130-attachments-md5.t b/test/etap/130-attachments-md5.t
deleted file mode 100755
index a91c9bf18..000000000
--- a/test/etap/130-attachments-md5.t
+++ /dev/null
@@ -1,248 +0,0 @@
-#!/usr/bin/env escript
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-test_db_name() ->
- <<"etap-test-db">>.
-
-docid() ->
- case get(docid) of
- undefined ->
- put(docid, 1),
- "1";
- Count ->
- put(docid, Count+1),
- integer_to_list(Count+1)
- end.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(16),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- Addr = couch_config:get("httpd", "bind_address", any),
- put(addr, Addr),
- put(port, mochiweb_socket_server:get(couch_httpd, port)),
- timer:sleep(1000),
-
- couch_server:delete(test_db_name(), []),
- couch_db:create(test_db_name(), []),
-
- test_identity_without_md5(),
- test_chunked_without_md5(),
-
- test_identity_with_valid_md5(),
- test_chunked_with_valid_md5_header(),
- test_chunked_with_valid_md5_trailer(),
-
- test_identity_with_invalid_md5(),
- test_chunked_with_invalid_md5_header(),
- test_chunked_with_invalid_md5_trailer(),
-
- couch_server:delete(test_db_name(), []),
- couch_server_sup:stop(),
- ok.
-
-test_identity_without_md5() ->
- Data = [
- "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
- "Content-Type: text/plain\r\n",
- "Content-Length: 34\r\n",
- "\r\n",
- "We all live in a yellow submarine!"],
-
- {Code, Json} = do_request(Data),
- etap:is(Code, 201, "Stored with identity encoding and no MD5"),
- etap:is(get_json(Json, [<<"ok">>]), true, "Body indicates success.").
-
-test_chunked_without_md5() ->
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Data = [
- "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
- "Content-Type: text/plain\r\n",
- "Transfer-Encoding: chunked\r\n",
- "\r\n",
- to_hex(size(Part1)), "\r\n",
- Part1, "\r\n",
- to_hex(size(Part2)), "\r\n",
- Part2, "\r\n"
- "0\r\n"
- "\r\n"],
-
- {Code, Json} = do_request(Data),
- etap:is(Code, 201, "Stored with chunked encoding and no MD5"),
- etap:is(get_json(Json, [<<"ok">>]), true, "Body indicates success.").
-
-test_identity_with_valid_md5() ->
- AttData = "We all live in a yellow submarine!",
- Data = [
- "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
- "Content-Type: text/plain\r\n",
- "Content-Length: 34\r\n",
- "Content-MD5: ", base64:encode(couch_util:md5(AttData)), "\r\n",
- "\r\n",
- AttData],
-
- {Code, Json} = do_request(Data),
- etap:is(Code, 201, "Stored with identity encoding and valid MD5"),
- etap:is(get_json(Json, [<<"ok">>]), true, "Body indicates success.").
-
-test_chunked_with_valid_md5_header() ->
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Data = [
- "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
- "Content-Type: text/plain\r\n",
- "Transfer-Encoding: chunked\r\n",
- "Content-MD5: ", base64:encode(couch_util:md5(AttData)), "\r\n",
- "\r\n",
- to_hex(size(Part1)), "\r\n",
- Part1, "\r\n",
- to_hex(size(Part2)), "\r\n",
- Part2, "\r\n",
- "0\r\n",
- "\r\n"],
-
- {Code, Json} = do_request(Data),
- etap:is(Code, 201, "Stored with chunked encoding and valid MD5 header."),
- etap:is(get_json(Json, [<<"ok">>]), true, "Body indicates success.").
-
-test_chunked_with_valid_md5_trailer() ->
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Data = [
- "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
- "Content-Type: text/plain\r\n",
- "Transfer-Encoding: chunked\r\n",
- "Trailer: Content-MD5\r\n",
- "\r\n",
- to_hex(size(Part1)), "\r\n",
- Part1, "\r\n",
- to_hex(size(Part2)), "\r\n",
- Part2, "\r\n",
- "0\r\n",
- "Content-MD5: ", base64:encode(couch_util:md5(AttData)), "\r\n",
- "\r\n"],
-
- {Code, Json} = do_request(Data),
- etap:is(Code, 201, "Stored with chunked encoding and valid MD5 trailer."),
- etap:is(get_json(Json, [<<"ok">>]), true, "Body indicates success.").
-
-test_identity_with_invalid_md5() ->
- Data = [
- "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
- "Content-Type: text/plain\r\n",
- "Content-Length: 34\r\n",
- "Content-MD5: ", base64:encode(<<"foobar!">>), "\r\n",
- "\r\n",
- "We all live in a yellow submarine!"],
-
- {Code, Json} = do_request(Data),
- etap:is(Code, 400, "Invalid MD5 header causes an error: identity"),
- etap:is(
- get_json(Json, [<<"error">>]),
- <<"content_md5_mismatch">>,
- "Body indicates reason for failure."
- ).
-
-test_chunked_with_invalid_md5_header() ->
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Data = [
- "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
- "Content-Type: text/plain\r\n",
- "Transfer-Encoding: chunked\r\n",
- "Content-MD5: ", base64:encode(<<"so sneaky...">>), "\r\n",
- "\r\n",
- to_hex(size(Part1)), "\r\n",
- Part1, "\r\n",
- to_hex(size(Part2)), "\r\n",
- Part2, "\r\n",
- "0\r\n",
- "\r\n"],
-
- {Code, Json} = do_request(Data),
- etap:is(Code, 400, "Invalid MD5 header causes an error: chunked"),
- etap:is(
- get_json(Json, [<<"error">>]),
- <<"content_md5_mismatch">>,
- "Body indicates reason for failure."
- ).
-
-test_chunked_with_invalid_md5_trailer() ->
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Data = [
- "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
- "Content-Type: text/plain\r\n",
- "Transfer-Encoding: chunked\r\n",
- "Trailer: Content-MD5\r\n",
- "\r\n",
- to_hex(size(Part1)), "\r\n",
- Part1, "\r\n",
- to_hex(size(Part2)), "\r\n",
- Part2, "\r\n",
- "0\r\n",
- "Content-MD5: ", base64:encode(<<"Kool-Aid Fountain!">>), "\r\n",
- "\r\n"],
-
- {Code, Json} = do_request(Data),
- etap:is(Code, 400, "Invalid MD5 Trailer causes an error"),
- etap:is(
- get_json(Json, [<<"error">>]),
- <<"content_md5_mismatch">>,
- "Body indicates reason for failure."
- ).
-
-
-get_socket() ->
- Options = [binary, {packet, 0}, {active, false}],
- {ok, Sock} = gen_tcp:connect(get(addr), get(port), Options),
- Sock.
-
-do_request(Request) ->
- Sock = get_socket(),
- gen_tcp:send(Sock, list_to_binary(lists:flatten(Request))),
- timer:sleep(1000),
- {ok, R} = gen_tcp:recv(Sock, 0),
- gen_tcp:close(Sock),
- [Header, Body] = re:split(R, "\r\n\r\n", [{return, binary}]),
- {ok, {http_response, _, Code, _}, _} =
- erlang:decode_packet(http, Header, []),
- Json = ejson:decode(Body),
- {Code, Json}.
-
-get_json(Json, Path) ->
- couch_util:get_nested_json_value(Json, Path).
-
-to_hex(Val) ->
- to_hex(Val, []).
-
-to_hex(0, Acc) ->
- Acc;
-to_hex(Val, Acc) ->
- to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
-
-hex_char(V) when V < 10 -> $0 + V;
-hex_char(V) -> $A + V - 10.
-
diff --git a/test/etap/140-attachment-comp.t b/test/etap/140-attachment-comp.t
deleted file mode 100755
index 6f075ce44..000000000
--- a/test/etap/140-attachment-comp.t
+++ /dev/null
@@ -1,728 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-test_db_name() ->
- <<"couch_test_atts_compression">>.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(85),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- put(addr, couch_config:get("httpd", "bind_address", "127.0.0.1")),
- put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))),
- timer:sleep(1000),
- couch_server:delete(test_db_name(), []),
- couch_db:create(test_db_name(), []),
-
- couch_config:set("attachments", "compression_level", "8", false),
- couch_config:set("attachments", "compressible_types", "text/*", false),
-
- create_1st_text_att(),
- create_1st_png_att(),
- create_2nd_text_att(),
- create_2nd_png_att(),
-
- tests_for_1st_text_att(),
- tests_for_1st_png_att(),
- tests_for_2nd_text_att(),
- tests_for_2nd_png_att(),
-
- create_already_compressed_att(db_url() ++ "/doc_comp_att", "readme.txt"),
- test_already_compressed_att(db_url() ++ "/doc_comp_att", "readme.txt"),
-
- test_create_already_compressed_att_with_invalid_content_encoding(
- db_url() ++ "/doc_att_deflate",
- "readme.txt",
- zlib:compress(test_text_data()),
- "deflate"
- ),
-
- % COUCHDB-1711 - avoid weird timng/scheduling/request handling issue
- timer:sleep(100),
-
- test_create_already_compressed_att_with_invalid_content_encoding(
- db_url() ++ "/doc_att_compress",
- "readme.txt",
- % Note: As of OTP R13B04, it seems there's no LZW compression
- % (i.e. UNIX compress utility implementation) lib in OTP.
- % However there's a simple working Erlang implementation at:
- % http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php
- test_text_data(),
- "compress"
- ),
-
- test_compressible_type_with_parameters(),
-
- timer:sleep(3000), % to avoid mochiweb socket closed exceptions
- couch_server:delete(test_db_name(), []),
- couch_server_sup:stop(),
- ok.
-
-db_url() ->
- "http://" ++ get(addr) ++ ":" ++ get(port) ++ "/" ++
- binary_to_list(test_db_name()).
-
-create_1st_text_att() ->
- {ok, Code, _Headers, _Body} = test_util:request(
- db_url() ++ "/testdoc1/readme.txt",
- [{"Content-Type", "text/plain"}],
- put,
- test_text_data()),
- etap:is(Code, 201, "Created text attachment using the standalone api"),
- ok.
-
-create_1st_png_att() ->
- {ok, Code, _Headers, _Body} = test_util:request(
- db_url() ++ "/testdoc2/icon.png",
- [{"Content-Type", "image/png"}],
- put,
- test_png_data()),
- etap:is(Code, 201, "Created png attachment using the standalone api"),
- ok.
-
-% create a text attachment using the non-standalone attachment api
-create_2nd_text_att() ->
- DocJson = {[
- {<<"_attachments">>, {[
- {<<"readme.txt">>, {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"data">>, base64:encode(test_text_data())}
- ]}
- }]}}
- ]},
- {ok, Code, _Headers, _Body} = test_util:request(
- db_url() ++ "/testdoc3",
- [{"Content-Type", "application/json"}],
- put,
- ejson:encode(DocJson)),
- etap:is(Code, 201, "Created text attachment using the non-standalone api"),
- ok.
-
-% create a png attachment using the non-standalone attachment api
-create_2nd_png_att() ->
- DocJson = {[
- {<<"_attachments">>, {[
- {<<"icon.png">>, {[
- {<<"content_type">>, <<"image/png">>},
- {<<"data">>, base64:encode(test_png_data())}
- ]}
- }]}}
- ]},
- {ok, Code, _Headers, _Body} = test_util:request(
- db_url() ++ "/testdoc4",
- [{"Content-Type", "application/json"}],
- put,
- ejson:encode(DocJson)),
- etap:is(Code, 201, "Created png attachment using the non-standalone api"),
- ok.
-
-create_already_compressed_att(DocUri, AttName) ->
- {ok, Code, _Headers, _Body} = test_util:request(
- DocUri ++ "/" ++ AttName,
- [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}],
- put,
- zlib:gzip(test_text_data())),
- etap:is(
- Code,
- 201,
- "Created already compressed attachment using the standalone api"
- ),
- ok.
-
-tests_for_1st_text_att() ->
- test_get_1st_text_att_with_accept_encoding_gzip(),
- test_get_1st_text_att_without_accept_encoding_header(),
- test_get_1st_text_att_with_accept_encoding_deflate(),
- test_get_1st_text_att_with_accept_encoding_deflate_only(),
- test_get_doc_with_1st_text_att(),
- test_1st_text_att_stub().
-
-tests_for_1st_png_att() ->
- test_get_1st_png_att_without_accept_encoding_header(),
- test_get_1st_png_att_with_accept_encoding_gzip(),
- test_get_1st_png_att_with_accept_encoding_deflate(),
- test_get_doc_with_1st_png_att(),
- test_1st_png_att_stub().
-
-tests_for_2nd_text_att() ->
- test_get_2nd_text_att_with_accept_encoding_gzip(),
- test_get_2nd_text_att_without_accept_encoding_header(),
- test_get_doc_with_2nd_text_att(),
- test_2nd_text_att_stub().
-
-tests_for_2nd_png_att() ->
- test_get_2nd_png_att_without_accept_encoding_header(),
- test_get_2nd_png_att_with_accept_encoding_gzip(),
- test_get_doc_with_2nd_png_att(),
- test_2nd_png_att_stub().
-
-test_get_1st_text_att_with_accept_encoding_gzip() ->
- {ok, Code, Headers, Body} = test_util:request(
- db_url() ++ "/testdoc1/readme.txt",
- [{"Accept-Encoding", "gzip"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Gziped = lists:member({"Content-Encoding", "gzip"}, Headers),
- etap:is(Gziped, true, "received body is gziped"),
- Uncompressed = zlib:gunzip(iolist_to_binary(Body)),
- etap:is(
- Uncompressed,
- test_text_data(),
- "received data for the 1st text attachment is ok"
- ),
- ok.
-
-test_get_1st_text_att_without_accept_encoding_header() ->
- {ok, Code, Headers, Body} = test_util:request(
- db_url() ++ "/testdoc1/readme.txt",
- [],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Gziped = lists:member({"Content-Encoding", "gzip"}, Headers),
- etap:is(Gziped, false, "received body is not gziped"),
- etap:is(
- iolist_to_binary(Body),
- test_text_data(),
- "received data for the 1st text attachment is ok"
- ),
- ok.
-
-test_get_1st_text_att_with_accept_encoding_deflate() ->
- {ok, Code, Headers, Body} = test_util:request(
- db_url() ++ "/testdoc1/readme.txt",
- [{"Accept-Encoding", "deflate"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Gziped = lists:member({"Content-Encoding", "gzip"}, Headers),
- etap:is(Gziped, false, "received body is not gziped"),
- Deflated = lists:member({"Content-Encoding", "deflate"}, Headers),
- etap:is(Deflated, false, "received body is not deflated"),
- etap:is(
- iolist_to_binary(Body),
- test_text_data(),
- "received data for the 1st text attachment is ok"
- ),
- ok.
-
-test_get_1st_text_att_with_accept_encoding_deflate_only() ->
- {ok, Code, _Headers, _Body} = test_util:request(
- db_url() ++ "/testdoc1/readme.txt",
- [{"Accept-Encoding", "deflate, *;q=0"}],
- get),
- etap:is(
- Code,
- 406,
- "HTTP response code is 406 for an unsupported content encoding request"
- ),
- ok.
-
-test_get_1st_png_att_without_accept_encoding_header() ->
- {ok, Code, Headers, Body} = test_util:request(
- db_url() ++ "/testdoc2/icon.png",
- [],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Encoding = couch_util:get_value("Content-Encoding", Headers),
- etap:is(Encoding, undefined, "received body is not gziped"),
- etap:is(
- iolist_to_binary(Body),
- test_png_data(),
- "received data for the 1st png attachment is ok"
- ),
- ok.
-
-test_get_1st_png_att_with_accept_encoding_gzip() ->
- {ok, Code, Headers, Body} = test_util:request(
- db_url() ++ "/testdoc2/icon.png",
- [{"Accept-Encoding", "gzip"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Encoding = couch_util:get_value("Content-Encoding", Headers),
- etap:is(Encoding, undefined, "received body is not gziped"),
- etap:is(
- iolist_to_binary(Body),
- test_png_data(),
- "received data for the 1st png attachment is ok"
- ),
- ok.
-
-test_get_1st_png_att_with_accept_encoding_deflate() ->
- {ok, Code, Headers, Body} = test_util:request(
- db_url() ++ "/testdoc2/icon.png",
- [{"Accept-Encoding", "deflate"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Encoding = couch_util:get_value("Content-Encoding", Headers),
- etap:is(Encoding, undefined, "received body is in identity form"),
- etap:is(
- iolist_to_binary(Body),
- test_png_data(),
- "received data for the 1st png attachment is ok"
- ),
- ok.
-
-test_get_doc_with_1st_text_att() ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/testdoc1?attachments=true",
- [{"Accept", "application/json"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Json = ejson:decode(Body),
- TextAttJson = couch_util:get_nested_json_value(
- Json,
- [<<"_attachments">>, <<"readme.txt">>]
- ),
- TextAttType = couch_util:get_nested_json_value(
- TextAttJson,
- [<<"content_type">>]
- ),
- TextAttData = couch_util:get_nested_json_value(
- TextAttJson,
- [<<"data">>]
- ),
- etap:is(
- TextAttType,
- <<"text/plain">>,
- "1st text attachment has type text/plain"
- ),
- %% check the attachment's data is the base64 encoding of the plain text
- %% and not the base64 encoding of the gziped plain text
- etap:is(
- TextAttData,
- base64:encode(test_text_data()),
- "1st text attachment data is properly base64 encoded"
- ),
- ok.
-
-test_1st_text_att_stub() ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/testdoc1?att_encoding_info=true",
- [],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Json = ejson:decode(Body),
- {TextAttJson} = couch_util:get_nested_json_value(
- Json,
- [<<"_attachments">>, <<"readme.txt">>]
- ),
- TextAttLength = couch_util:get_value(<<"length">>, TextAttJson),
- etap:is(
- TextAttLength,
- byte_size(test_text_data()),
- "1st text attachment stub length matches the uncompressed length"
- ),
- TextAttEncoding = couch_util:get_value(<<"encoding">>, TextAttJson),
- etap:is(
- TextAttEncoding,
- <<"gzip">>,
- "1st text attachment stub has the encoding field set to gzip"
- ),
- TextAttEncLength = couch_util:get_value(<<"encoded_length">>, TextAttJson),
- etap:is(
- TextAttEncLength,
- iolist_size(zlib:gzip(test_text_data())),
- "1st text attachment stub encoded_length matches the compressed length"
- ),
- ok.
-
-test_get_doc_with_1st_png_att() ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/testdoc2?attachments=true",
- [{"Accept", "application/json"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Json = ejson:decode(Body),
- PngAttJson = couch_util:get_nested_json_value(
- Json,
- [<<"_attachments">>, <<"icon.png">>]
- ),
- PngAttType = couch_util:get_nested_json_value(
- PngAttJson,
- [<<"content_type">>]
- ),
- PngAttData = couch_util:get_nested_json_value(
- PngAttJson,
- [<<"data">>]
- ),
- etap:is(PngAttType, <<"image/png">>, "attachment has type image/png"),
- etap:is(
- PngAttData,
- base64:encode(test_png_data()),
- "1st png attachment data is properly base64 encoded"
- ),
- ok.
-
-test_1st_png_att_stub() ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/testdoc2?att_encoding_info=true",
- [{"Accept", "application/json"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Json = ejson:decode(Body),
- {PngAttJson} = couch_util:get_nested_json_value(
- Json,
- [<<"_attachments">>, <<"icon.png">>]
- ),
- PngAttLength = couch_util:get_value(<<"length">>, PngAttJson),
- etap:is(
- PngAttLength,
- byte_size(test_png_data()),
- "1st png attachment stub length matches the uncompressed length"
- ),
- PngEncoding = couch_util:get_value(<<"encoding">>, PngAttJson),
- etap:is(
- PngEncoding,
- undefined,
- "1st png attachment stub doesn't have an encoding field"
- ),
- PngEncLength = couch_util:get_value(<<"encoded_length">>, PngAttJson),
- etap:is(
- PngEncLength,
- undefined,
- "1st png attachment stub doesn't have an encoded_length field"
- ),
- ok.
-
-test_get_2nd_text_att_with_accept_encoding_gzip() ->
- {ok, Code, Headers, Body} = test_util:request(
- db_url() ++ "/testdoc3/readme.txt",
- [{"Accept-Encoding", "gzip"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Gziped = lists:member({"Content-Encoding", "gzip"}, Headers),
- etap:is(Gziped, true, "received body is gziped"),
- Uncompressed = zlib:gunzip(iolist_to_binary(Body)),
- etap:is(
- Uncompressed,
- test_text_data(),
- "received data for the 2nd text attachment is ok"
- ),
- ok.
-
-test_get_2nd_text_att_without_accept_encoding_header() ->
- {ok, Code, Headers, Body} = test_util:request(
- db_url() ++ "/testdoc3/readme.txt",
- [],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Gziped = lists:member({"Content-Encoding", "gzip"}, Headers),
- etap:is(Gziped, false, "received body is not gziped"),
- etap:is(
- Body,
- test_text_data(),
- "received data for the 2nd text attachment is ok"
- ),
- ok.
-
-test_get_2nd_png_att_without_accept_encoding_header() ->
- {ok, Code, Headers, Body} = test_util:request(
- db_url() ++ "/testdoc4/icon.png",
- [],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Gziped = lists:member({"Content-Encoding", "gzip"}, Headers),
- etap:is(Gziped, false, "received body is not gziped"),
- etap:is(
- Body,
- test_png_data(),
- "received data for the 2nd png attachment is ok"
- ),
- ok.
-
-test_get_2nd_png_att_with_accept_encoding_gzip() ->
- {ok, Code, Headers, Body} = test_util:request(
- db_url() ++ "/testdoc4/icon.png",
- [{"Accept-Encoding", "gzip"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Gziped = lists:member({"Content-Encoding", "gzip"}, Headers),
- etap:is(Gziped, false, "received body is not gziped"),
- etap:is(
- Body,
- test_png_data(),
- "received data for the 2nd png attachment is ok"
- ),
- ok.
-
-test_get_doc_with_2nd_text_att() ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/testdoc3?attachments=true",
- [{"Accept", "application/json"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Json = ejson:decode(Body),
- TextAttJson = couch_util:get_nested_json_value(
- Json,
- [<<"_attachments">>, <<"readme.txt">>]
- ),
- TextAttType = couch_util:get_nested_json_value(
- TextAttJson,
- [<<"content_type">>]
- ),
- TextAttData = couch_util:get_nested_json_value(
- TextAttJson,
- [<<"data">>]
- ),
- etap:is(TextAttType, <<"text/plain">>, "attachment has type text/plain"),
- %% check the attachment's data is the base64 encoding of the plain text
- %% and not the base64 encoding of the gziped plain text
- etap:is(
- TextAttData,
- base64:encode(test_text_data()),
- "2nd text attachment data is properly base64 encoded"
- ),
- ok.
-
-test_2nd_text_att_stub() ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/testdoc3?att_encoding_info=true",
- [],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Json = ejson:decode(Body),
- {TextAttJson} = couch_util:get_nested_json_value(
- Json,
- [<<"_attachments">>, <<"readme.txt">>]
- ),
- TextAttLength = couch_util:get_value(<<"length">>, TextAttJson),
- etap:is(
- TextAttLength,
- byte_size(test_text_data()),
- "2nd text attachment stub length matches the uncompressed length"
- ),
- TextAttEncoding = couch_util:get_value(<<"encoding">>, TextAttJson),
- etap:is(
- TextAttEncoding,
- <<"gzip">>,
- "2nd text attachment stub has the encoding field set to gzip"
- ),
- TextAttEncLength = couch_util:get_value(<<"encoded_length">>, TextAttJson),
- etap:is(
- TextAttEncLength,
- iolist_size(zlib:gzip(test_text_data())),
- "2nd text attachment stub encoded_length matches the compressed length"
- ),
- ok.
-
-test_get_doc_with_2nd_png_att() ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/testdoc4?attachments=true",
- [{"Accept", "application/json"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Json = ejson:decode(Body),
- PngAttJson = couch_util:get_nested_json_value(
- Json,
- [<<"_attachments">>, <<"icon.png">>]
- ),
- PngAttType = couch_util:get_nested_json_value(
- PngAttJson,
- [<<"content_type">>]
- ),
- PngAttData = couch_util:get_nested_json_value(
- PngAttJson,
- [<<"data">>]
- ),
- etap:is(PngAttType, <<"image/png">>, "attachment has type image/png"),
- etap:is(
- PngAttData,
- base64:encode(test_png_data()),
- "2nd png attachment data is properly base64 encoded"
- ),
- ok.
-
-test_2nd_png_att_stub() ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/testdoc4?att_encoding_info=true",
- [],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Json = ejson:decode(Body),
- {PngAttJson} = couch_util:get_nested_json_value(
- Json,
- [<<"_attachments">>, <<"icon.png">>]
- ),
- PngAttLength = couch_util:get_value(<<"length">>, PngAttJson),
- etap:is(
- PngAttLength,
- byte_size(test_png_data()),
- "2nd png attachment stub length matches the uncompressed length"
- ),
- PngEncoding = couch_util:get_value(<<"encoding">>, PngAttJson),
- etap:is(
- PngEncoding,
- undefined,
- "2nd png attachment stub doesn't have an encoding field"
- ),
- PngEncLength = couch_util:get_value(<<"encoded_length">>, PngAttJson),
- etap:is(
- PngEncLength,
- undefined,
- "2nd png attachment stub doesn't have an encoded_length field"
- ),
- ok.
-
-test_already_compressed_att(DocUri, AttName) ->
- test_get_already_compressed_att_with_accept_gzip(DocUri, AttName),
- test_get_already_compressed_att_without_accept(DocUri, AttName),
- test_get_already_compressed_att_stub(DocUri, AttName).
-
-test_get_already_compressed_att_with_accept_gzip(DocUri, AttName) ->
- {ok, Code, Headers, Body} = test_util:request(
- DocUri ++ "/" ++ AttName,
- [{"Accept-Encoding", "gzip"}],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Gziped = lists:member({"Content-Encoding", "gzip"}, Headers),
- etap:is(Gziped, true, "received body is gziped"),
- etap:is(
- Body,
- zlib:gzip(test_text_data()),
- "received data for the already compressed attachment is ok"
- ),
- ok.
-
-test_get_already_compressed_att_without_accept(DocUri, AttName) ->
- {ok, Code, Headers, Body} = test_util:request(
- DocUri ++ "/" ++ AttName,
- [],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Gziped = lists:member({"Content-Encoding", "gzip"}, Headers),
- etap:is(Gziped, false, "received body is not gziped"),
- etap:is(
- Body,
- test_text_data(),
- "received data for the already compressed attachment is ok"
- ),
- ok.
-
-test_get_already_compressed_att_stub(DocUri, AttName) ->
- {ok, Code, _Headers, Body} = test_util:request(
- DocUri ++ "?att_encoding_info=true",
- [],
- get),
- etap:is(Code, 200, "HTTP response code is 200"),
- Json = ejson:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json,
- [<<"_attachments">>, iolist_to_binary(AttName)]
- ),
- AttLength = couch_util:get_value(<<"length">>, AttJson),
- etap:is(
- AttLength,
- iolist_size((zlib:gzip(test_text_data()))),
- "Already compressed attachment stub length matches the "
- "compressed length"
- ),
- Encoding = couch_util:get_value(<<"encoding">>, AttJson),
- etap:is(
- Encoding,
- <<"gzip">>,
- "Already compressed attachment stub has the encoding field set to gzip"
- ),
- EncLength = couch_util:get_value(<<"encoded_length">>, AttJson),
- etap:is(
- EncLength,
- AttLength,
- "Already compressed attachment stub encoded_length matches the "
- "length field value"
- ),
- ok.
-
-test_create_already_compressed_att_with_invalid_content_encoding(
- DocUri, AttName, AttData, Encoding) ->
- {ok, Code, _Headers, _Body} = test_util:request(
- DocUri ++ "/" ++ AttName,
- [{"Content-Encoding", Encoding}, {"Content-Type", "text/plain"}],
- put,
- AttData),
- etap:is(
- Code,
- 415,
- "Couldn't create an already compressed attachment using the "
- "unsupported encoding '" ++ Encoding ++ "'"
- ),
- ok.
-
-test_compressible_type_with_parameters() ->
- {ok, Code, _Headers, _Body} = test_util:request(
- db_url() ++ "/testdoc5/readme.txt",
- [{"Content-Type", "text/plain; charset=UTF-8"}],
- put,
- test_text_data()),
- etap:is(Code, 201, "Created text attachment with MIME type "
- "'text/plain; charset=UTF-8' using the standalone api"),
- {ok, Code2, Headers2, Body} = test_util:request(
- db_url() ++ "/testdoc5/readme.txt",
- [{"Accept-Encoding", "gzip"}],
- get),
- etap:is(Code2, 200, "HTTP response code is 200"),
- Gziped = lists:member({"Content-Encoding", "gzip"}, Headers2),
- etap:is(Gziped, true, "received body is gziped"),
- Uncompressed = zlib:gunzip(iolist_to_binary(Body)),
- etap:is(Uncompressed, test_text_data(), "received data is gzipped"),
- {ok, Code3, _Headers3, Body3} = test_util:request(
- db_url() ++ "/testdoc5?att_encoding_info=true",
- [],
- get),
- etap:is(Code3, 200, "HTTP response code is 200"),
- Json = ejson:decode(Body3),
- {TextAttJson} = couch_util:get_nested_json_value(
- Json,
- [<<"_attachments">>, <<"readme.txt">>]
- ),
- TextAttLength = couch_util:get_value(<<"length">>, TextAttJson),
- etap:is(
- TextAttLength,
- byte_size(test_text_data()),
- "text attachment stub length matches the uncompressed length"
- ),
- TextAttEncoding = couch_util:get_value(<<"encoding">>, TextAttJson),
- etap:is(
- TextAttEncoding,
- <<"gzip">>,
- "text attachment stub has the encoding field set to gzip"
- ),
- TextAttEncLength = couch_util:get_value(<<"encoded_length">>, TextAttJson),
- etap:is(
- TextAttEncLength,
- iolist_size(zlib:gzip(test_text_data())),
- "text attachment stub encoded_length matches the compressed length"
- ),
- ok.
-
-test_png_data() ->
- {ok, Data} = file:read_file(
- test_util:source_file("share/www/image/logo.png")
- ),
- Data.
-
-test_text_data() ->
- {ok, Data} = file:read_file(
- test_util:source_file("README.rst")
- ),
- Data.
diff --git a/test/etap/150-invalid-view-seq.t b/test/etap/150-invalid-view-seq.t
deleted file mode 100755
index 681875aff..000000000
--- a/test/etap/150-invalid-view-seq.t
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
-test_db_name() ->
- <<"couch_test_invalid_view_seq">>.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(10),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-%% NOTE: since during the test we stop the server,
-%% a huge and ugly but harmless stack trace is sent to stderr
-%%
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- timer:sleep(1000),
- delete_db(),
- create_db(),
-
- create_docs(),
- create_design_doc(),
-
- % make DB file backup
- backup_db_file(),
-
- put(addr, couch_config:get("httpd", "bind_address", "127.0.0.1")),
- put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))),
-
- create_new_doc(),
- query_view_before_restore_backup(),
-
- % restore DB file backup after querying view
- restore_backup_db_file(),
-
- query_view_after_restore_backup(),
-
- delete_db(),
- couch_server_sup:stop(),
- ok.
-
-admin_user_ctx() ->
- {user_ctx, #user_ctx{roles=[<<"_admin">>]}}.
-
-create_db() ->
- {ok, _} = couch_db:create(test_db_name(), [admin_user_ctx()]).
-
-delete_db() ->
- couch_server:delete(test_db_name(), [admin_user_ctx()]).
-
-create_docs() ->
- {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
-
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
-
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
- couch_db:ensure_full_commit(Db),
- couch_db:close(Db).
-
-create_design_doc() ->
- {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/foo">>},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"bar">>, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
- ]}}
- ]}}
- ]}),
- {ok, _} = couch_db:update_docs(Db, [DDoc]),
- couch_db:ensure_full_commit(Db),
- couch_db:close(Db).
-
-backup_db_file() ->
- DbFile = test_util:build_file("tmp/lib/" ++
- binary_to_list(test_db_name()) ++ ".couch"),
- {ok, _} = file:copy(DbFile, DbFile ++ ".backup"),
- ok.
-
-create_new_doc() ->
- {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
- Doc666 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc666">>},
- {<<"value">>, 999}
-
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc666]),
- couch_db:ensure_full_commit(Db),
- couch_db:close(Db).
-
-db_url() ->
- "http://" ++ get(addr) ++ ":" ++ get(port) ++ "/" ++
- binary_to_list(test_db_name()).
-
-query_view_before_restore_backup() ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/_design/foo/_view/bar", [], get),
- etap:is(Code, 200, "Got view response before restoring backup."),
- ViewJson = ejson:decode(Body),
- Rows = couch_util:get_nested_json_value(ViewJson, [<<"rows">>]),
- HasDoc1 = has_doc("doc1", Rows),
- HasDoc2 = has_doc("doc2", Rows),
- HasDoc3 = has_doc("doc3", Rows),
- HasDoc666 = has_doc("doc666", Rows),
- etap:is(HasDoc1, true, "Before backup restore, view has doc1"),
- etap:is(HasDoc2, true, "Before backup restore, view has doc2"),
- etap:is(HasDoc3, true, "Before backup restore, view has doc3"),
- etap:is(HasDoc666, true, "Before backup restore, view has doc666"),
- ok.
-
-has_doc(DocId1, Rows) ->
- DocId = iolist_to_binary(DocId1),
- lists:any(
- fun({R}) -> lists:member({<<"id">>, DocId}, R) end,
- Rows
- ).
-
-restore_backup_db_file() ->
- couch_server_sup:stop(),
- timer:sleep(3000),
- DbFile = test_util:build_file("tmp/lib/" ++
- binary_to_list(test_db_name()) ++ ".couch"),
- ok = file:delete(DbFile),
- ok = file:rename(DbFile ++ ".backup", DbFile),
- couch_server_sup:start_link(test_util:config_files()),
- timer:sleep(1000),
- put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))),
- ok.
-
-query_view_after_restore_backup() ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/_design/foo/_view/bar", [], get),
- etap:is(Code, 200, "Got view response after restoring backup."),
- ViewJson = ejson:decode(Body),
- Rows = couch_util:get_nested_json_value(ViewJson, [<<"rows">>]),
- HasDoc1 = has_doc("doc1", Rows),
- HasDoc2 = has_doc("doc2", Rows),
- HasDoc3 = has_doc("doc3", Rows),
- HasDoc666 = has_doc("doc666", Rows),
- etap:is(HasDoc1, true, "After backup restore, view has doc1"),
- etap:is(HasDoc2, true, "After backup restore, view has doc2"),
- etap:is(HasDoc3, true, "After backup restore, view has doc3"),
- etap:is(HasDoc666, false, "After backup restore, view does not have doc666"),
- ok.
diff --git a/test/etap/160-vhosts.t b/test/etap/160-vhosts.t
deleted file mode 100755
index 46fdd7393..000000000
--- a/test/etap/160-vhosts.t
+++ /dev/null
@@ -1,371 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
-server() ->
- lists:concat([
- "http://127.0.0.1:", mochiweb_socket_server:get(couch_httpd, port), "/"
- ]).
-
-dbname() -> "etap-test-db".
-admin_user_ctx() -> {user_ctx, #user_ctx{roles=[<<"_admin">>]}}.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(20),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- ibrowse:start(),
- crypto:start(),
-
- timer:sleep(1000),
- couch_server:delete(list_to_binary(dbname()), [admin_user_ctx()]),
- {ok, Db} = couch_db:create(list_to_binary(dbname()), [admin_user_ctx()]),
-
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 666}
- ]}),
-
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/doc1">>},
- {<<"shows">>, {[
- {<<"test">>, <<"function(doc, req) {
- return { json: {
- requested_path: '/' + req.requested_path.join('/'),
- path: '/' + req.path.join('/')
- }};
-}">>}
- ]}},
- {<<"rewrites">>, [
- {[
- {<<"from">>, <<"/">>},
- {<<"to">>, <<"_show/test">>}
- ]}
- ]}
- ]}),
-
- {ok, _} = couch_db:update_docs(Db, [Doc, Doc1]),
-
- couch_db:ensure_full_commit(Db),
-
- %% end boilerplate, start test
-
- ok = couch_config:set("vhosts", "example.com", "/etap-test-db", false),
- ok = couch_config:set("vhosts", "*.example.com",
- "/etap-test-db/_design/doc1/_rewrite", false),
- ok = couch_config:set("vhosts", "example.com/test", "/etap-test-db", false),
- ok = couch_config:set("vhosts", "example1.com",
- "/etap-test-db/_design/doc1/_rewrite/", false),
- ok = couch_config:set("vhosts",":appname.:dbname.example1.com",
- "/:dbname/_design/:appname/_rewrite/", false),
- ok = couch_config:set("vhosts", ":dbname.example1.com", "/:dbname", false),
-
- ok = couch_config:set("vhosts", "*.example2.com", "/*", false),
- ok = couch_config:set("vhosts", "*.example2.com/test", "/*", false),
- ok = couch_config:set("vhosts", "*/test", "/etap-test-db", false),
- ok = couch_config:set("vhosts", "*/test1",
- "/etap-test-db/_design/doc1/_show/test", false),
- ok = couch_config:set("vhosts", "example3.com", "/", false),
-
- %% reload rules
- couch_httpd_vhost:reload(),
-
- test_regular_request(),
- test_vhost_request(),
- test_vhost_request_with_qs(),
- test_vhost_request_with_global(),
- test_vhost_requested_path(),
- test_vhost_requested_path_path(),
- test_vhost_request_wildcard(),
- test_vhost_request_replace_var(),
- test_vhost_request_replace_var1(),
- test_vhost_request_replace_wildcard(),
- test_vhost_request_path(),
- test_vhost_request_path1(),
- test_vhost_request_path2(),
- test_vhost_request_path3(),
- test_vhost_request_to_root(),
- test_vhost_request_with_oauth(Db),
-
- %% restart boilerplate
- couch_db:close(Db),
- ok = couch_server:delete(couch_db:name(Db), [admin_user_ctx()]),
- timer:sleep(3000),
- couch_server_sup:stop(),
-
- ok.
-
-test_regular_request() ->
- case ibrowse:send_req(server(), [], get, []) of
- {ok, _, _, Body} ->
- {Props} = ejson:decode(Body),
- Couchdb = couch_util:get_value(<<"couchdb">>, Props),
- Version = couch_util:get_value(<<"version">>, Props),
- Vendor = couch_util:get_value(<<"vendor">>, Props),
- etap:isnt(Couchdb, undefined, "Found couchdb property"),
- etap:isnt(Version, undefined, "Found version property"),
- etap:isnt(Vendor, undefined, "Found vendor property");
- _Else ->
- etap:bail("http GET / request failed")
- end.
-
-test_vhost_request() ->
- case ibrowse:send_req(server(), [], get, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = ejson:decode(Body),
- HasDbNameInfo = proplists:is_defined(<<"db_name">>, JsonBody),
- etap:is(HasDbNameInfo, true, "should return database info");
- _Else ->
- etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_with_qs() ->
- Url = server() ++ "doc1?revs_info=true",
- case ibrowse:send_req(Url, [], get, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonProps} = ejson:decode(Body),
- HasRevsInfo = proplists:is_defined(<<"_revs_info">>, JsonProps),
- etap:is(HasRevsInfo, true, "should return _revs_info");
- _Else ->
- etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_with_global() ->
- Url2 = server() ++ "_utils/index.html",
- case ibrowse:send_req(Url2, [], get, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body2} ->
- "<!DOCTYPE" ++ _Foo = Body2,
- etap:is(true, true, "should serve /_utils even inside vhosts");
- _Else ->
- etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_requested_path() ->
- case ibrowse:send_req(server(), [], get, [], [{host_header, "example1.com"}]) of
- {ok, _, _, Body} ->
- {Json} = ejson:decode(Body),
- etap:is(case proplists:get_value(<<"requested_path">>, Json) of
- <<"/">> -> true;
- _ -> false
- end, true, <<"requested path in req ok">>);
- _Else ->
- etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_requested_path_path() ->
- case ibrowse:send_req(server(), [], get, [], [{host_header, "example1.com"}]) of
- {ok, _, _, Body} ->
- {Json} = ejson:decode(Body),
- etap:is(case proplists:get_value(<<"path">>, Json) of
- <<"/etap-test-db/_design/doc1/_show/test">> -> true;
- _ -> false
- end, true, <<"path in req ok">>);
- _Else ->
- etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_wildcard()->
- case ibrowse:send_req(server(), [], get, [], [{host_header, "test.example.com"}]) of
- {ok, _, _, Body} ->
- {Json} = ejson:decode(Body),
- etap:is(case proplists:get_value(<<"path">>, Json) of
- <<"/etap-test-db/_design/doc1/_show/test">> -> true;
- _ -> false
- end, true, <<"wildcard ok">>);
- _Else -> etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-
-test_vhost_request_replace_var() ->
- case ibrowse:send_req(server(), [], get, [], [{host_header,"etap-test-db.example1.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = ejson:decode(Body),
- HasDbNameInfo = proplists:is_defined(<<"db_name">>, JsonBody),
- etap:is(HasDbNameInfo, true, "should return database info");
- _Else -> etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_replace_var1() ->
- case ibrowse:send_req(server(), [], get, [], [{host_header, "doc1.etap-test-db.example1.com"}]) of
- {ok, _, _, Body} ->
- {Json} = ejson:decode(Body),
- etap:is(case proplists:get_value(<<"path">>, Json) of
- <<"/etap-test-db/_design/doc1/_show/test">> -> true;
- _ -> false
- end, true, <<"wildcard ok">>);
- _Else -> etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_replace_wildcard() ->
- case ibrowse:send_req(server(), [], get, [], [{host_header,"etap-test-db.example2.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = ejson:decode(Body),
- HasDbNameInfo = proplists:is_defined(<<"db_name">>, JsonBody),
- etap:is(HasDbNameInfo, true, "should return database info");
- _Else -> etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_path() ->
- Uri = server() ++ "test",
- case ibrowse:send_req(Uri, [], get, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = ejson:decode(Body),
- HasDbNameInfo = proplists:is_defined(<<"db_name">>, JsonBody),
- etap:is(HasDbNameInfo, true, "should return database info");
- _Else -> etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_path1() ->
- Url = server() ++ "test/doc1?revs_info=true",
- case ibrowse:send_req(Url, [], get, [], []) of
- {ok, _, _, Body} ->
- {JsonProps} = ejson:decode(Body),
- HasRevsInfo = proplists:is_defined(<<"_revs_info">>, JsonProps),
- etap:is(HasRevsInfo, true, "should return _revs_info");
- _Else -> etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_path2() ->
- Uri = server() ++ "test",
- case ibrowse:send_req(Uri, [], get, [], [{host_header,"etap-test-db.example2.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = ejson:decode(Body),
- HasDbNameInfo = proplists:is_defined(<<"db_name">>, JsonBody),
- etap:is(HasDbNameInfo, true, "should return database info");
- _Else -> etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_path3() ->
- Uri = server() ++ "test1",
- case ibrowse:send_req(Uri, [], get, [], []) of
- {ok, _, _, Body} ->
- {Json} = ejson:decode(Body),
- etap:is(case proplists:get_value(<<"path">>, Json) of
- <<"/etap-test-db/_design/doc1/_show/test">> -> true;
- _ -> false
- end, true, <<"path in req ok">>);
- _Else -> etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_to_root() ->
- Uri = server(),
- case ibrowse:send_req(Uri, [], get, [], []) of
- {ok, _, _, Body} ->
- {JsonBody} = ejson:decode(Body),
- HasCouchDBWelcome = proplists:is_defined(<<"couchdb">>, JsonBody),
- etap:is(HasCouchDBWelcome, true, "should allow redirect to /");
- _Else -> etap:is(false, true, <<"ibrowse fail">>)
- end.
-
-test_vhost_request_with_oauth(Db) ->
- {ok, AuthDb} = couch_db:create(
- <<"tap_test_sec_db">>, [admin_user_ctx(), overwrite]),
- PrevAuthDbName = couch_config:get("couch_httpd_auth", "authentication_db"),
- couch_config:set("couch_httpd_auth", "authentication_db", "tap_test_sec_db", false),
- couch_config:set("oauth_token_users", "otoksec1", "joe", false),
- couch_config:set("oauth_consumer_secrets", "consec1", "foo", false),
- couch_config:set("oauth_token_secrets", "otoksec1", "foobar", false),
- couch_config:set("couch_httpd_auth", "require_valid_user", "true", false),
-
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/test">>},
- {<<"language">>, <<"javascript">>},
- {<<"rewrites">>, [
- {[
- {<<"from">>, <<"foobar">>},
- {<<"to">>, <<"_info">>}
- ]}
- ]}
- ]}),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
-
- RewritePath = "/etap-test-db/_design/test/_rewrite/foobar",
- ok = couch_config:set("vhosts", "oauth-example.com", RewritePath, false),
- couch_httpd_vhost:reload(),
-
- case ibrowse:send_req(server(), [], get, [], [{host_header, "oauth-example.com"}]) of
- {ok, "401", _, Body} ->
- {JsonBody} = ejson:decode(Body),
- etap:is(
- couch_util:get_value(<<"error">>, JsonBody),
- <<"unauthorized">>,
- "Request without OAuth credentials failed");
- Error ->
- etap:bail("Request without OAuth credentials did not fail: " ++
- couch_util:to_list(Error))
- end,
-
- JoeDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"org.couchdb.user:joe">>},
- {<<"type">>, <<"user">>},
- {<<"name">>, <<"joe">>},
- {<<"roles">>, []},
- {<<"password_sha">>, <<"fe95df1ca59a9b567bdca5cbaf8412abd6e06121">>},
- {<<"salt">>, <<"4e170ffeb6f34daecfd814dfb4001a73">>}
- ]}),
- {ok, _} = couch_db:update_doc(AuthDb, JoeDoc, []),
-
- Url = "http://oauth-example.com/",
- Consumer = {"consec1", "foo", hmac_sha1},
- SignedParams = oauth:sign(
- "GET", Url, [], Consumer, "otoksec1", "foobar"),
- OAuthUrl = oauth:uri(server(), SignedParams),
-
- case ibrowse:send_req(OAuthUrl, [], get, [], [{host_header, "oauth-example.com"}]) of
- {ok, "200", _, Body2} ->
- {JsonBody2} = ejson:decode(Body2),
- etap:is(couch_util:get_value(<<"name">>, JsonBody2), <<"test">>,
- "should return ddoc info with OAuth credentials");
- Error2 ->
- etap:bail("Failed to access vhost with OAuth credentials: " ++
- couch_util:to_list(Error2))
- end,
-
- Consumer2 = {"consec1", "bad_secret", hmac_sha1},
- SignedParams2 = oauth:sign(
- "GET", Url, [], Consumer2, "otoksec1", "foobar"),
- OAuthUrl2 = oauth:uri(server(), SignedParams2),
-
- case ibrowse:send_req(OAuthUrl2, [], get, [], [{host_header, "oauth-example.com"}]) of
- {ok, "401", _, Body3} ->
- {JsonBody3} = ejson:decode(Body3),
- etap:is(
- couch_util:get_value(<<"error">>, JsonBody3),
- <<"unauthorized">>,
- "Request with bad OAuth credentials failed");
- Error3 ->
- etap:bail("Failed to access vhost with bad OAuth credentials: " ++
- couch_util:to_list(Error3))
- end,
-
- couch_config:set("couch_httpd_auth", "authentication_db", PrevAuthDbName, false),
- couch_config:set("couch_httpd_auth", "require_valid_user", "false", false),
- ok = couch_server:delete(couch_db:name(AuthDb), [admin_user_ctx()]).
diff --git a/test/etap/170-os-daemons.t b/test/etap/170-os-daemons.t
deleted file mode 100755
index 6feaa1bf4..000000000
--- a/test/etap/170-os-daemons.t
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(daemon, {
- port,
- name,
- cmd,
- kill,
- status=running,
- cfg_patterns=[],
- errors=[],
- buf=[]
-}).
-
-config_files() ->
- lists:map(fun test_util:build_file/1, [
- "etc/couchdb/default_dev.ini"
- ]).
-
-daemon_cmd() ->
- test_util:source_file("test/etap/170-os-daemons.es").
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(49),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_config:start_link(config_files()),
- couch_os_daemons:start_link(),
-
- etap:diag("Daemons boot after configuration added."),
- couch_config:set("os_daemons", "foo", daemon_cmd(), false),
- timer:sleep(1000),
-
- {ok, [D1]} = couch_os_daemons:info([table]),
- check_daemon(D1, "foo"),
-
- % Check table form
- {ok, Tab1} = couch_os_daemons:info(),
- [T1] = ets:tab2list(Tab1),
- check_daemon(T1, "foo"),
-
- etap:diag("Daemons stop after configuration removed."),
- couch_config:delete("os_daemons", "foo", false),
- timer:sleep(500),
-
- {ok, []} = couch_os_daemons:info([table]),
- {ok, Tab2} = couch_os_daemons:info(),
- etap:is(ets:tab2list(Tab2), [], "As table returns empty table."),
-
- etap:diag("Adding multiple daemons causes both to boot."),
- couch_config:set("os_daemons", "bar", daemon_cmd(), false),
- couch_config:set("os_daemons", "baz", daemon_cmd(), false),
- timer:sleep(500),
- {ok, Daemons} = couch_os_daemons:info([table]),
- lists:foreach(fun(D) ->
- check_daemon(D)
- end, Daemons),
-
- {ok, Tab3} = couch_os_daemons:info(),
- lists:foreach(fun(D) ->
- check_daemon(D)
- end, ets:tab2list(Tab3)),
-
- etap:diag("Removing one daemon leaves the other alive."),
- couch_config:delete("os_daemons", "bar", false),
- timer:sleep(500),
-
- {ok, [D2]} = couch_os_daemons:info([table]),
- check_daemon(D2, "baz"),
-
- % Check table version
- {ok, Tab4} = couch_os_daemons:info(),
- [T4] = ets:tab2list(Tab4),
- check_daemon(T4, "baz"),
-
- ok.
-
-check_daemon(D) ->
- check_daemon(D, D#daemon.name).
-
-check_daemon(D, Name) ->
- BaseName = "170-os-daemons.es",
- BaseLen = length(BaseName),
- CmdLen = length(D#daemon.cmd),
- CmdName = lists:sublist(D#daemon.cmd, CmdLen-BaseLen+1, BaseLen),
-
- etap:is(is_port(D#daemon.port), true, "Daemon port is a port."),
- etap:is(D#daemon.name, Name, "Daemon name was set correctly."),
- etap:is(CmdName, BaseName, "Command name was set correctly."),
- etap:isnt(D#daemon.kill, undefined, "Kill command was set."),
- etap:is(D#daemon.errors, [], "No errors occurred while booting."),
- etap:is(D#daemon.buf, [], "No extra data left in the buffer.").
diff --git a/test/etap/171-os-daemons-config.t b/test/etap/171-os-daemons-config.t
deleted file mode 100755
index e9dc3f322..000000000
--- a/test/etap/171-os-daemons-config.t
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(daemon, {
- port,
- name,
- cmd,
- kill,
- status=running,
- cfg_patterns=[],
- errors=[],
- buf=[]
-}).
-
-config_files() ->
- lists:map(fun test_util:build_file/1, [
- "etc/couchdb/default_dev.ini"
- ]).
-
-daemon_cmd() ->
- test_util:source_file("test/etap/171-os-daemons-config.es").
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(6),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_config:start_link(config_files()),
- couch_config:set("log", "level", "debug", false),
- couch_log:start_link(),
- couch_os_daemons:start_link(),
-
- % "foo" is a required name by this test.
- couch_config:set("os_daemons", "foo", daemon_cmd(), false),
- timer:sleep(1000),
-
- {ok, [D1]} = couch_os_daemons:info([table]),
- check_daemon(D1, "foo"),
-
- ok.
-
-check_daemon(D, Name) ->
- BaseName = "171-os-daemons-config.es",
- BaseLen = length(BaseName),
- CmdLen = length(D#daemon.cmd),
- CmdName = lists:sublist(D#daemon.cmd, CmdLen-BaseLen+1, BaseLen),
-
- etap:is(is_port(D#daemon.port), true, "Daemon port is a port."),
- etap:is(D#daemon.name, Name, "Daemon name was set correctly."),
- etap:is(CmdName, BaseName, "Command name was set correctly."),
- etap:isnt(D#daemon.kill, undefined, "Kill command was set."),
- etap:is(D#daemon.errors, [], "No errors occurred while booting."),
- etap:is(D#daemon.buf, [], "No extra data left in the buffer.").
diff --git a/test/etap/172-os-daemon-errors.t b/test/etap/172-os-daemon-errors.t
deleted file mode 100755
index bde5c6ffb..000000000
--- a/test/etap/172-os-daemon-errors.t
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(daemon, {
- port,
- name,
- cmd,
- kill,
- status=running,
- cfg_patterns=[],
- errors=[],
- buf=[]
-}).
-
-config_files() ->
- lists:map(fun test_util:build_file/1, [
- "etc/couchdb/default_dev.ini"
- ]).
-
-bad_perms() ->
- test_util:source_file("test/etap/172-os-daemon-errors.1.sh").
-
-die_on_boot() ->
- test_util:source_file("test/etap/172-os-daemon-errors.2.sh").
-
-die_quickly() ->
- test_util:source_file("test/etap/172-os-daemon-errors.3.sh").
-
-can_reboot() ->
- test_util:source_file("test/etap/172-os-daemon-errors.4.sh").
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(36),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_config:start_link(config_files()),
- couch_os_daemons:start_link(),
-
- etap:diag("Daemon not executable."),
- test_halts("foo", bad_perms(), 1000),
-
- etap:diag("Daemon dies on boot."),
- test_halts("bar", die_on_boot(), 1000),
-
- etap:diag("Daemon dies quickly after boot."),
- test_halts("baz", die_quickly(), 4000),
-
- etap:diag("Daemon dies, but not quickly enough to be halted."),
- test_runs("bam", can_reboot()),
-
- ok.
-
-test_halts(Name, Cmd, Time) ->
- couch_config:set("os_daemons", Name, Cmd ++ " 2> /dev/null", false),
- timer:sleep(Time),
- {ok, [D]} = couch_os_daemons:info([table]),
- check_dead(D, Name, Cmd),
- couch_config:delete("os_daemons", Name, false).
-
-test_runs(Name, Cmd) ->
- couch_config:set("os_daemons", Name, Cmd, false),
-
- timer:sleep(1000),
- {ok, [D1]} = couch_os_daemons:info([table]),
- check_daemon(D1, Name, Cmd, 0),
-
- % Should reboot every two seconds. We're at 1s, so wait
- % utnil 3s to be in the middle of the next invocation's
- % life span.
- timer:sleep(2000),
- {ok, [D2]} = couch_os_daemons:info([table]),
- check_daemon(D2, Name, Cmd, 1),
-
- % If the kill command changed, that means we rebooted the process.
- etap:isnt(D1#daemon.kill, D2#daemon.kill, "Kill command changed.").
-
-check_dead(D, Name, Cmd) ->
- BaseName = filename:basename(Cmd) ++ " 2> /dev/null",
- BaseLen = length(BaseName),
- CmdLen = length(D#daemon.cmd),
- CmdName = lists:sublist(D#daemon.cmd, CmdLen-BaseLen+1, BaseLen),
-
- etap:is(is_port(D#daemon.port), true, "Daemon port is a port."),
- etap:is(D#daemon.name, Name, "Daemon name was set correctly."),
- etap:is(CmdName, BaseName, "Command name was set correctly."),
- etap:isnt(D#daemon.kill, undefined, "Kill command was set."),
- etap:is(D#daemon.status, halted, "Daemon has been halted."),
- etap:is(D#daemon.errors, nil, "Errors have been disabled."),
- etap:is(D#daemon.buf, nil, "Buffer has been switched off.").
-
-check_daemon(D, Name, Cmd, Errs) ->
- BaseName = filename:basename(Cmd),
- BaseLen = length(BaseName),
- CmdLen = length(D#daemon.cmd),
- CmdName = lists:sublist(D#daemon.cmd, CmdLen-BaseLen+1, BaseLen),
-
- etap:is(is_port(D#daemon.port), true, "Daemon port is a port."),
- etap:is(D#daemon.name, Name, "Daemon name was set correctly."),
- etap:is(CmdName, BaseName, "Command name was set correctly."),
- etap:isnt(D#daemon.kill, undefined, "Kill command was set."),
- etap:is(D#daemon.status, running, "Daemon still running."),
- etap:is(length(D#daemon.errors), Errs, "Found expected number of errors."),
- etap:is(D#daemon.buf, [], "No extra data left in the buffer.").
-
diff --git a/test/etap/173-os-daemon-cfg-register.t b/test/etap/173-os-daemon-cfg-register.t
deleted file mode 100755
index 256ee7d54..000000000
--- a/test/etap/173-os-daemon-cfg-register.t
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(daemon, {
- port,
- name,
- cmd,
- kill,
- status=running,
- cfg_patterns=[],
- errors=[],
- buf=[]
-}).
-
-daemon_name() ->
- "wheee".
-
-daemon_cmd() ->
- test_util:build_file("test/etap/test_cfg_register").
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(27),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_config:start_link(test_util:config_files()),
- couch_os_daemons:start_link(),
-
- DaemonCmd = daemon_cmd() ++ " 2> /dev/null",
-
- etap:diag("Booting the daemon"),
- couch_config:set("os_daemons", daemon_name(), DaemonCmd, false),
- wait_for_start(10),
- {ok, [D1]} = couch_os_daemons:info([table]),
- check_daemon(D1, running),
-
- etap:diag("Daemon restarts when section changes."),
- couch_config:set("s1", "k", "foo", false),
- wait_for_restart(10),
- {ok, [D2]} = couch_os_daemons:info([table]),
- check_daemon(D2, running),
- etap:isnt(D2#daemon.kill, D1#daemon.kill, "Kill command shows restart."),
-
- etap:diag("Daemon doesn't restart for ignored section key."),
- couch_config:set("s2", "k2", "baz", false),
- timer:sleep(1000), % Message travel time.
- {ok, [D3]} = couch_os_daemons:info([table]),
- etap:is(D3, D2, "Same daemon info after ignored config change."),
-
- etap:diag("Daemon restarts for specific section/key pairs."),
- couch_config:set("s2", "k", "bingo", false),
- wait_for_restart(10),
- {ok, [D4]} = couch_os_daemons:info([table]),
- check_daemon(D4, running),
- etap:isnt(D4#daemon.kill, D3#daemon.kill, "Kill command changed again."),
-
- ok.
-
-wait_for_start(0) ->
- throw({error, wait_for_start});
-wait_for_start(N) ->
- case couch_os_daemons:info([table]) of
- {ok, []} ->
- timer:sleep(200),
- wait_for_start(N-1);
- _ ->
- timer:sleep(1000)
- end.
-
-wait_for_restart(0) ->
- throw({error, wait_for_restart});
-wait_for_restart(N) ->
- {ok, [D]} = couch_os_daemons:info([table]),
- case D#daemon.status of
- restarting ->
- timer:sleep(200),
- wait_for_restart(N-1);
- _ ->
- timer:sleep(1000)
- end.
-
-check_daemon(D, Status) ->
- BaseName = filename:basename(daemon_cmd()) ++ " 2> /dev/null",
- BaseLen = length(BaseName),
- CmdLen = length(D#daemon.cmd),
- CmdName = lists:sublist(D#daemon.cmd, CmdLen-BaseLen+1, BaseLen),
-
- etap:is(is_port(D#daemon.port), true, "Daemon port is a port."),
- etap:is(D#daemon.name, daemon_name(), "Daemon name was set correctly."),
- etap:is(CmdName, BaseName, "Command name was set correctly."),
- etap:isnt(D#daemon.kill, undefined, "Kill command was set."),
- etap:is(D#daemon.status, Status, "Daemon status is correct."),
- etap:is(D#daemon.cfg_patterns, [{"s1"}, {"s2", "k"}], "Cfg patterns set"),
- etap:is(D#daemon.errors, [], "No errors have occurred."),
- etap:isnt(D#daemon.buf, nil, "Buffer is active.").
diff --git a/test/etap/180-http-proxy.ini b/test/etap/180-http-proxy.ini
deleted file mode 100644
index 3e2ba1379..000000000
--- a/test/etap/180-http-proxy.ini
+++ /dev/null
@@ -1,20 +0,0 @@
-; Licensed to the Apache Software Foundation (ASF) under one
-; or more contributor license agreements. See the NOTICE file
-; distributed with this work for additional information
-; regarding copyright ownership. The ASF licenses this file
-; to you under the Apache License, Version 2.0 (the
-; "License"); you may not use this file except in compliance
-; with the License. You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing,
-; software distributed under the License is distributed on an
-; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-; KIND, either express or implied. See the License for the
-; specific language governing permissions and limitations
-; under the License.
-
-; 49151 is IANA Reserved, let's assume no one is listening there
-[httpd_global_handlers]
-_error = {couch_httpd_proxy, handle_proxy_req, <<"http://127.0.0.1:49151/">>}
diff --git a/test/etap/180-http-proxy.t b/test/etap/180-http-proxy.t
deleted file mode 100755
index da6760364..000000000
--- a/test/etap/180-http-proxy.t
+++ /dev/null
@@ -1,376 +0,0 @@
-#!/usr/bin/env escript
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(req, {method=get, path="", headers=[], body="", opts=[]}).
-
-server() ->
- lists:concat([
- "http://127.0.0.1:",
- mochiweb_socket_server:get(couch_httpd, port),
- "/_test/"
- ]).
-
-proxy() ->
- "http://127.0.0.1:" ++ integer_to_list(test_web:get_port()) ++ "/".
-
-external() -> "https://www.google.com/".
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(61),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag("Test died abnormally: ~p", [Other]),
- etap:bail("Bad return value.")
- end,
- ok.
-
-check_request(Name, Req, Remote, Local) ->
- case Remote of
- no_remote -> ok;
- _ -> test_web:set_assert(Remote)
- end,
- Url = case proplists:lookup(url, Req#req.opts) of
- none -> server() ++ Req#req.path;
- {url, DestUrl} -> DestUrl
- end,
- Opts = [{headers_as_is, true} | Req#req.opts],
- Resp =ibrowse:send_req(
- Url, Req#req.headers, Req#req.method, Req#req.body, Opts
- ),
- %etap:diag("ibrowse response: ~p", [Resp]),
- case Local of
- no_local -> ok;
- _ -> etap:fun_is(Local, Resp, Name)
- end,
- case {Remote, Local} of
- {no_remote, _} ->
- ok;
- {_, no_local} ->
- ok;
- _ ->
- etap:is(test_web:check_last(), was_ok, Name ++ " - request handled")
- end,
- Resp.
-
-test() ->
- ExtraConfig = [test_util:source_file("test/etap/180-http-proxy.ini")],
- couch_server_sup:start_link(test_util:config_files() ++ ExtraConfig),
- ibrowse:start(),
- crypto:start(),
-
- % start the test_web server on a random port
- test_web:start_link(),
- Url = lists:concat([
- "{couch_httpd_proxy, handle_proxy_req, <<\"http://127.0.0.1:",
- test_web:get_port(),
- "/\">>}"
- ]),
- couch_config:set("httpd_global_handlers", "_test", Url, false),
-
- % let couch_httpd restart
- timer:sleep(100),
-
- test_basic(),
- test_alternate_status(),
- test_trailing_slash(),
- test_passes_header(),
- test_passes_host_header(),
- test_passes_header_back(),
- test_rewrites_location_headers(),
- test_doesnt_rewrite_external_locations(),
- test_rewrites_relative_location(),
- test_uses_same_version(),
- test_passes_body(),
- test_passes_eof_body_back(),
- test_passes_chunked_body(),
- test_passes_chunked_body_back(),
-
- test_connect_error(),
-
- ok.
-
-test_basic() ->
- Remote = fun(Req) ->
- 'GET' = Req:get(method),
- "/" = Req:get(path),
- 0 = Req:get(body_length),
- <<>> = Req:recv_body(),
- {ok, {200, [{"Content-Type", "text/plain"}], "ok"}}
- end,
- Local = fun({ok, "200", _, "ok"}) -> true; (_) -> false end,
- check_request("Basic proxy test", #req{}, Remote, Local).
-
-test_alternate_status() ->
- Remote = fun(Req) ->
- "/alternate_status" = Req:get(path),
- {ok, {201, [], "ok"}}
- end,
- Local = fun({ok, "201", _, "ok"}) -> true; (_) -> false end,
- Req = #req{path="alternate_status"},
- check_request("Alternate status", Req, Remote, Local).
-
-test_trailing_slash() ->
- Remote = fun(Req) ->
- "/trailing_slash/" = Req:get(path),
- {ok, {200, [], "ok"}}
- end,
- Local = fun({ok, "200", _, "ok"}) -> true; (_) -> false end,
- Req = #req{path="trailing_slash/"},
- check_request("Trailing slash", Req, Remote, Local).
-
-test_passes_header() ->
- Remote = fun(Req) ->
- "/passes_header" = Req:get(path),
- "plankton" = Req:get_header_value("X-CouchDB-Ralph"),
- {ok, {200, [], "ok"}}
- end,
- Local = fun({ok, "200", _, "ok"}) -> true; (_) -> false end,
- Req = #req{
- path="passes_header",
- headers=[{"X-CouchDB-Ralph", "plankton"}]
- },
- check_request("Passes header", Req, Remote, Local).
-
-test_passes_host_header() ->
- Remote = fun(Req) ->
- "/passes_host_header" = Req:get(path),
- "www.google.com" = Req:get_header_value("Host"),
- {ok, {200, [], "ok"}}
- end,
- Local = fun({ok, "200", _, "ok"}) -> true; (_) -> false end,
- Req = #req{
- path="passes_host_header",
- headers=[{"Host", "www.google.com"}]
- },
- check_request("Passes host header", Req, Remote, Local).
-
-test_passes_header_back() ->
- Remote = fun(Req) ->
- "/passes_header_back" = Req:get(path),
- {ok, {200, [{"X-CouchDB-Plankton", "ralph"}], "ok"}}
- end,
- Local = fun
- ({ok, "200", Headers, "ok"}) ->
- lists:member({"X-CouchDB-Plankton", "ralph"}, Headers);
- (_) ->
- false
- end,
- Req = #req{path="passes_header_back"},
- check_request("Passes header back", Req, Remote, Local).
-
-test_rewrites_location_headers() ->
- etap:diag("Testing location header rewrites."),
- do_rewrite_tests([
- {"Location", proxy() ++ "foo/bar", server() ++ "foo/bar"},
- {"Content-Location", proxy() ++ "bing?q=2", server() ++ "bing?q=2"},
- {"Uri", proxy() ++ "zip#frag", server() ++ "zip#frag"},
- {"Destination", proxy(), server()}
- ]).
-
-test_doesnt_rewrite_external_locations() ->
- etap:diag("Testing no rewrite of external locations."),
- do_rewrite_tests([
- {"Location", external() ++ "search", external() ++ "search"},
- {"Content-Location", external() ++ "s?q=2", external() ++ "s?q=2"},
- {"Uri", external() ++ "f#f", external() ++ "f#f"},
- {"Destination", external() ++ "f?q=2#f", external() ++ "f?q=2#f"}
- ]).
-
-test_rewrites_relative_location() ->
- etap:diag("Testing relative rewrites."),
- do_rewrite_tests([
- {"Location", "/foo", server() ++ "foo"},
- {"Content-Location", "bar", server() ++ "bar"},
- {"Uri", "/zing?q=3", server() ++ "zing?q=3"},
- {"Destination", "bing?q=stuff#yay", server() ++ "bing?q=stuff#yay"}
- ]).
-
-do_rewrite_tests(Tests) ->
- lists:foreach(fun({Header, Location, Url}) ->
- do_rewrite_test(Header, Location, Url)
- end, Tests).
-
-do_rewrite_test(Header, Location, Url) ->
- Remote = fun(Req) ->
- "/rewrite_test" = Req:get(path),
- {ok, {302, [{Header, Location}], "ok"}}
- end,
- Local = fun
- ({ok, "302", Headers, "ok"}) ->
- etap:is(
- couch_util:get_value(Header, Headers),
- Url,
- "Header rewritten correctly."
- ),
- true;
- (_) ->
- false
- end,
- Req = #req{path="rewrite_test"},
- Label = "Rewrite test for ",
- check_request(Label ++ Header, Req, Remote, Local).
-
-test_uses_same_version() ->
- Remote = fun(Req) ->
- "/uses_same_version" = Req:get(path),
- {1, 0} = Req:get(version),
- {ok, {200, [], "ok"}}
- end,
- Local = fun({ok, "200", _, "ok"}) -> true; (_) -> false end,
- Req = #req{
- path="uses_same_version",
- opts=[{http_vsn, {1, 0}}]
- },
- check_request("Uses same version", Req, Remote, Local).
-
-test_passes_body() ->
- Remote = fun(Req) ->
- 'PUT' = Req:get(method),
- "/passes_body" = Req:get(path),
- <<"Hooray!">> = Req:recv_body(),
- {ok, {201, [], "ok"}}
- end,
- Local = fun({ok, "201", _, "ok"}) -> true; (_) -> false end,
- Req = #req{
- method=put,
- path="passes_body",
- body="Hooray!"
- },
- check_request("Passes body", Req, Remote, Local).
-
-test_passes_eof_body_back() ->
- BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
- Remote = fun(Req) ->
- 'GET' = Req:get(method),
- "/passes_eof_body" = Req:get(path),
- {raw, {200, [{"Connection", "close"}], BodyChunks}}
- end,
- Local = fun({ok, "200", _, "foobarbazinga"}) -> true; (_) -> false end,
- Req = #req{path="passes_eof_body"},
- check_request("Passes eof body", Req, Remote, Local).
-
-test_passes_chunked_body() ->
- BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
- Remote = fun(Req) ->
- 'POST' = Req:get(method),
- "/passes_chunked_body" = Req:get(path),
- RecvBody = fun
- ({Length, Chunk}, [Chunk | Rest]) ->
- Length = size(Chunk),
- Rest;
- ({0, []}, []) ->
- ok
- end,
- ok = Req:stream_body(1024*1024, RecvBody, BodyChunks),
- {ok, {201, [], "ok"}}
- end,
- Local = fun({ok, "201", _, "ok"}) -> true; (_) -> false end,
- Req = #req{
- method=post,
- path="passes_chunked_body",
- headers=[{"Transfer-Encoding", "chunked"}],
- body=mk_chunked_body(BodyChunks)
- },
- check_request("Passes chunked body", Req, Remote, Local).
-
-test_passes_chunked_body_back() ->
- Name = "Passes chunked body back",
- Remote = fun(Req) ->
- 'GET' = Req:get(method),
- "/passes_chunked_body_back" = Req:get(path),
- BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
- {chunked, {200, [{"Transfer-Encoding", "chunked"}], BodyChunks}}
- end,
- Req = #req{
- path="passes_chunked_body_back",
- opts=[{stream_to, self()}]
- },
-
- Resp = check_request(Name, Req, Remote, no_local),
-
- etap:fun_is(
- fun({ibrowse_req_id, _}) -> true; (_) -> false end,
- Resp,
- "Received an ibrowse request id."
- ),
- {_, ReqId} = Resp,
-
- % Grab headers from response
- receive
- {ibrowse_async_headers, ReqId, "200", Headers} ->
- etap:is(
- proplists:get_value("Transfer-Encoding", Headers),
- "chunked",
- "Response included the Transfer-Encoding: chunked header"
- ),
- ibrowse:stream_next(ReqId)
- after 1000 ->
- throw({error, timeout})
- end,
-
- % Check body received
- % TODO: When we upgrade to ibrowse >= 2.0.0 this check needs to
- % check that the chunks returned are what we sent from the
- % Remote test.
- etap:diag("TODO: UPGRADE IBROWSE"),
- etap:is(recv_body(ReqId, []), <<"foobarbazinga">>, "Decoded chunked body."),
-
- % Check test_web server.
- etap:is(test_web:check_last(), was_ok, Name ++ " - request handled").
-
-test_connect_error() ->
- Local = fun({ok, "500", _Headers, _Body}) -> true; (_) -> false end,
- Url = lists:concat([
- "http://127.0.0.1:",
- mochiweb_socket_server:get(couch_httpd, port),
- "/_error"
- ]),
- Req = #req{opts=[{url, Url}]},
- check_request("Connect error", Req, no_remote, Local).
-
-
-mk_chunked_body(Chunks) ->
- mk_chunked_body(Chunks, []).
-
-mk_chunked_body([], Acc) ->
- iolist_to_binary(lists:reverse(Acc, "0\r\n\r\n"));
-mk_chunked_body([Chunk | Rest], Acc) ->
- Size = to_hex(size(Chunk)),
- mk_chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
-
-to_hex(Val) ->
- to_hex(Val, []).
-
-to_hex(0, Acc) ->
- Acc;
-to_hex(Val, Acc) ->
- to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
-
-hex_char(V) when V < 10 -> $0 + V;
-hex_char(V) -> $A + V - 10.
-
-recv_body(ReqId, Acc) ->
- receive
- {ibrowse_async_response, ReqId, Data} ->
- recv_body(ReqId, [Data | Acc]);
- {ibrowse_async_response_end, ReqId} ->
- iolist_to_binary(lists:reverse(Acc));
- Else ->
- throw({error, unexpected_mesg, Else})
- after 5000 ->
- throw({error, timeout})
- end.
diff --git a/test/etap/200-view-group-no-db-leaks.t b/test/etap/200-view-group-no-db-leaks.t
deleted file mode 100755
index 9583d0984..000000000
--- a/test/etap/200-view-group-no-db-leaks.t
+++ /dev/null
@@ -1,308 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--record(db, {
- main_pid = nil,
- update_pid = nil,
- compactor_pid = nil,
- instance_start_time, % number of microsecs since jan 1 1970 as a binary string
- fd,
- updater_fd,
- fd_ref_counter,
- header = nil,
- committed_update_seq,
- fulldocinfo_by_id_btree,
- docinfo_by_seq_btree,
- local_docs_btree,
- update_seq,
- name,
- filepath,
- validate_doc_funs = [],
- security = [],
- security_ptr = nil,
- user_ctx = #user_ctx{},
- waiting_delayed_commit = nil,
- revs_limit = 1000,
- fsync_options = [],
- options = [],
- compression,
- before_doc_update,
- after_doc_read
-}).
-
-test_db_name() -> <<"couch_test_view_group_db_leaks">>.
-ddoc_name() -> <<"foo">>.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(28),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- timer:sleep(1000),
- put(addr, couch_config:get("httpd", "bind_address", "127.0.0.1")),
- put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))),
-
- delete_db(),
- create_db(),
-
- create_docs(),
- {ok, DDocRev} = create_design_doc(),
-
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, test_db_name(), <<"_design/", (ddoc_name())/binary>>
- ),
- etap:is(is_pid(IndexerPid), true, "got view group pid"),
- etap:is(is_process_alive(IndexerPid), true, "view group pid is alive"),
-
- query_view(3, null, false),
- check_db_ref_count(),
- etap:is(is_process_alive(IndexerPid), true, "view group pid is alive"),
-
- create_new_doc(<<"doc1000">>),
- query_view(4, null, false),
- check_db_ref_count(),
- etap:is(is_process_alive(IndexerPid), true, "view group pid is alive"),
-
- Ref1 = get_db_ref_counter(),
- compact_db(),
- check_db_ref_count(),
- Ref2 = get_db_ref_counter(),
- etap:isnt(Ref1, Ref2, "DB ref counter changed"),
- etap:is(false, is_process_alive(Ref1), "old DB ref counter is not alive"),
- etap:is(is_process_alive(IndexerPid), true, "view group pid is alive"),
-
- compact_view_group(),
- check_db_ref_count(),
- Ref3 = get_db_ref_counter(),
- etap:is(Ref3, Ref2, "DB ref counter didn't change"),
- etap:is(is_process_alive(IndexerPid), true, "view group pid is alive"),
-
- create_new_doc(<<"doc1001">>),
- query_view(5, null, false),
- check_db_ref_count(),
- etap:is(is_process_alive(IndexerPid), true, "view group pid is alive"),
-
- etap:diag("updating the design document with a new view definition"),
- {ok, _NewDDocRev} = update_ddoc_view(DDocRev),
-
- {ok, NewIndexerPid} = couch_index_server:get_index(
- couch_mrview_index, test_db_name(), <<"_design/", (ddoc_name())/binary>>
- ),
- etap:is(is_pid(NewIndexerPid), true, "got new view group pid"),
- etap:is(is_process_alive(NewIndexerPid), true, "new view group pid is alive"),
- etap:isnt(NewIndexerPid, IndexerPid, "new view group has a different pid"),
- etap:diag("querying view with ?stale=ok, must return empty row set"),
- query_view(0, foo, ok),
- etap:diag("querying view (without stale), must return 5 rows with value 1"),
- query_view(5, 1, false),
- MonRef = erlang:monitor(process, IndexerPid),
- receive
- {'DOWN', MonRef, _, _, _} ->
- etap:diag("old view group is dead after ddoc update")
- after 5000 ->
- etap:bail("old view group is not dead after ddoc update")
- end,
-
- etap:diag("deleting database"),
- MonRef2 = erlang:monitor(process, NewIndexerPid),
- ok = couch_server:delete(test_db_name(), []),
- receive
- {'DOWN', MonRef2, _, _, _} ->
- etap:diag("new view group is dead after DB deletion")
- after 5000 ->
- etap:bail("new view group did not die after DB deletion")
- end,
-
- ok = timer:sleep(1000),
- delete_db(),
- couch_server_sup:stop(),
- ok.
-
-admin_user_ctx() ->
- {user_ctx, #user_ctx{roles=[<<"_admin">>]}}.
-
-create_db() ->
- {ok, #db{main_pid = Pid} = Db} = couch_db:create(
- test_db_name(), [admin_user_ctx()]),
- put(db_main_pid, Pid),
- ok = couch_db:close(Db).
-
-delete_db() ->
- couch_server:delete(test_db_name(), [admin_user_ctx()]).
-
-compact_db() ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- {ok, _} = couch_db:start_compact(Db),
- ok = couch_db:close(Db),
- wait_db_compact_done(10).
-
-wait_db_compact_done(0) ->
- etap:bail("DB compaction failed to finish.");
-wait_db_compact_done(N) ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- ok = couch_db:close(Db),
- case is_pid(Db#db.compactor_pid) of
- false ->
- ok;
- true ->
- ok = timer:sleep(500),
- wait_db_compact_done(N - 1)
- end.
-
-compact_view_group() ->
- DDoc = list_to_binary("_design/" ++ binary_to_list(ddoc_name())),
- ok = couch_mrview:compact(test_db_name(), DDoc),
- wait_view_compact_done(10).
-
-wait_view_compact_done(0) ->
- etap:bail("View group compaction failed to finish.");
-wait_view_compact_done(N) ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/_design/" ++ binary_to_list(ddoc_name()) ++ "/_info",
- [],
- get),
- case Code of
- 200 -> ok;
- _ -> etap:bail("Invalid view group info.")
- end,
- {Info} = ejson:decode(Body),
- {IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
- CompactRunning = couch_util:get_value(<<"compact_running">>, IndexInfo),
- case CompactRunning of
- false ->
- ok;
- true ->
- ok = timer:sleep(500),
- wait_view_compact_done(N - 1)
- end.
-
-get_db_ref_counter() ->
- {ok, #db{fd_ref_counter = Ref} = Db} = couch_db:open_int(test_db_name(), []),
- ok = couch_db:close(Db),
- Ref.
-
-check_db_ref_count() ->
- {ok, #db{fd_ref_counter = Ref} = Db} = couch_db:open_int(test_db_name(), []),
- ok = couch_db:close(Db),
- timer:sleep(200), % sleep a bit to prevent race condition
- etap:is(couch_ref_counter:count(Ref), 2,
- "DB ref counter is only held by couch_db and couch_db_updater"),
- ok.
-
-create_docs() ->
- {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
- couch_db:ensure_full_commit(Db),
- couch_db:close(Db).
-
-create_design_doc() ->
- {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/", (ddoc_name())/binary>>},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"bar">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}}
- ]}}
- ]}),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:ensure_full_commit(Db),
- couch_db:close(Db),
- {ok, Rev}.
-
-update_ddoc_view(DDocRev) ->
- {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/", (ddoc_name())/binary>>},
- {<<"_rev">>, couch_doc:rev_to_str(DDocRev)},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"bar">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, 1); }">>}
- ]}}
- ]}}
- ]}),
- {ok, NewRev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:ensure_full_commit(Db),
- couch_db:close(Db),
- {ok, NewRev}.
-
-create_new_doc(Id) ->
- {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
- Doc666 = couch_doc:from_json_obj({[
- {<<"_id">>, Id},
- {<<"value">>, 999}
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc666]),
- couch_db:ensure_full_commit(Db),
- couch_db:close(Db).
-
-db_url() ->
- "http://" ++ get(addr) ++ ":" ++ get(port) ++ "/" ++
- binary_to_list(test_db_name()).
-
-query_view(ExpectedRowCount, ExpectedRowValue, Stale) ->
- {ok, Code, _Headers, Body} = test_util:request(
- db_url() ++ "/_design/" ++ binary_to_list(ddoc_name()) ++ "/_view/bar"
- ++ case Stale of
- false -> [];
- _ -> "?stale=" ++ atom_to_list(Stale)
- end,
- [],
- get),
- etap:is(Code, 200, "got view response"),
- {Props} = ejson:decode(Body),
- Rows = couch_util:get_value(<<"rows">>, Props, []),
- etap:is(length(Rows), ExpectedRowCount, "result set has correct # of rows"),
- lists:foreach(
- fun({Row}) ->
- case couch_util:get_value(<<"value">>, Row) of
- ExpectedRowValue ->
- ok;
- _ ->
- etap:bail("row has incorrect value")
- end
- end,
- Rows).
diff --git a/test/etap/201-view-group-shutdown.t b/test/etap/201-view-group-shutdown.t
deleted file mode 100755
index c51ec44d2..000000000
--- a/test/etap/201-view-group-shutdown.t
+++ /dev/null
@@ -1,293 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
--record(db, {
- main_pid = nil,
- update_pid = nil,
- compactor_pid = nil,
- instance_start_time, % number of microsecs since jan 1 1970 as a binary string
- fd,
- updater_fd,
- fd_ref_counter,
- header = nil,
- committed_update_seq,
- fulldocinfo_by_id_btree,
- docinfo_by_seq_btree,
- local_docs_btree,
- update_seq,
- name,
- filepath,
- validate_doc_funs = [],
- security = [],
- security_ptr = nil,
- user_ctx = #user_ctx{},
- waiting_delayed_commit = nil,
- revs_limit = 1000,
- fsync_options = [],
- options = [],
- compression,
- before_doc_update,
- after_doc_read
-}).
-
-main_db_name() -> <<"couch_test_view_group_shutdown">>.
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(17),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- ok = couch_config:set("couchdb", "max_dbs_open", "3", false),
- ok = couch_config:set("couchdb", "delayed_commits", "false", false),
- crypto:start(),
-
- % Test that while a view group is being compacted its database can not
- % be closed by the database LRU system.
- test_view_group_compaction(),
-
- couch_server_sup:stop(),
- ok.
-
-
-test_view_group_compaction() ->
- {ok, DbWriter3} = create_db(<<"couch_test_view_group_shutdown_w3">>),
- ok = couch_db:close(DbWriter3),
-
- {ok, MainDb} = create_main_db(),
- ok = couch_db:close(MainDb),
-
- {ok, DbWriter1} = create_db(<<"couch_test_view_group_shutdown_w1">>),
- ok = couch_db:close(DbWriter1),
-
- {ok, DbWriter2} = create_db(<<"couch_test_view_group_shutdown_w2">>),
- ok = couch_db:close(DbWriter2),
-
- Writer1 = spawn_writer(DbWriter1#db.name),
- Writer2 = spawn_writer(DbWriter2#db.name),
- etap:is(is_process_alive(Writer1), true, "Spawned writer 1"),
- etap:is(is_process_alive(Writer2), true, "Spawned writer 2"),
-
- etap:is(get_writer_status(Writer1), ok, "Writer 1 opened his database"),
- etap:is(get_writer_status(Writer2), ok, "Writer 2 opened his database"),
-
- {ok, MonRef} = couch_mrview:compact(MainDb#db.name, <<"_design/foo">>, [monitor]),
-
- % Add some more docs to database and trigger view update
- {ok, MainDb2} = couch_db:open_int(MainDb#db.name, []),
- ok = populate_main_db(MainDb2, 3, 3),
- update_view(MainDb2#db.name, <<"_design/foo">>, <<"foo">>),
- ok = couch_db:close(MainDb2),
-
- % Assuming the view compaction takes more than 50ms to complete
- ok = timer:sleep(50),
- Writer3 = spawn_writer(DbWriter3#db.name),
- etap:is(is_process_alive(Writer3), true, "Spawned writer 3"),
-
- etap:is(get_writer_status(Writer3), {error, all_dbs_active},
- "Writer 3 got {error, all_dbs_active} when opening his database"),
-
- etap:is(is_process_alive(Writer1), true, "Writer 1 still alive"),
- etap:is(is_process_alive(Writer2), true, "Writer 2 still alive"),
- etap:is(is_process_alive(Writer3), true, "Writer 3 still alive"),
-
- receive
- {'DOWN', MonRef, process, _, normal} ->
- etap:diag("View group compaction successful"),
- ok;
- {'DOWN', MonRef, process, _, _Reason} ->
- etap:bail("Failure compacting view group")
- end,
-
- ok = timer:sleep(2000),
-
- etap:is(writer_try_again(Writer3), ok,
- "Told writer 3 to try open his database again"),
- etap:is(get_writer_status(Writer3), ok,
- "Writer 3 was able to open his database"),
-
- etap:is(is_process_alive(Writer1), true, "Writer 1 still alive"),
- etap:is(is_process_alive(Writer2), true, "Writer 2 still alive"),
- etap:is(is_process_alive(Writer3), true, "Writer 3 still alive"),
-
- etap:is(stop_writer(Writer1), ok, "Stopped writer 1"),
- etap:is(stop_writer(Writer2), ok, "Stopped writer 2"),
- etap:is(stop_writer(Writer3), ok, "Stopped writer 3"),
-
- delete_db(MainDb),
- delete_db(DbWriter1),
- delete_db(DbWriter2),
- delete_db(DbWriter3).
-
-
-create_main_db() ->
- {ok, Db} = create_db(main_db_name()),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/foo">>},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}},
- {<<"foo2">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}},
- {<<"foo3">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}},
- {<<"foo4">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}},
- {<<"foo5">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}}
- ]}}
- ]}),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
- ok = populate_main_db(Db, 1000, 20000),
- update_view(Db#db.name, <<"_design/foo">>, <<"foo">>),
- {ok, Db}.
-
-
-populate_main_db(Db, BatchSize, N) when N > 0 ->
- Docs = lists:map(
- fun(_) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, couch_uuids:new()},
- {<<"value">>, base64:encode(crypto:rand_bytes(1000))}
- ]})
- end,
- lists:seq(1, BatchSize)),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- populate_main_db(Db, BatchSize, N - length(Docs));
-populate_main_db(_Db, _, _) ->
- ok.
-
-
-update_view(DbName, DDocName, ViewName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc(Db, DDocName, [ejson_body]),
- couch_mrview:query_view(Db, DDoc, ViewName, [{stale, false}]),
- ok = couch_db:close(Db),
- etap:diag("View group updated").
-
-
-create_db(DbName) ->
- {ok, Db} = couch_db:create(
- DbName,
- [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]),
- {ok, Db}.
-
-
-delete_db(#db{name = DbName, main_pid = Pid}) ->
- ok = couch_server:delete(
- DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
- MonRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MonRef, process, Pid, _Reason} ->
- ok
- after 30000 ->
- etap:bail("Timeout deleting database")
- end.
-
-
-spawn_writer(DbName) ->
- Parent = self(),
- spawn(fun() ->
- process_flag(priority, high),
- writer_loop(DbName, Parent)
- end).
-
-
-get_writer_status(Writer) ->
- Ref = make_ref(),
- Writer ! {get_status, Ref},
- receive
- {db_open, Ref} ->
- ok;
- {db_open_error, Error, Ref} ->
- Error
- after 5000 ->
- timeout
- end.
-
-
-writer_try_again(Writer) ->
- Ref = make_ref(),
- Writer ! {try_again, Ref},
- receive
- {ok, Ref} ->
- ok
- after 5000 ->
- timeout
- end.
-
-
-stop_writer(Writer) ->
- Ref = make_ref(),
- Writer ! {stop, Ref},
- receive
- {ok, Ref} ->
- ok
- after 5000 ->
- etap:bail("Timeout stopping writer process")
- end.
-
-
-% Just keep the database open, no need to actually do something on it.
-writer_loop(DbName, Parent) ->
- case couch_db:open_int(DbName, []) of
- {ok, Db} ->
- writer_loop_1(Db, Parent);
- Error ->
- writer_loop_2(DbName, Parent, Error)
- end.
-
-writer_loop_1(Db, Parent) ->
- receive
- {get_status, Ref} ->
- Parent ! {db_open, Ref},
- writer_loop_1(Db, Parent);
- {stop, Ref} ->
- ok = couch_db:close(Db),
- Parent ! {ok, Ref}
- end.
-
-writer_loop_2(DbName, Parent, Error) ->
- receive
- {get_status, Ref} ->
- Parent ! {db_open_error, Error, Ref},
- writer_loop_2(DbName, Parent, Error);
- {try_again, Ref} ->
- Parent ! {ok, Ref},
- writer_loop(DbName, Parent)
- end.
diff --git a/test/etap/210-os-proc-pool.t b/test/etap/210-os-proc-pool.t
deleted file mode 100755
index d80707e89..000000000
--- a/test/etap/210-os-proc-pool.t
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(21),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- couch_config:set("query_server_config", "os_process_limit", "3", false),
-
- test_pool_full(),
- test_client_unexpected_exit(),
-
- couch_server_sup:stop(),
- ok.
-
-
-test_pool_full() ->
- Client1 = spawn_client(),
- Client2 = spawn_client(),
- Client3 = spawn_client(),
-
- etap:diag("Check that we can spawn the max number of processes."),
- etap:is(ping_client(Client1), ok, "Client 1 started ok."),
- etap:is(ping_client(Client2), ok, "Client 2 started ok."),
- etap:is(ping_client(Client3), ok, "Client 3 started ok."),
-
- Proc1 = get_client_proc(Client1, "1"),
- Proc2 = get_client_proc(Client2, "2"),
- Proc3 = get_client_proc(Client3, "3"),
- etap:isnt(Proc1, Proc2, "Clients 1 and 2 got different procs."),
- etap:isnt(Proc2, Proc3, "Clients 2 and 3 got different procs."),
- etap:isnt(Proc1, Proc3, "Clients 1 and 3 got different procs."),
-
- etap:diag("Check that client 4 blocks waiting for a process."),
- Client4 = spawn_client(),
- etap:is(ping_client(Client4), timeout, "Client 4 blocked while waiting."),
-
- etap:diag("Check that stopping a client gives up its process."),
- etap:is(stop_client(Client1), ok, "First client stopped."),
-
- etap:diag("And check that our blocked process has been unblocked."),
- etap:is(ping_client(Client4), ok, "Client was unblocked."),
-
- Proc4 = get_client_proc(Client4, "4"),
- etap:is(Proc4, Proc1, "Client 4 got proc that client 1 got before."),
-
- lists:map(fun(C) -> ok = stop_client(C) end, [Client2, Client3, Client4]).
-
-
-test_client_unexpected_exit() ->
- Client1 = spawn_client(),
- Client2 = spawn_client(),
- Client3 = spawn_client(),
-
- etap:diag("Check that up to os_process_limit clients started."),
- etap:is(ping_client(Client1), ok, "Client 1 started ok."),
- etap:is(ping_client(Client2), ok, "Client 2 started ok."),
- etap:is(ping_client(Client3), ok, "Client 3 started ok."),
-
- Proc1 = get_client_proc(Client1, "1"),
- Proc2 = get_client_proc(Client2, "2"),
- Proc3 = get_client_proc(Client3, "3"),
- etap:isnt(Proc1, Proc2, "Clients 1 and 2 got different procs."),
- etap:isnt(Proc2, Proc3, "Clients 2 and 3 got different procs."),
- etap:isnt(Proc1, Proc3, "Clients 1 and 3 got different procs."),
-
- etap:diag("Check that killing a client frees an os_process."),
- etap:is(kill_client(Client1), ok, "Client 1 died all right."),
-
- etap:diag("Check that a new client is not blocked on boot."),
- Client4 = spawn_client(),
- etap:is(ping_client(Client4), ok, "New client booted without blocking."),
-
- Proc4 = get_client_proc(Client4, "4"),
- etap:isnt(Proc4, Proc1,
- "Client 4 got a proc different from the one client 1 got before."),
- etap:isnt(Proc4, Proc2, "Client 4's proc different from client 2's proc."),
- etap:isnt(Proc4, Proc3, "Client 4's proc different from client 3's proc."),
-
- lists:map(fun(C) -> ok = stop_client(C) end, [Client2, Client3, Client4]).
-
-
-spawn_client() ->
- Parent = self(),
- Ref = make_ref(),
- Pid = spawn(fun() ->
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- loop(Parent, Ref, Proc)
- end),
- {Pid, Ref}.
-
-
-ping_client({Pid, Ref}) ->
- Pid ! ping,
- receive
- {pong, Ref} -> ok
- after 3000 -> timeout
- end.
-
-
-get_client_proc({Pid, Ref}, ClientName) ->
- Pid ! get_proc,
- receive
- {proc, Ref, Proc} -> Proc
- after 3000 ->
- etap:bail("Timeout getting client " ++ ClientName ++ " proc.")
- end.
-
-
-stop_client({Pid, Ref}) ->
- Pid ! stop,
- receive
- {stop, Ref} -> ok
- after 3000 -> timeout
- end.
-
-
-kill_client({Pid, Ref}) ->
- Pid ! die,
- receive
- {die, Ref} -> ok
- after 3000 -> timeout
- end.
-
-
-loop(Parent, Ref, Proc) ->
- receive
- ping ->
- Parent ! {pong, Ref},
- loop(Parent, Ref, Proc);
- get_proc ->
- Parent ! {proc, Ref, Proc},
- loop(Parent, Ref, Proc);
- stop ->
- couch_query_servers:ret_os_process(Proc),
- Parent ! {stop, Ref};
- die ->
- Parent ! {die, Ref},
- exit(some_error)
- end.
diff --git a/test/etap/220-compaction-daemon.t b/test/etap/220-compaction-daemon.t
deleted file mode 100755
index 4c63b66b4..000000000
--- a/test/etap/220-compaction-daemon.t
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
-test_db_name() ->
- <<"couch_test_compaction_daemon">>.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(10),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
- timer:sleep(1000),
- put(addr, couch_config:get("httpd", "bind_address", "127.0.0.1")),
- put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))),
-
- disable_compact_daemon(),
-
- delete_db(),
- {ok, Db} = create_db(),
-
- add_design_doc(Db),
- couch_db:close(Db),
- populate(70, 70, 200 * 1024),
-
- {_, DbFileSize} = get_db_frag(),
- {_, ViewFileSize} = get_view_frag(),
-
- % enable automatic compaction
- ok = couch_config:set("compaction_daemon", "check_interval", "3", false),
- ok = couch_config:set("compaction_daemon", "min_file_size", "100000", false),
- ok = couch_config:set(
- "compactions",
- binary_to_list(test_db_name()),
- "[{db_fragmentation, \"70%\"}, {view_fragmentation, \"70%\"}]",
- false),
-
- ok = timer:sleep(4000), % something >= check_interval
- wait_compaction_finished(),
-
- {DbFrag2, DbFileSize2} = get_db_frag(),
- {ViewFrag2, ViewFileSize2} = get_view_frag(),
-
- etap:is(true, (DbFrag2 < 70), "Database fragmentation is < 70% after compaction"),
- etap:is(true, (ViewFrag2 < 70), "View fragmentation is < 70% after compaction"),
- etap:is(true, (DbFileSize2 < DbFileSize), "Database file size decreased"),
- etap:is(true, (ViewFileSize2 < ViewFileSize), "View file size decreased"),
-
- disable_compact_daemon(),
- ok = timer:sleep(6000), % 2 times check_interval
- etap:is(couch_db:is_idle(Db), true, "Database is idle"),
- populate(70, 70, 200 * 1024),
- {_, DbFileSize3} = get_db_frag(),
- {_, ViewFileSize3} = get_view_frag(),
-
- % enable automatic compaction
- ok = couch_config:set(
- "compactions",
- "_default",
- "[{db_fragmentation, \"70%\"}, {view_fragmentation, \"70%\"}]",
- false),
-
- ok = timer:sleep(4000), % something >= check_interval
- wait_compaction_finished(),
-
- {DbFrag4, DbFileSize4} = get_db_frag(),
- {ViewFrag4, ViewFileSize4} = get_view_frag(),
-
- etap:is(true, (DbFrag4 < 70), "Database fragmentation is < 70% after compaction"),
- etap:is(true, (ViewFrag4 < 70), "View fragmentation is < 70% after compaction"),
- etap:is(true, (DbFileSize4 < DbFileSize3), "Database file size decreased again"),
- etap:is(true, (ViewFileSize4 < ViewFileSize3), "View file size decreased again"),
-
- ok = timer:sleep(6000), % 2 times check_interval
- etap:is(couch_db:is_idle(Db), true, "Database is idle"),
-
- delete_db(),
- couch_server_sup:stop(),
- ok.
-
-disable_compact_daemon() ->
- Configs = couch_config:get("compactions"),
- lists:foreach(
- fun({DbName, _}) ->
- ok = couch_config:delete("compactions", DbName, false)
- end,
- Configs).
-
-admin_user_ctx() ->
- {user_ctx, #user_ctx{roles = [<<"_admin">>]}}.
-
-create_db() ->
- {ok, _} = couch_db:create(test_db_name(), [admin_user_ctx()]).
-
-delete_db() ->
- couch_server:delete(test_db_name(), [admin_user_ctx()]).
-
-add_design_doc(Db) ->
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/foo">>},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}},
- {<<"foo2">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}},
- {<<"foo3">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}}
- ]}}
- ]}),
- {ok, _} = couch_db:update_docs(Db, [DDoc]),
- {ok, _} = couch_db:ensure_full_commit(Db),
- ok.
-
-populate(DbFrag, ViewFrag, MinFileSize) ->
- {CurDbFrag, DbFileSize} = get_db_frag(),
- {CurViewFrag, ViewFileSize} = get_view_frag(),
- populate(
- DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag,
- lists:min([DbFileSize, ViewFileSize])).
-
-populate(DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag, FileSize)
- when CurDbFrag >= DbFrag, CurViewFrag >= ViewFrag, FileSize >= MinFileSize ->
- ok;
-populate(DbFrag, ViewFrag, MinFileSize, _, _, _) ->
- update(),
- {CurDbFrag, DbFileSize} = get_db_frag(),
- {CurViewFrag, ViewFileSize} = get_view_frag(),
- populate(
- DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag,
- lists:min([DbFileSize, ViewFileSize])).
-
-update() ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- lists:foreach(fun(_) ->
- Doc = couch_doc:from_json_obj({[{<<"_id">>, couch_uuids:new()}]}),
- {ok, _} = couch_db:update_docs(Db, [Doc]),
- query_view()
- end, lists:seq(1, 100)),
- couch_db:close(Db).
-
-db_url() ->
- "http://" ++ get(addr) ++ ":" ++ get(port) ++ "/" ++
- binary_to_list(test_db_name()).
-
-query_view() ->
- {ok, Code, _Headers, _Body} = test_util:request(
- db_url() ++ "/_design/foo/_view/foo", [], get),
- case Code of
- 200 ->
- ok;
- _ ->
- etap:bail("error querying view")
- end.
-
-get_db_frag() ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- {ok, Info} = couch_db:get_db_info(Db),
- couch_db:close(Db),
- FileSize = couch_util:get_value(disk_size, Info),
- DataSize = couch_util:get_value(data_size, Info),
- {round((FileSize - DataSize) / FileSize * 100), FileSize}.
-
-get_view_frag() ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- {ok, Info} = couch_mrview:get_info(Db, <<"_design/foo">>),
- couch_db:close(Db),
- FileSize = couch_util:get_value(disk_size, Info),
- DataSize = couch_util:get_value(data_size, Info),
- {round((FileSize - DataSize) / FileSize * 100), FileSize}.
-
-
-wait_compaction_finished() ->
- Parent = self(),
- Loop = spawn_link(fun() -> wait_loop(Parent) end),
- receive
- {done, Loop} ->
- etap:diag("Database and view compaction have finished")
- after 60000 ->
- etap:bail("Compaction not triggered")
- end.
-
-wait_loop(Parent) ->
- {ok, Db} = couch_db:open_int(test_db_name(), []),
- {ok, DbInfo} = couch_db:get_db_info(Db),
- {ok, ViewInfo} = couch_mrview:get_info(Db, <<"_design/foo">>),
- couch_db:close(Db),
- case (couch_util:get_value(compact_running, ViewInfo) =:= true) orelse
- (couch_util:get_value(compact_running, DbInfo) =:= true) of
- false ->
- Parent ! {done, self()};
- true ->
- ok = timer:sleep(500),
- wait_loop(Parent)
- end.
diff --git a/test/etap/230-pbkfd2.t b/test/etap/230-pbkfd2.t
deleted file mode 100644
index d980ef698..000000000
--- a/test/etap/230-pbkfd2.t
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
- etap:plan(6),
- etap:is(couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20),
- {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
- "test vector #1"),
- etap:is(couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20),
- {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
- "test vector #2"),
- etap:is(couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20),
- {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
- "test vector #3"),
- etap:is(couch_passwords:pbkdf2(<<"passwordPASSWORDpassword">>,
- <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>, 4096, 25),
- {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
- "test vector #4"),
- etap:is(couch_passwords:pbkdf2(<<"pass\0word">>, <<"sa\0lt">>, 4096, 16),
- {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
- "test vector #5"),
- etap:is(couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20),
- {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
- "test vector #6"),
- etap:end_tests().
diff --git a/test/etap/231-cors.t b/test/etap/231-cors.t
deleted file mode 100644
index 2f420d1c4..000000000
--- a/test/etap/231-cors.t
+++ /dev/null
@@ -1,430 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(user_ctx, {
- name = null,
- roles = [],
- handler
-}).
-
-
--define(SUPPORTED_METHODS, "GET, HEAD, POST, PUT, DELETE, TRACE, CONNECT, COPY, OPTIONS").
-server() ->
- lists:concat([
- "http://127.0.0.1:",
- mochiweb_socket_server:get(couch_httpd, port),
- "/"
- ]).
-
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(29),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-dbname() -> "etap-test-db".
-dbname1() -> "etap-test-db1".
-dbname2() -> "etap-test-db2".
-
-admin_user_ctx() -> {user_ctx, #user_ctx{roles=[<<"_admin">>]}}.
-
-set_admin_password(UserName, Password) ->
- Hashed = couch_passwords:hash_admin_password(Password),
- couch_config:set("admins", UserName, Hashed, false).
-
-cycle_db(DbName) ->
- couch_server:delete(list_to_binary(DbName), [admin_user_ctx()]),
- {ok, Db} = couch_db:create(list_to_binary(DbName), [admin_user_ctx()]),
- Db.
-
-test() ->
- %% launch couchdb
- couch_server_sup:start_link(test_util:config_files()),
-
- %% initialize db
- timer:sleep(1000),
- Db = cycle_db(dbname()),
- Db1 = cycle_db(dbname1()),
- Db2 = cycle_db(dbname2()),
-
- % CORS is disabled by default
- test_no_headers_server(),
- test_no_headers_db(),
-
- % Now enable CORS
- ok = couch_config:set("httpd", "enable_cors", "true", false),
- ok = couch_config:set("cors", "origins", "http://example.com", false),
-
- %% do tests
- test_incorrect_origin_simple_request(),
- test_incorrect_origin_preflight_request(),
-
- test_preflight_request(),
- test_db_request(),
- test_doc_with_attachment_request(),
- test_doc_with_attachment_range_request(),
- test_db_preflight_request(),
- test_db1_origin_request(),
- test_preflight_with_port1(),
- test_preflight_with_scheme1(),
- test_if_none_match_header(),
-
- ok = couch_config:set("cors", "origins", "http://example.com:5984", false),
- test_preflight_with_port2(),
-
- ok = couch_config:set("cors", "origins", "https://example.com:5984", false),
- test_preflight_with_scheme2(),
-
- ok = couch_config:set("cors", "origins", "*", false),
- test_preflight_with_wildcard(),
-
- ok = couch_config:set("cors", "origins", "http://example.com", false),
- test_case_sensitive_mismatch_of_allowed_origins(),
-
- % http://www.w3.org/TR/cors/#supports-credentials
- % 6.1.3
- % If the resource supports credentials add a single
- % Access-Control-Allow-Origin header, with the value
- % of the Origin header as value, and add a single
- % Access-Control-Allow-Credentials header with the
- % case-sensitive string "true" as value.
- % Otherwise, add a single Access-Control-Allow-Origin
- % header, with either the value of the Origin header
- % or the string "*" as value.
- % Note: The string "*" cannot be used for a resource
- % that supports credentials.
- test_db_request_credentials_header_off(),
- ok = couch_config:set("cors", "credentials", "true", false),
- test_db_request_credentials_header_on(),
- % We don’t test wildcards & credentials as that would
- % fall into the realm of validating config values
- % which we don’t do at all yet
-
- % test with vhosts
- ok = couch_config:set("vhosts", "example.com", "/", false),
- test_preflight_request(true),
- test_db_request(true),
- test_db_preflight_request(true),
- test_db1_origin_request(true),
- test_preflight_with_port1(true),
- test_preflight_with_scheme1(true),
-
- % TBD
- % test multiple per-host configuration
-
- %% do tests with auth
- ok = set_admin_password("test", <<"test">>),
-
- test_db_preflight_auth_request(),
- test_db_origin_auth_request(),
-
-
- %% restart boilerplate
- catch couch_db:close(Db),
- catch couch_db:close(Db1),
- catch couch_db:close(Db2),
-
- couch_server:delete(list_to_binary(dbname()), [admin_user_ctx()]),
- couch_server:delete(list_to_binary(dbname1()), [admin_user_ctx()]),
- couch_server:delete(list_to_binary(dbname2()), [admin_user_ctx()]),
-
- timer:sleep(3000),
- couch_server_sup:stop(),
- ok.
-
-test_preflight_request() -> test_preflight_request(false).
-test_db_request() -> test_db_request(false).
-test_db_preflight_request() -> test_db_preflight_request(false).
-test_db1_origin_request() -> test_db1_origin_request(false).
-test_preflight_with_port1() -> test_preflight_with_port1(false).
-test_preflight_with_scheme1() -> test_preflight_with_scheme1(false).
-
-%% Cors is disabled, should not return Access-Control-Allow-Origin
-test_no_headers_server() ->
- Headers = [{"Origin", "http://127.0.0.1"}],
- {ok, _, Resp, _} = ibrowse:send_req(server(), Headers, get, []),
- etap:is(proplists:get_value("Access-Control-Allow-Origin", Resp),
- undefined, "No CORS Headers when disabled").
-
-%% Cors is disabled, should not return Access-Control-Allow-Origin
-test_no_headers_db() ->
- Headers = [{"Origin", "http://127.0.0.1"}],
- Url = server() ++ "etap-test-db",
- {ok, _, Resp, _} = ibrowse:send_req(Url, Headers, get, []),
- etap:is(proplists:get_value("Access-Control-Allow-Origin", Resp),
- undefined, "No CORS Headers when disabled").
-
-test_incorrect_origin_simple_request() ->
- Headers = [{"Origin", "http://127.0.0.1"}],
- {ok, _, RespHeaders, _} = ibrowse:send_req(server(), Headers, get, []),
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- undefined,
- "Specified invalid origin, no Access").
-
-test_incorrect_origin_preflight_request() ->
- Headers = [{"Origin", "http://127.0.0.1"},
- {"Access-Control-Request-Method", "GET"}],
- {ok, _, RespHeaders, _} = ibrowse:send_req(server(), Headers, options, []),
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- undefined,
- "invalid origin").
-
-test_preflight_request(VHost) ->
- Headers = [{"Origin", "http://example.com"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
-
- case ibrowse:send_req(server(), Headers, options, []) of
- {ok, _, RespHeaders, _} ->
- etap:is(proplists:get_value("Access-Control-Allow-Methods", RespHeaders),
- ?SUPPORTED_METHODS,
- "test_preflight_request Access-Control-Allow-Methods ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_db_request(VHost) ->
- Headers = [{"Origin", "http://example.com"}]
- ++ maybe_append_vhost(VHost),
- Url = server() ++ "etap-test-db",
- case ibrowse:send_req(Url, Headers, get, []) of
- {ok, _, RespHeaders, _Body} ->
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- "http://example.com",
- "db Access-Control-Allow-Origin ok"),
- etap:is(proplists:get_value("Access-Control-Expose-Headers", RespHeaders),
- "Cache-Control, Content-Type, Server",
- "db Access-Control-Expose-Headers ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-% COUCHDB-1689
-test_doc_with_attachment_request() ->
- DocUrl = server() ++ "etap-test-db/doc1",
- ibrowse:send_req(DocUrl ++ "/attachment.txt",
- [{"Content-Type", "text/plain"}], put, "this is a text attachment"),
-
- Headers = [{"Origin", "http://example.com"}],
- Url = DocUrl ++ "?attachments=true",
- case ibrowse:send_req(Url, Headers, get, []) of
- {ok, Code, _RespHeaders, _Body} ->
- etap:is(Code, "200", "Response without errors");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-% COUCHDB-1689
-test_doc_with_attachment_range_request() ->
- AttachmentUrl = server() ++ "etap-test-db/doc2/attachment.bin",
- % Use a Content-Type that doesn't get compressed
- ibrowse:send_req(AttachmentUrl,
- [{"Content-Type", "application/octet-stream"}], put,
- "this is an attachment"),
-
- Headers = [{"Origin", "http://example.com"}, {"Range", "bytes=0-6"}],
- case ibrowse:send_req(AttachmentUrl, Headers, get, []) of
- {ok, Code, _RespHeaders, _Body} ->
- etap:is(Code, "206", "Response without errors");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-% COUCHDB-1697
-test_if_none_match_header() ->
- Url = server() ++ "etap-test-db/doc2",
- Headers = [{"Origin", "http://example.com"}],
- {ok, _, _RespHeaders, _} = ibrowse:send_req(Url, Headers, get, []),
- ETag = proplists:get_value("ETag", _RespHeaders),
- Headers2 = [{"Origin", "http://example.com"}, {"If-None-Match", ETag}],
- case ibrowse:send_req(Url, Headers2, get, []) of
- {ok, Code, _RespHeaders2, _} ->
- etap:is(Code, "304", "Responded with Not Modified");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_db_request_credentials_header_off() ->
- Headers = [{"Origin", "http://example.com"}],
- Url = server() ++ "etap-test-db",
- case ibrowse:send_req(Url, Headers, get, []) of
- {ok, _, RespHeaders, _Body} ->
- etap:is(proplists:get_value("Access-Control-Allow-Credentials", RespHeaders),
- undefined,
- "db Access-Control-Allow-Credentials off");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_db_request_credentials_header_on() ->
- Headers = [{"Origin", "http://example.com"}],
- Url = server() ++ "etap-test-db",
- case ibrowse:send_req(Url, Headers, get, []) of
- {ok, _, RespHeaders, _Body} ->
- etap:is(proplists:get_value("Access-Control-Allow-Credentials", RespHeaders),
- "true",
- "db Access-Control-Allow-Credentials ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_db_preflight_request(VHost) ->
- Url = server() ++ "etap-test-db",
- Headers = [{"Origin", "http://example.com"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
- case ibrowse:send_req(Url, Headers, options, []) of
- {ok, _, RespHeaders, _} ->
- etap:is(proplists:get_value("Access-Control-Allow-Methods", RespHeaders),
- ?SUPPORTED_METHODS,
- "db Access-Control-Allow-Methods ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-
-test_db1_origin_request(VHost) ->
- Headers = [{"Origin", "http://example.com"}]
- ++ maybe_append_vhost(VHost),
- Url = server() ++ "etap-test-db1",
- case ibrowse:send_req(Url, Headers, get, [], [{host_header, "example.com"}]) of
- {ok, _, RespHeaders, _Body} ->
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- "http://example.com",
- "db origin ok");
- _Else ->
- io:format("else ~p~n", [_Else]),
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_db_preflight_auth_request() ->
- Url = server() ++ "etap-test-db2",
- Headers = [{"Origin", "http://example.com"},
- {"Access-Control-Request-Method", "GET"}],
- case ibrowse:send_req(Url, Headers, options, []) of
- {ok, _Status, RespHeaders, _} ->
- etap:is(proplists:get_value("Access-Control-Allow-Methods", RespHeaders),
- ?SUPPORTED_METHODS,
- "db Access-Control-Allow-Methods ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-
-test_db_origin_auth_request() ->
- Headers = [{"Origin", "http://example.com"}],
- Url = server() ++ "etap-test-db2",
-
- case ibrowse:send_req(Url, Headers, get, [],
- [{basic_auth, {"test", "test"}}]) of
- {ok, _, RespHeaders, _Body} ->
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- "http://example.com",
- "db origin ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_preflight_with_wildcard() ->
- Headers = [{"Origin", "http://example.com"},
- {"Access-Control-Request-Method", "GET"}],
- case ibrowse:send_req(server(), Headers, options, []) of
- {ok, _, RespHeaders, _} ->
- % I would either expect the current origin or a wildcard to be returned
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- "http://example.com",
- "db origin ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_preflight_with_port1(VHost) ->
- Headers = [{"Origin", "http://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
- case ibrowse:send_req(server(), Headers, options, []) of
- {ok, _, RespHeaders, _} ->
- % I would either expect the current origin or a wildcard to be returned
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- undefined,
- "check non defined host:port in origin ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_preflight_with_port2() ->
- Headers = [{"Origin", "http://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}],
- case ibrowse:send_req(server(), Headers, options, []) of
- {ok, _, RespHeaders, _} ->
- % I would either expect the current origin or a wildcard to be returned
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- "http://example.com:5984",
- "check host:port in origin ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_preflight_with_scheme1(VHost) ->
- Headers = [{"Origin", "https://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
- case ibrowse:send_req(server(), Headers, options, []) of
- {ok, _, RespHeaders, _} ->
- % I would either expect the current origin or a wildcard to be returned
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- undefined,
- "check non defined scheme in origin ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_preflight_with_scheme2() ->
- Headers = [{"Origin", "https://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}],
- case ibrowse:send_req(server(), Headers, options, []) of
- {ok, _, RespHeaders, _} ->
- % I would either expect the current origin or a wildcard to be returned
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- "https://example.com:5984",
- "check scheme in origin ok");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-test_case_sensitive_mismatch_of_allowed_origins() ->
- Headers = [{"Origin", "http://EXAMPLE.COM"}],
- Url = server() ++ "etap-test-db",
- case ibrowse:send_req(Url, Headers, get, []) of
- {ok, _, RespHeaders, _Body} ->
- etap:is(proplists:get_value("Access-Control-Allow-Origin", RespHeaders),
- undefined,
- "db access config case mismatch");
- _ ->
- etap:is(false, true, "ibrowse failed")
- end.
-
-maybe_append_vhost(true) ->
- [{"Host", "http://example.com"}];
-maybe_append_vhost(Else) ->
- [].
diff --git a/test/etap/250-upgrade-legacy-view-files.t b/test/etap/250-upgrade-legacy-view-files.t
deleted file mode 100644
index e720b1c44..000000000
--- a/test/etap/250-upgrade-legacy-view-files.t
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
- test_util:init_code_path(),
-
- etap:plan(8),
- case (catch test()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
- etap:bail(Other)
- end,
- ok.
-
-
-test() ->
- couch_server_sup:start_link(test_util:config_files()),
-
- % commit sofort
- ok = couch_config:set("query_server_config", "commit_freq", "0"),
-
- test_upgrade(),
-
- couch_server_sup:stop(),
- ok.
-
-fixture_path() ->
- test_util:source_file("test/etap/fixtures").
-
-old_db() ->
- fixture_path() ++ "/" ++ old_db_name().
-
-old_db_name() ->
- "test.couch".
-
-old_view() ->
- fixture_path() ++ "/" ++ old_view_name().
-
-old_view_name() ->
- "3b835456c235b1827e012e25666152f3.view".
-
-new_view_name() ->
- "a1c5929f912aca32f13446122cc6ce50.view".
-
-couch_url() ->
- "http://" ++ addr() ++ ":" ++ port().
-
-addr() ->
- couch_config:get("httpd", "bind_address", "127.0.0.1").
-
-port() ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
-
-
-% <= 1.2.x
--record(index_header,
- {seq=0,
- purge_seq=0,
- id_btree_state=nil,
- view_states=nil
- }).
-
-% >= 1.3.x
--record(mrheader, {
- seq=0,
- purge_seq=0,
- id_btree_state=nil,
- view_states=nil
-}).
-
-ensure_header(File, MatchFun, Msg) ->
- {ok, Fd} = couch_file:open(File),
- {ok, {_Sig, Header}} = couch_file:read_header(Fd),
- couch_file:close(Fd),
- etap:fun_is(MatchFun, Header, "ensure " ++ Msg ++ " header for file: " ++ File).
-
-file_exists(File) ->
- % open without creating
- case file:open(File, [read, raw]) of
- {ok, Fd_Read} ->
- file:close(Fd_Read),
- true;
- _Error ->
- false
- end.
-
-cleanup() ->
- DbDir = couch_config:get("couchdb", "database_dir"),
- Files = [
- DbDir ++ "/test.couch",
- DbDir ++ "/.test_design/" ++ old_view_name(),
- DbDir ++ "/.test_design/mrview/" ++ new_view_name()
- ],
- lists:foreach(fun(File) -> file:delete(File) end, Files),
- etap:ok(true, "cleanup").
-
-test_upgrade() ->
-
- cleanup(),
-
- % copy old db file into db dir
- DbDir = couch_config:get("couchdb", "database_dir"),
- DbTarget = DbDir ++ "/" ++ old_db_name(),
- filelib:ensure_dir(DbDir),
- OldDbName = old_db(),
- {ok, _} = file:copy(OldDbName, DbTarget),
-
- % copy old view file into view dir
- ViewDir = couch_config:get("couchdb", "view_index_dir"),
- ViewTarget = ViewDir ++ "/.test_design/" ++ old_view_name(),
- filelib:ensure_dir(ViewTarget),
- OldViewName = old_view(),
- {ok, _} = file:copy(OldViewName, ViewTarget),
-
- % ensure old header
- ensure_header(ViewTarget, fun(#index_header{}) -> true; (_) -> false end, "old"),
-
- % query view
- ViewUrl = couch_url() ++ "/test/_design/test/_view/test",
- {ok, Code, _Headers, Body} = test_util:request(ViewUrl, [], get),
-
- % expect results
- etap:is(Code, 200, "valid view result http status code"),
- ExpectBody = <<"{\"total_rows\":2,\"offset\":0,\"rows\":[\r\n{\"id\":\"193f2f9c596ddc7ad326f7da470009ec\",\"key\":1,\"value\":null},\r\n{\"id\":\"193f2f9c596ddc7ad326f7da470012b6\",\"key\":2,\"value\":null}\r\n]}\n">>,
- etap:is(Body, ExpectBody, "valid view result"),
-
- % ensure old file gone.
- etap:is(file_exists(ViewTarget), false, "ensure old file is gone"),
-
- % ensure new header
- NewViewFile = ViewDir ++ "/.test_design/mrview/" ++ new_view_name(),
-
- % add doc(s)
- test_util:request(
- couch_url() ++ "/test/boo",
- [{"Content-Type", "application/json"}],
- put,
- <<"{\"a\":3}">>),
-
- % query again
- {ok, Code2, _Headers2, Body2} = test_util:request(ViewUrl, [], get),
-
- % expect results
- etap:is(Code2, 200, "valid view result http status code"),
- ExpectBody2 = <<"{\"total_rows\":3,\"offset\":0,\"rows\":[\r\n{\"id\":\"193f2f9c596ddc7ad326f7da470009ec\",\"key\":1,\"value\":null},\r\n{\"id\":\"193f2f9c596ddc7ad326f7da470012b6\",\"key\":2,\"value\":null},\r\n{\"id\":\"boo\",\"key\":3,\"value\":null}\r\n]}\n">>,
- etap:is(Body2, ExpectBody2, "valid view result after doc add"),
-
- % ensure no rebuild
- % TBD no idea how to actually test this.
-
- % ensure new header.
- timer:sleep(2000),
- ensure_header(NewViewFile, fun(#mrheader{}) -> true; (_) -> false end, "new"),
-
- ok.
diff --git a/test/etap/Makefile.am b/test/etap/Makefile.am
deleted file mode 100644
index c9778ca6e..000000000
--- a/test/etap/Makefile.am
+++ /dev/null
@@ -1,109 +0,0 @@
-## Licensed under the Apache License, Version 2.0 (the "License"); you may not
-## use this file except in compliance with the License. You may obtain a copy of
-## the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-## License for the specific language governing permissions and limitations under
-## the License.
-
-noinst_SCRIPTS = run
-noinst_DATA = test_util.beam test_web.beam
-
-noinst_PROGRAMS = test_cfg_register
-test_cfg_register_SOURCES = test_cfg_register.c
-test_cfg_register_CFLAGS = -D_BSD_SOURCE
-
-%.beam: %.erl
- $(ERLC) $<
-
-run: run.tpl
- sed -e "s|%abs_top_srcdir%|@abs_top_srcdir@|g" \
- -e "s|%abs_top_builddir%|@abs_top_builddir@|g" > \
- $@ < $<
- chmod +x $@
-
-# @@ wildcards are NOT portable, please replace with clean-local rules
-CLEANFILES = run *.beam
-
-DISTCLEANFILES = temp.*
-
-fixture_files = \
- fixtures/3b835456c235b1827e012e25666152f3.view \
- fixtures/test.couch
-
-tap_files = \
- 001-load.t \
- 002-icu-driver.t \
- 010-file-basics.t \
- 011-file-headers.t \
- 020-btree-basics.t \
- 021-btree-reductions.t \
- 030-doc-from-json.t \
- 031-doc-to-json.t \
- 040-util.t \
- 041-uuid-gen-id.ini \
- 041-uuid-gen-seq.ini \
- 041-uuid-gen-utc.ini \
- 041-uuid-gen.t \
- 042-work-queue.t \
- 043-find-in-binary.t \
- 050-stream.t \
- 060-kt-merging.t \
- 061-kt-missing-leaves.t \
- 062-kt-remove-leaves.t \
- 063-kt-get-leaves.t \
- 064-kt-counting.t \
- 065-kt-stemming.t \
- 070-couch-db.t \
- 072-cleanup.t \
- 073-changes.t \
- 074-doc-update-conflicts.t \
- 075-auth-cache.t \
- 076-file-compression.t \
- 077-couch-db-fast-db-delete-create.t \
- 080-config-get-set.t \
- 081-config-override.1.ini \
- 081-config-override.2.ini \
- 081-config-override.t \
- 082-config-register.t \
- 083-config-no-files.t \
- 090-task-status.t \
- 100-ref-counter.t \
- 120-stats-collect.t \
- 121-stats-aggregates.cfg \
- 121-stats-aggregates.ini \
- 121-stats-aggregates.t \
- 130-attachments-md5.t \
- 140-attachment-comp.t \
- 150-invalid-view-seq.t \
- 160-vhosts.t \
- 170-os-daemons.es \
- 170-os-daemons.t \
- 171-os-daemons-config.es \
- 171-os-daemons-config.t \
- 172-os-daemon-errors.1.sh \
- 172-os-daemon-errors.2.sh \
- 172-os-daemon-errors.3.sh \
- 172-os-daemon-errors.4.sh \
- 172-os-daemon-errors.t \
- 173-os-daemon-cfg-register.t \
- 180-http-proxy.ini \
- 180-http-proxy.t \
- 190-json-stream-parse.t \
- 200-view-group-no-db-leaks.t \
- 201-view-group-shutdown.t \
- 210-os-proc-pool.t \
- 220-compaction-daemon.t \
- 230-pbkfd2.t \
- 231-cors.t \
- 250-upgrade-legacy-view-files.t
-
-EXTRA_DIST = \
- run.tpl \
- test_web.erl \
- $(fixture_files) \
- $(tap_files)
diff --git a/test/etap/run.tpl b/test/etap/run.tpl
deleted file mode 100644
index d6d6dbe9a..000000000
--- a/test/etap/run.tpl
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh -e
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-SRCDIR="%abs_top_srcdir%"
-BUILDDIR="%abs_top_builddir%"
-export ERL_LIBS="$BUILDDIR/src/:$ERL_LIBS"
-export ERL_FLAGS="$ERL_FLAGS -pa $BUILDDIR/test/etap/"
-
-if test $# -eq 1; then
- OPTS=""
- TGT=$1
-else
- OPTS=$1
- TGT=$2
-fi
-
-if test -f $TGT; then
- prove $OPTS $TGT
-else
- prove $OPTS $TGT/*.t
-fi
diff --git a/test/etap/test_util.erl.in b/test/etap/test_util.erl.in
deleted file mode 100644
index 352714e35..000000000
--- a/test/etap/test_util.erl.in
+++ /dev/null
@@ -1,94 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(test_util).
-
--export([init_code_path/0]).
--export([source_file/1, build_file/1, config_files/0]).
--export([run/2]).
--export([request/3, request/4]).
-
-srcdir() ->
- "@abs_top_srcdir@".
-
-builddir() ->
- "@abs_top_builddir@".
-
-init_code_path() ->
- Paths = [
- "etap",
- "couchdb",
- "ejson",
- "erlang-oauth",
- "ibrowse",
- "mochiweb",
- "snappy"
- ],
- lists:foreach(fun(Name) ->
- code:add_patha(filename:join([builddir(), "src", Name]))
- end, Paths).
-
-source_file(Name) ->
- filename:join([srcdir(), Name]).
-
-build_file(Name) ->
- filename:join([builddir(), Name]).
-
-config_files() ->
- [
- build_file("etc/couchdb/default_dev.ini"),
- source_file("test/random_port.ini"),
- build_file("etc/couchdb/local_dev.ini")
- ].
-
-
-run(Plan, Fun) ->
- test_util:init_code_path(),
- etap:plan(Plan),
- case (catch Fun()) of
- ok ->
- etap:end_tests();
- Other ->
- etap:diag(io_lib:format("Test died abnormally:~n~p", [Other])),
- timer:sleep(500),
- etap:bail(Other)
- end,
- ok.
-
-
-request(Url, Headers, Method) ->
- request(Url, Headers, Method, []).
-
-request(Url, Headers, Method, Body) ->
- request(Url, Headers, Method, Body, 3).
-
-request(_Url, _Headers, _Method, _Body, 0) ->
- {error, request_failed};
-request(Url, Headers, Method, Body, N) ->
- case code:is_loaded(ibrowse) of
- false ->
- {ok, _} = ibrowse:start();
- _ ->
- ok
- end,
- case ibrowse:send_req(Url, Headers, Method, Body) of
- {ok, Code0, RespHeaders, RespBody0} ->
- Code = list_to_integer(Code0),
- RespBody = iolist_to_binary(RespBody0),
- {ok, Code, RespHeaders, RespBody};
- {error, {'EXIT', {normal, _}}} ->
- % Connection closed right after a successful request that
- % used the same connection.
- request(Url, Headers, Method, Body, N - 1);
- Error ->
- Error
- end.