author	Adam Kocoloski <kocolosk@apache.org>	2019-06-21 10:10:32 -0400
committer	GitHub <noreply@github.com>	2019-06-21 10:10:32 -0400
commit	cbf88041012cf634a80b4d1d036276844da988c1 (patch)
tree	62574acbf82463ef886b4530ef25e1ad4a597848
parent	6e753551f4a045a3247c3ec2d270309069247678 (diff)
parent	c517618ba54892b0b5d26e529924af04b9e62f49 (diff)
download	couchdb-cbf88041012cf634a80b4d1d036276844da988c1.tar.gz
Merge pull request #2037 from kocolosk/dreyfus-by-default
Add "Dreyfus" (Erlang side of Search) to CouchDB
-rw-r--r--	rebar.config.script	1
-rw-r--r--	rel/apps/couch_epi.config	1
-rw-r--r--	rel/overlay/etc/default.ini	29
-rw-r--r--	rel/reltool.config	2
-rw-r--r--	share/server/dreyfus.js	62
-rw-r--r--	share/server/loop.js	2
-rw-r--r--	src/dreyfus/.gitignore	4
-rw-r--r--	src/dreyfus/LICENSE.txt	202
-rw-r--r--	src/dreyfus/README.md	78
-rw-r--r--	src/dreyfus/include/dreyfus.hrl	74
-rw-r--r--	src/dreyfus/priv/stats_descriptions.cfg	65
-rw-r--r--	src/dreyfus/src/clouseau_rpc.erl	114
-rw-r--r--	src/dreyfus/src/dreyfus.app.src	22
-rw-r--r--	src/dreyfus/src/dreyfus_app.erl	24
-rw-r--r--	src/dreyfus/src/dreyfus_bookmark.erl	90
-rw-r--r--	src/dreyfus/src/dreyfus_config.erl	15
-rw-r--r--	src/dreyfus/src/dreyfus_epi.erl	46
-rw-r--r--	src/dreyfus/src/dreyfus_fabric.erl	108
-rw-r--r--	src/dreyfus/src/dreyfus_fabric_cleanup.erl	74
-rw-r--r--	src/dreyfus/src/dreyfus_fabric_group1.erl	126
-rw-r--r--	src/dreyfus/src/dreyfus_fabric_group2.erl	155
-rw-r--r--	src/dreyfus/src/dreyfus_fabric_info.erl	108
-rw-r--r--	src/dreyfus/src/dreyfus_fabric_search.erl	265
-rw-r--r--	src/dreyfus/src/dreyfus_httpd.erl	600
-rw-r--r--	src/dreyfus/src/dreyfus_httpd_handlers.erl	29
-rw-r--r--	src/dreyfus/src/dreyfus_index.erl	367
-rw-r--r--	src/dreyfus/src/dreyfus_index_manager.erl	153
-rw-r--r--	src/dreyfus/src/dreyfus_index_updater.erl	181
-rw-r--r--	src/dreyfus/src/dreyfus_plugin_couch_db.erl	26
-rw-r--r--	src/dreyfus/src/dreyfus_rpc.erl	130
-rw-r--r--	src/dreyfus/src/dreyfus_sup.erl	32
-rw-r--r--	src/dreyfus/src/dreyfus_util.erl	418
-rw-r--r--	src/dreyfus/test/dreyfus_blacklist_await_test.erl	76
-rw-r--r--	src/dreyfus/test/dreyfus_blacklist_request_test.erl	96
-rw-r--r--	src/dreyfus/test/dreyfus_config_test.erl	71
-rw-r--r--	src/dreyfus/test/dreyfus_purge_test.erl	867
-rw-r--r--	src/dreyfus/test/dreyfus_test_util.erl	13
-rw-r--r--	src/dreyfus/test/elixir/mix.exs	30
-rw-r--r--	src/dreyfus/test/elixir/mix.lock	5
-rwxr-xr-x	src/dreyfus/test/elixir/run	4
-rw-r--r--	src/dreyfus/test/elixir/test/partition_search_test.exs	219
-rw-r--r--	src/dreyfus/test/elixir/test/test_helper.exs	4
-rw-r--r--	src/mango/src/mango_idx.erl	8
-rw-r--r--	src/mango/src/mango_native_proc.erl	2
-rw-r--r--	support/build_js.escript	2
45 files changed, 4995 insertions, 5 deletions
diff --git a/rebar.config.script b/rebar.config.script
index 254c67478..b41917f67 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -83,6 +83,7 @@ SubDirs = [
"src/couch_peruser",
"src/couch_tests",
"src/ddoc_cache",
+ "src/dreyfus",
"src/fabric",
"src/global_changes",
"src/mango",
diff --git a/rel/apps/couch_epi.config b/rel/apps/couch_epi.config
index a07ae2a42..a53721a48 100644
--- a/rel/apps/couch_epi.config
+++ b/rel/apps/couch_epi.config
@@ -14,6 +14,7 @@
couch_db_epi,
chttpd_epi,
couch_index_epi,
+ dreyfus_epi,
global_changes_epi,
mango_epi,
mem3_epi,
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 0d7ac6d77..5a8d0f952 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -481,3 +481,32 @@ min_priority = 2.0
[smoosh.ratio_views]
min_priority = 2.0
+
+[dreyfus]
+; The name and location of the Clouseau Java service required to
+; enable Search functionality.
+; name = clouseau@127.0.0.1
+
+; CouchDB will try to re-connect to Clouseau using a bounded
+; exponential backoff with the following number of iterations.
+; retry_limit = 5
+
+; The default number of results returned from a global search query.
+; limit = 25
+
+; The default number of results returned from a search on a partition
+; of a database.
+; limit_partitions = 2000
+
+; The maximum number of results that can be returned from a global
+; search query (or any search query on a database without user-defined
+; partitions). Attempts to set ?limit=N higher than this value will
+; be rejected.
+; max_limit = 200
+
+; The maximum number of results that can be returned when searching
+; a partition of a database. Attempts to set ?limit=N higher than this
+; value will be rejected. If this config setting is not defined,
+; CouchDB will use the value of `max_limit` instead. If neither is
+; defined, the default is 2000 as stated here.
+; max_limit_partitions = 2000 \ No newline at end of file
diff --git a/rel/reltool.config b/rel/reltool.config
index 1051d2e77..da85f36bc 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -42,6 +42,7 @@
couch_event,
couch_peruser,
ddoc_cache,
+ dreyfus,
ets_lru,
fabric,
folsom,
@@ -99,6 +100,7 @@
{app, couch_event, [{incl_cond, include}]},
{app, couch_peruser, [{incl_cond, include}]},
{app, ddoc_cache, [{incl_cond, include}]},
+ {app, dreyfus, [{incl_cond, include}]},
{app, ets_lru, [{incl_cond, include}]},
{app, fabric, [{incl_cond, include}]},
{app, folsom, [{incl_cond, include}]},
diff --git a/share/server/dreyfus.js b/share/server/dreyfus.js
new file mode 100644
index 000000000..7bed97352
--- /dev/null
+++ b/share/server/dreyfus.js
@@ -0,0 +1,62 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var Dreyfus = (function() {
+
+ var index_results = []; // holds temporary emitted values during index
+
+ function handleIndexError(err, doc) {
+ if (err == "fatal_error") {
+ throw(["error", "map_runtime_error", "function raised 'fatal_error'"]);
+ } else if (err[0] == "fatal") {
+ throw(err);
+ }
+ var message = "function raised exception " + err.toSource();
+ if (doc) message += " with doc._id " + doc._id;
+ log(message);
+ };
+
+ return {
+ index: function(name, value, options) {
+ if (typeof name !== 'string') {
+ throw({name: 'TypeError', message: 'name must be a string not ' + typeof name});
+ }
+ if (name.substring(0, 1) === '_') {
+ throw({name: 'ReservedName', message: 'name must not start with an underscore'});
+ }
+ if (typeof value !== 'string' && typeof value !== 'number' && typeof value !== 'boolean') {
+ throw({name: 'TypeError', message: 'value must be a string, a number or boolean not ' + typeof value});
+ }
+ if (options && typeof options !== 'object') {
+ throw({name: 'TypeError', message: 'options must be an object not ' + typeof options});
+ }
+ index_results.push([name, value, options || {}]);
+ },
+
+ indexDoc: function(doc) {
+ Couch.recursivelySeal(doc);
+ var buf = [];
+ for each (fun in State.funs) {
+ index_results = [];
+ try {
+ fun(doc);
+ buf.push(index_results);
+ } catch (err) {
+ handleIndexError(err, doc);
+ buf.push([]);
+ }
+ }
+ print(JSON.stringify(buf));
+ }
+
+ }
+})();
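+
+// For illustration only: a search index function as it might appear in a
+// design document. The sandbox exposes Dreyfus.index as the global
+// `index` (see loop.js below); the field name and the `store` option
+// here are illustrative:
+//
+//   function(doc) {
+//     if (doc.title) {
+//       index("title", doc.title, {store: true});
+//     }
+//   }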
diff --git a/share/server/loop.js b/share/server/loop.js
index f17983940..5d7738911 100644
--- a/share/server/loop.js
+++ b/share/server/loop.js
@@ -25,6 +25,7 @@ function create_sandbox() {
sandbox.send = Render.send;
sandbox.getRow = Render.getRow;
sandbox.isArray = isArray;
+ sandbox.index = Dreyfus.index;
} catch (e) {
var sandbox = {};
}
@@ -114,6 +115,7 @@ var Loop = function() {
"add_fun" : State.addFun,
"add_lib" : State.addLib,
"map_doc" : Views.mapDoc,
+ "index_doc": Dreyfus.indexDoc,
"reduce" : Views.reduce,
"rereduce" : Views.rereduce
};
diff --git a/src/dreyfus/.gitignore b/src/dreyfus/.gitignore
new file mode 100644
index 000000000..16fd00698
--- /dev/null
+++ b/src/dreyfus/.gitignore
@@ -0,0 +1,4 @@
+ebin/
+.*.sw?
+test/elixir/_build
+test/elixir/deps
diff --git a/src/dreyfus/LICENSE.txt b/src/dreyfus/LICENSE.txt
new file mode 100644
index 000000000..1561dafac
--- /dev/null
+++ b/src/dreyfus/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2015 IBM Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/dreyfus/README.md b/src/dreyfus/README.md
new file mode 100644
index 000000000..d653432d0
--- /dev/null
+++ b/src/dreyfus/README.md
@@ -0,0 +1,78 @@
+What is dreyfus?
+-------------
+Dreyfus manages Clouseau nodes to deliver full-text search features.
+
+Dreyfus consists of the following files:
+
+- **dreyfus.app.src** - the application resource file. As this file shows, the application callback module is dreyfus_app, and the two registered processes started by this application are dreyfus_index_manager and dreyfus_sup.
+- **dreyfus_app.erl** - a callback module for the application that starts the top supervisor by dreyfus_sup:start_link().
+- **dreyfus_sup.erl** - the top supervisor that starts dreyfus_index_manager as its child worker process.
+- **dreyfus_index_manager.erl** - manages multiple processes of dreyfus_index.
+- **dreyfus_index.erl** - contains the main callback functions for operating on an index. One process is created for every index (a distinct index function in a design document).
+- **dreyfus_index_updater.erl** - contains callback functions for index update.
+- **dreyfus_httpd.erl** - handles http requests.
+- **dreyfus_fabric.erl**, **dreyfus_fabric_cleanup.erl**, **dreyfus_fabric_group1.erl**, **dreyfus_fabric_group2.erl**, **dreyfus_fabric_info.erl**, **dreyfus_fabric_search.erl** - a collection of proxy functions for operations in a cluster with shards.
+- **dreyfus_rpc.erl** - proxy functions executed for every shard.
+- **clouseau_rpc.erl** - contains remote procedure calls functions to Clouseau nodes.
+- **dreyfus_bookmark.erl** - utility functions for managing bookmarks, which are used to retrieve the next set of results.
+- **dreyfus_util.erl** - various utility functions.
+
+
+
+Life of an HTTP request
+-------------
+HTTP requests have the following life cycle:
+
+![Dreyfus](https://cloud.githubusercontent.com/assets/5738841/7590919/cbaf1c50-f898-11e4-8a4c-462a1a680135.png)
+
+1. A request from chttpd goes to dreyfus_httpd.
+2. dreyfus_httpd:
+  - parses and validates the request in the functions `parse_index_params` and `validate_index_query`.
+  - depending on the type of the request, invokes one of the fabric functions: dreyfus_fabric_search, dreyfus_fabric_group1, dreyfus_fabric_group2, dreyfus_fabric_info, or dreyfus_fabric_cleanup.
+3. dreyfus_fabric:
+ - Get shards and workers to be executed on every shard:
+ `Shards = dreyfus_util:get_shards(DbName, QueryArgs)`,
+ `Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search,
+ [DDoc, IndexName, dreyfus_util:export(QueryArgs)])`
+   - spawns processes to execute the jobs on every shard using the rexi RPC server: `rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, State, infinity, 1000 * 60 * 60)`
+4. dreyfus_rpc:
+ - is executed on every shard of every node at the same time.
+   - calls `dreyfus_index_manager:get_index(DbName, Index)` to get the index. dreyfus_index_manager will spawn a process to create the index if it doesn't already exist.
+   - the index on every shard is updated, if necessary, via `dreyfus_index:await(Pid, MinSeq)`.
+ - calls `dreyfus_index:Fun(Pid, QueryArgs)` with a corresponding search request.
+
+5. dreyfus_index:
+ - synchronously calls `clouseau_rpc:search`.
+6. clouseau_rpc:
+   - calls `ioq:call(Ref, Msg, erlang:get(io_priority))` to run the search on Clouseau nodes using Lucene.
+7. top_docs are returned from Lucene
+8. top_docs are passed to dreyfus_index
+9. top_docs are passed to dreyfus_rpc
+10. dreyfus_rpc processes pass their individual top_docs as a reply `rexi:reply(Result)` to the initial dreyfus_fabric process that spawned them.
+11. dreyfus_fabric merges documents from all shards: `MergedTopDocs = merge_top_docs(TopDocs, Sortable, Limit, Sort)` and returns the results to dreyfus_httpd.
+12. dreyfus_httpd returns the formatted results to chttpd via `send_json(..)` (see the sketch below).
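+
+The coordinator flow in steps 3-11 can be condensed into the following
+sketch. This is an illustration assembled from the calls quoted above,
+not the actual module source, and `init_state/2` is a stand-in for the
+real state setup:
+
+```erlang
+search(DbName, DDoc, IndexName, QueryArgs) ->
+    Shards = dreyfus_util:get_shards(DbName, QueryArgs),
+    Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search,
+        [DDoc, IndexName, dreyfus_util:export(QueryArgs)]),
+    %% Each worker replies with its shard-local top_docs; handle_message/3
+    %% merges them until every shard range has answered.
+    rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
+        init_state(QueryArgs, Workers), infinity, 1000 * 60 * 60).
+```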
+
+
+Indexing
+-------------
+
+### Indexing triggered by a search request
+During a search request, before calling dreyfus_index:search, dreyfus_rpc first initiates an update of the Lucene indexes. It does so in the following way:
+
+![DreyfusIndexing.png](https://cloud.githubusercontent.com/assets/5738841/7590923/d12303fe-f898-11e4-833d-b1387b7048a6.png)
+
+1. The last sequence number (the sequence of the most recent change in the database) is calculated: `{_LastSeq, MinSeq} = calculate_seqs(Db, Stale)`. For stale queries (queries that don't need to reflect recent changes in the database), MinSeq will be 0, meaning they don't need to initiate an index update before returning query results. The meaning of 0 is 'wait until the index is at least at update_seq 0', which is true even for empty indexes.
+
+2. The function call `dreyfus_index:design_doc_to_index(DDoc, IndexName)` returns a record representation of an index: `#index{
+ analyzer=Analyzer,
+ ddoc_id=Id,
+ def=Def,
+ def_lang=Language,
+ name=IndexName,
+    sig=Sig}`. `Sig` here is a hash of the index function and analyzer defined as a JavaScript function in the design document. `Sig` is used to check whether the index definition has changed and the index needs to be rebuilt.
+
+
+3. The function call `dreyfus_index_manager:get_index(DbName, Index)` returns the Pid of the dreyfus_index process corresponding to this index. dreyfus_index_manager keeps track of the dreyfus_index processes for all indexes in an ets table: `ets:new(?BY_SIG, [set, private, named_table])`. If a dreyfus_index process for the given index exists in the ets table ?BY_SIG, it is returned. If it doesn't exist, a new dreyfus_index process is spawned. To do this, dreyfus_index_manager in `handle_call({get_index, ..})` returns `{noreply, State};` so as not to block the gen_server, and hands off creation of the new index process to a spawned process - `spawn_link(fun() -> new_index(DbName, Index) end)` - remembering the Pid of the caller in the ets table ?BY_SIG. `new_index` creates a new index process and sends an `open_ok` message to the dreyfus_index_manager gen_server. `handle_call({open_ok, ..})` then retrieves the Pid (`From`) of the original caller and sends that caller a reply containing the Pid of the newly created index process, NewPid. Calling `add_to_ets(NewPid, DbName, Sig)` updates both ets tables, ?BY_SIG and ?BY_PID.
+
+4. `dreyfus_index:await(Pid, MinSeq)` initiates an update of the index if the requested MinSeq is bigger than the current Seq stored in the index. It does this by calling `dreyfus_index_updater:update(IndexPid, Index)`. dreyfus_index_updater loads all documents modified since the last seq stored in the dreyfus index and, for every document, calls `clouseau_rpc:delete` to delete the document from the Java Lucene index, or `clouseau_rpc:update` to update the index in Java Lucene (see the sketch below).
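+
+Below is a condensed sketch of steps 1-4 (illustrative only:
+`update_and_search/5` is a made-up name, the other calls are the ones
+cited above, and error handling is omitted):
+
+```erlang
+update_and_search(Db, DDoc, IndexName, Stale, QueryArgs) ->
+    {_LastSeq, MinSeq} = calculate_seqs(Db, Stale),
+    {ok, Index} = dreyfus_index:design_doc_to_index(DDoc, IndexName),
+    {ok, Pid} = dreyfus_index_manager:get_index(couch_db:name(Db), Index),
+    %% Blocks until the index catches up to MinSeq, kicking off
+    %% dreyfus_index_updater if the index is behind (return value elided).
+    _ = dreyfus_index:await(Pid, MinSeq),
+    dreyfus_index:search(Pid, QueryArgs).
+```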
diff --git a/src/dreyfus/include/dreyfus.hrl b/src/dreyfus/include/dreyfus.hrl
new file mode 100644
index 000000000..7c6a36945
--- /dev/null
+++ b/src/dreyfus/include/dreyfus.hrl
@@ -0,0 +1,74 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(index, {
+ current_seq=0,
+ dbname,
+ ddoc_id,
+ analyzer,
+ def,
+ def_lang,
+ name,
+ sig=nil
+}).
+
+-record(grouping, {
+ by=nil,
+ groups=[],
+ offset=0,
+ limit=10,
+ sort=relevance,
+ new_api=true
+}).
+
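+% Query-time options; note that the `limit` default below matches the
+% [dreyfus] `limit` setting in rel/overlay/etc/default.ini.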
+-record(index_query_args, {
+ q,
+ partition=nil,
+ limit=25,
+ stale=false,
+ include_docs=false,
+ bookmark=nil,
+ sort=relevance,
+ grouping=#grouping{},
+ stable=false,
+ counts=nil,
+ ranges=nil,
+ drilldown=[],
+ include_fields=nil,
+ highlight_fields=nil,
+ highlight_pre_tag = <<"<em>">>,
+ highlight_post_tag = <<"</em>">>,
+ highlight_number=1,
+ highlight_size=0,
+ raw_bookmark=false
+}).
+
+-record(sortable, {
+ order, % sort order
+ shard, % originating shard
+ item % the item itself
+}).
+
+% Our local representation of top_docs, not equal to wire format.
+-record(top_docs, {
+ update_seq,
+ total_hits,
+ hits,
+ counts,
+ ranges
+}).
+
+%% These must match the case classes in clouseau.
+-record(hit, {
+ order,
+ fields
+}).
diff --git a/src/dreyfus/priv/stats_descriptions.cfg b/src/dreyfus/priv/stats_descriptions.cfg
new file mode 100644
index 000000000..7f93ee26a
--- /dev/null
+++ b/src/dreyfus/priv/stats_descriptions.cfg
@@ -0,0 +1,65 @@
+%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+%% use this file except in compliance with the License. You may obtain a copy of
+%% the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+%% License for the specific language governing permissions and limitations under
+%% the License.
+
+
+{[dreyfus, httpd, search], [
+ {type, histogram},
+ {desc, <<"Distribution of overall search request latency as experienced by the end user">>}
+]}.
+{[dreyfus, rpc, search], [
+ {type, histogram},
+ {desc, <<"length of a search RPC worker">>}
+]}.
+{[dreyfus, rpc, group1], [
+ {type, histogram},
+ {desc, <<"length of a group1 RPC worker">>}
+]}.
+{[dreyfus, rpc, group2], [
+ {type, histogram},
+ {desc, <<"length of a group2 RPC worker">>}
+]}.
+{[dreyfus, rpc, info], [
+ {type, histogram},
+ {desc, <<"length of an info RPC worker">>}
+]}.
+{[dreyfus, index, await], [
+    {type, histogram},
+    {desc, <<"length of a dreyfus_index await request">>}
+]}.
+{[dreyfus, index, search], [
+    {type, histogram},
+    {desc, <<"length of a dreyfus_index search request">>}
+]}.
+{[dreyfus, index, group1], [
+    {type, histogram},
+    {desc, <<"length of a dreyfus_index group1 request">>}
+]}.
+{[dreyfus, index, group2], [
+    {type, histogram},
+    {desc, <<"length of a dreyfus_index group2 request">>}
+]}.
+{[dreyfus, index, info], [
+    {type, histogram},
+    {desc, <<"length of a dreyfus_index info request">>}
+]}.
+
+%% Declare IOQ search channel metrics
+{[couchdb, io_queue, search], [
+ {type, counter},
+ {desc, <<"Search IO directly triggered by client requests">>}
+]}.
+
+%% Declare IOQ2 search channel metrics
+{[couchdb, io_queue2, search, count], [
+ {type, counter},
+ {desc, <<"Search IO directly triggered by client requests">>}
+]}.
diff --git a/src/dreyfus/src/clouseau_rpc.erl b/src/dreyfus/src/clouseau_rpc.erl
new file mode 100644
index 000000000..38247ff81
--- /dev/null
+++ b/src/dreyfus/src/clouseau_rpc.erl
@@ -0,0 +1,114 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(clouseau_rpc).
+
+-include("dreyfus.hrl").
+
+-export([open_index/3]).
+-export([await/2, commit/2, get_update_seq/1, info/1, search/2]).
+-export([group1/7, group2/2]).
+-export([delete/2, update/3, cleanup/1, cleanup/2, rename/1]).
+-export([analyze/2, version/0, disk_size/1]).
+-export([set_purge_seq/2, get_purge_seq/1, get_root_dir/0]).
+-export([connected/0]).
+
+open_index(Peer, Path, Analyzer) ->
+ rpc({main, clouseau()}, {open, Peer, Path, Analyzer}).
+
+disk_size(Path) ->
+ rpc({main, clouseau()}, {disk_size, Path}).
+get_root_dir() ->
+ rpc({main, clouseau()}, {get_root_dir}).
+
+await(Ref, MinSeq) ->
+ rpc(Ref, {await, MinSeq}).
+
+commit(Ref, NewCommitSeq) ->
+ rpc(Ref, {commit, NewCommitSeq}).
+
+info(Ref) ->
+ rpc(Ref, info).
+
+get_update_seq(Ref) ->
+ rpc(Ref, get_update_seq).
+
+set_purge_seq(Ref, Seq) ->
+ rpc(Ref, {set_purge_seq, Seq}).
+
+get_purge_seq(Ref) ->
+ rpc(Ref, get_purge_seq).
+
+search(Ref, Args) ->
+ case rpc(Ref, {search, Args}) of
+ {ok, Response} when is_list(Response) ->
+ {ok, #top_docs{
+ update_seq = couch_util:get_value(update_seq, Response),
+ total_hits = couch_util:get_value(total_hits, Response),
+ hits = couch_util:get_value(hits, Response),
+ counts = couch_util:get_value(counts, Response),
+ ranges = couch_util:get_value(ranges, Response)
+ }};
+ Else ->
+ Else
+ end.
+
+group1(Ref, Query, GroupBy, Refresh, Sort, Offset, Limit) ->
+ rpc(Ref, {group1, Query, GroupBy, Refresh, Sort, Offset, Limit}).
+
+group2(Ref, Args) ->
+ rpc(Ref, {group2, Args}).
+
+delete(Ref, Id) ->
+ rpc(Ref, {delete, couch_util:to_binary(Id)}).
+
+update(Ref, Id, Fields) ->
+ rpc(Ref, {update, Id, Fields}).
+
+cleanup(DbName) ->
+ gen_server:cast({cleanup, clouseau()}, {cleanup, DbName}).
+
+rename(DbName) ->
+ gen_server:cast({cleanup, clouseau()}, {rename, DbName}).
+
+cleanup(DbName, ActiveSigs) ->
+ gen_server:cast({cleanup, clouseau()}, {cleanup, DbName, ActiveSigs}).
+
+analyze(Analyzer, Text) ->
+ rpc({analyzer, clouseau()}, {analyze, Analyzer, Text}).
+
+version() ->
+ rpc({main, clouseau()}, version).
+
+connected() ->
+ HiddenNodes = erlang:nodes(hidden),
+ case lists:member(clouseau(), HiddenNodes) of
+ true ->
+ true;
+ false ->
+ % We might have just booted up, so let's send a test RPC
+ case (catch version()) of
+ {ok, _} ->
+ true;
+ _Err ->
+ false
+ end
+ end.
+
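+% All synchronous Clouseau calls are funneled through ioq, so search IO
+% is subject to the same prioritisation as the other IO channels (see the
+% io_queue search metrics in priv/stats_descriptions.cfg).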
+rpc(Ref, Msg) ->
+ ioq:call(Ref, Msg, erlang:get(io_priority)).
+
+clouseau() ->
+ list_to_atom(config:get("dreyfus", "name", "clouseau@127.0.0.1")).
diff --git a/src/dreyfus/src/dreyfus.app.src b/src/dreyfus/src/dreyfus.app.src
new file mode 100644
index 000000000..be6595222
--- /dev/null
+++ b/src/dreyfus/src/dreyfus.app.src
@@ -0,0 +1,22 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+{application, dreyfus, [
+ {description, "Clouseau index manager"},
+ {vsn, git},
+ {mod, {dreyfus_app, []}},
+ {registered, [dreyfus_index_manager, dreyfus_sup]},
+ {applications, [kernel, stdlib, couch_log, config, couch_event, mem3, ioq, couch_epi]}
+]}.
diff --git a/src/dreyfus/src/dreyfus_app.erl b/src/dreyfus/src/dreyfus_app.erl
new file mode 100644
index 000000000..7cd7f4a31
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_app.erl
@@ -0,0 +1,24 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_app).
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_Type, []) ->
+ dreyfus_sup:start_link().
+
+stop([]) ->
+ ok.
diff --git a/src/dreyfus/src/dreyfus_bookmark.erl b/src/dreyfus/src/dreyfus_bookmark.erl
new file mode 100644
index 000000000..9a2979b25
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_bookmark.erl
@@ -0,0 +1,90 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_bookmark).
+
+-include("dreyfus.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+-export([
+ update/3,
+ unpack/2,
+ pack/1,
+ add_missing_shards/2
+]).
+
+
+update(_Sort, Bookmark, []) ->
+ Bookmark;
+update(relevance, Bookmark, [#sortable{} = Sortable | Rest]) ->
+ #sortable{
+ order = [Score, Doc],
+ shard = Shard
+ } = Sortable,
+ B1 = fabric_dict:store(Shard, {Score, Doc}, Bookmark),
+ B2 = fabric_view:remove_overlapping_shards(Shard, B1),
+ update(relevance, B2, Rest);
+update(Sort, Bookmark, [#sortable{} = Sortable | Rest]) ->
+ #sortable{
+ order = Order,
+ shard = Shard
+ } = Sortable,
+ B1 = fabric_dict:store(Shard, Order, Bookmark),
+ B2 = fabric_view:remove_overlapping_shards(Shard, B1),
+ update(Sort, B2, Rest).
+
+
+unpack(DbName, #index_query_args{bookmark=nil} = Args) ->
+ fabric_dict:init(dreyfus_util:get_shards(DbName, Args), nil);
+unpack(DbName, #index_query_args{} = Args) ->
+ unpack(DbName, Args#index_query_args.bookmark);
+unpack(DbName, Packed) when is_binary(Packed) ->
+ lists:map(fun({Node, Range, After}) ->
+ case mem3:get_shard(DbName, Node, Range) of
+ {ok, Shard} ->
+ {Shard, After};
+ {error, not_found} ->
+ PlaceHolder = #shard{
+ node = Node,
+ range = Range,
+ dbname = DbName,
+ _='_'
+ },
+ {PlaceHolder, After}
+ end
+ end, binary_to_term(couch_util:decodeBase64Url(Packed))).
+
+
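+% A bookmark is the per-shard continuation state: a base64url-encoded
+% term_to_binary of {Node, Range, After} tuples, one per shard that has
+% contributed results so far.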
+pack(nil) ->
+ null;
+pack(Workers) ->
+ Workers1 = [{N,R,A} || {#shard{node=N, range=R}, A} <- Workers, A =/= nil],
+ Bin = term_to_binary(Workers1, [compressed, {minor_version,1}]),
+ couch_util:encodeBase64Url(Bin).
+
+
+add_missing_shards(Bookmark, LiveShards) ->
+ {BookmarkShards, _} = lists:unzip(Bookmark),
+ add_missing_shards(Bookmark, BookmarkShards, LiveShards).
+
+
+add_missing_shards(Bookmark, _, []) ->
+ Bookmark;
+add_missing_shards(Bookmark, BMShards, [H | T]) ->
+ Bookmark1 = case lists:keymember(H#shard.range, #shard.range, BMShards) of
+ true -> Bookmark;
+ false -> fabric_dict:store(H, nil, Bookmark)
+ end,
+ add_missing_shards(Bookmark1, BMShards, T).
diff --git a/src/dreyfus/src/dreyfus_config.erl b/src/dreyfus/src/dreyfus_config.erl
new file mode 100644
index 000000000..b7555c1d0
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_config.erl
@@ -0,0 +1,15 @@
+-module(dreyfus_config).
+
+-export([data/0, get/1]).
+
+data() ->
+ try
+ config:get("dreyfus_blacklist")
+ catch error:badarg ->
+ % lazy workaround to address issue with epi invocation on startup
+ []
+ end.
+
+get(Key) ->
+ Handle = couch_epi:get_handle({dreyfus, black_list}),
+ couch_epi:get_value(Handle, dreyfus, Key).
diff --git a/src/dreyfus/src/dreyfus_epi.erl b/src/dreyfus/src/dreyfus_epi.erl
new file mode 100644
index 000000000..cb07f8a34
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_epi.erl
@@ -0,0 +1,46 @@
+-module(dreyfus_epi).
+
+-behaviour(couch_epi_plugin).
+
+-export([
+ app/0,
+ providers/0,
+ services/0,
+ data_subscriptions/0,
+ data_providers/0,
+ processes/0,
+ notify/3
+]).
+
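+% How often (in ms) couch_epi re-polls dreyfus_config:data() for changes
+% to the dreyfus_blacklist config section.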
+-define(DATA_INTERVAL, 1000).
+
+app() ->
+ dreyfus.
+
+providers() ->
+ [
+ {couch_db, dreyfus_plugin_couch_db},
+ {chttpd_handlers, dreyfus_httpd_handlers}
+ ].
+
+
+services() ->
+ [].
+
+data_subscriptions() ->
+ [{dreyfus, black_list}].
+
+data_providers() ->
+ [
+ {{dreyfus, black_list}, {callback_module, dreyfus_config},
+ [{interval, ?DATA_INTERVAL}]}
+ ].
+
+processes() ->
+ [].
+
+notify(_Key, _Old, _New) ->
+ Listeners = application:get_env(dreyfus, config_listeners, []),
+ lists:foreach(fun(L) ->
+ L ! dreyfus_config_change_finished
+ end, Listeners).
diff --git a/src/dreyfus/src/dreyfus_fabric.erl b/src/dreyfus/src/dreyfus_fabric.erl
new file mode 100644
index 000000000..a953b6a38
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_fabric.erl
@@ -0,0 +1,108 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_fabric).
+-export([get_json_docs/2, handle_error_message/6]).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include("dreyfus.hrl").
+
+get_json_docs(DbName, DocIds) ->
+ fabric:all_docs(DbName, fun callback/2, [], [{keys, DocIds}, {include_docs, true}]).
+
+callback({meta,_}, Acc) ->
+ {ok, Acc};
+callback({error, Reason}, _Acc) ->
+ {error, Reason};
+callback({row, Row}, Acc) ->
+ {id, Id} = lists:keyfind(id, 1, Row),
+ {ok, [{Id, lists:keyfind(doc, 1, Row)}|Acc]};
+callback(complete, Acc) ->
+ {ok, lists:reverse(Acc)};
+callback(timeout, _Acc) ->
+ {error, timeout}.
+
+handle_error_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker,
+ Counters, _Replacements, _StartFun, _StartArgs) ->
+ case fabric_util:remove_down_workers(Counters, NodeRef) of
+ {ok, NewCounters} ->
+ {ok, NewCounters};
+ error ->
+ {error, {nodedown, <<"progress not possible">>}}
+ end;
+handle_error_message({rexi_EXIT, {maintenance_mode, _}}, Worker,
+ Counters, Replacements, StartFun, StartArgs) ->
+ handle_replacement(Worker, Counters, Replacements, StartFun, StartArgs);
+handle_error_message({rexi_EXIT, Reason}, Worker,
+ Counters, _Replacements, _StartFun, _StartArgs) ->
+ handle_error(Reason, Worker, Counters);
+handle_error_message({error, Reason}, Worker,
+ Counters, _Replacements, _StartFun, _StartArgs) ->
+ handle_error(Reason, Worker, Counters);
+handle_error_message({'EXIT', Reason}, Worker,
+ Counters, _Replacements, _StartFun, _StartArgs) ->
+ handle_error({exit, Reason}, Worker, Counters);
+handle_error_message(Reason, Worker, Counters,
+ _Replacements, _StartFun, _StartArgs) ->
+ couch_log:error("Unexpected error during request: ~p", [Reason]),
+ handle_error(Reason, Worker, Counters).
+
+handle_error(Reason, Worker, Counters0) ->
+ Counters = fabric_dict:erase(Worker, Counters0),
+ case fabric_view:is_progress_possible(Counters) of
+ true ->
+ {ok, Counters};
+ false ->
+ {error, Reason}
+ end.
+
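+% A worker lost to maintenance mode is replaced by restarting the RPC on
+% the replacement shards for its range; start_replacement/3 carries the
+% failed shard's bookmark position over to its replacement.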
+handle_replacement(Worker, OldCntrs0, OldReplacements, StartFun, StartArgs) ->
+ OldCounters = lists:filter(fun({#shard{ref=R}, _}) ->
+ R /= Worker#shard.ref
+ end, OldCntrs0),
+ case lists:keytake(Worker#shard.range, 1, OldReplacements) of
+ {value, {_Range, Replacements}, NewReplacements} ->
+ NewCounters = lists:foldl(fun(Repl, CounterAcc) ->
+ NewCounter = start_replacement(StartFun, StartArgs, Repl),
+ fabric_dict:store(NewCounter, nil, CounterAcc)
+ end, OldCounters, Replacements),
+ true = fabric_view:is_progress_possible(NewCounters),
+ NewRefs = fabric_dict:fetch_keys(NewCounters),
+ {new_refs, NewRefs, NewCounters, NewReplacements};
+ false ->
+ handle_error({nodedown, <<"progress not possible">>},
+ Worker, OldCounters)
+ end.
+
+start_replacement(StartFun, StartArgs, Shard) ->
+ [DDoc, IndexName, QueryArgs] = StartArgs,
+ After = case QueryArgs#index_query_args.bookmark of
+ Bookmark when is_list(Bookmark) ->
+ lists:foldl(fun({#shard{range=R0}, After0}, Acc) ->
+ case R0 == Shard#shard.range of
+ true -> After0;
+ false -> Acc
+ end
+ end, nil, Bookmark);
+ _ ->
+ nil
+ end,
+ QueryArgs1 = QueryArgs#index_query_args{bookmark=After},
+ StartArgs1 = [DDoc, IndexName, QueryArgs1],
+ Ref = rexi:cast(Shard#shard.node,
+ {dreyfus_rpc, StartFun,
+ [Shard#shard.name|StartArgs1]}),
+ Shard#shard{ref = Ref}.
diff --git a/src/dreyfus/src/dreyfus_fabric_cleanup.erl b/src/dreyfus/src/dreyfus_fabric_cleanup.erl
new file mode 100644
index 000000000..b5e030db0
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_fabric_cleanup.erl
@@ -0,0 +1,74 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_fabric_cleanup).
+
+-include("dreyfus.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-export([go/1]).
+
+go(DbName) ->
+ {ok, DesignDocs} = fabric:design_docs(DbName),
+ ActiveSigs = lists:usort(lists:flatmap(fun active_sigs/1,
+ [couch_doc:from_json_obj(DD) || DD <- DesignDocs])),
+ cleanup_local_purge_doc(DbName, ActiveSigs),
+ clouseau_rpc:cleanup(DbName, ActiveSigs),
+ ok.
+
+active_sigs(#doc{body={Fields}}=Doc) ->
+ {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
+ {IndexNames, _} = lists:unzip(RawIndexes),
+ [begin
+ {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
+ Index#index.sig
+ end || IndexName <- IndexNames].
+
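+% Delete the local purge checkpoint documents for on-disk index
+% directories whose signatures no longer match any active index.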
+cleanup_local_purge_doc(DbName, ActiveSigs) ->
+ {ok, BaseDir} = clouseau_rpc:get_root_dir(),
+ DbNamePattern = <<DbName/binary, ".*">>,
+ Pattern0 = filename:join([BaseDir, "shards", "*", DbNamePattern, "*"]),
+ Pattern = binary_to_list(iolist_to_binary(Pattern0)),
+ DirListStrs = filelib:wildcard(Pattern),
+ DirList = [iolist_to_binary(DL) || DL <- DirListStrs],
+ LocalShards = mem3:local_shards(DbName),
+ ActiveDirs = lists:foldl(fun(LS, AccOuter) ->
+ lists:foldl(fun(Sig, AccInner) ->
+ DirName = filename:join([BaseDir, LS#shard.name, Sig]),
+ [DirName | AccInner]
+ end, AccOuter, ActiveSigs)
+ end, [], LocalShards),
+
+ DeadDirs = DirList -- ActiveDirs,
+    lists:foreach(fun(IdxDir) ->
+ Sig = dreyfus_util:get_signature_from_idxdir(IdxDir),
+ case Sig of undefined -> ok; _ ->
+ DocId = dreyfus_util:get_local_purge_doc_id(Sig),
+ LocalShards = mem3:local_shards(DbName),
+ lists:foldl(fun(LS, _AccOuter) ->
+ ShardDbName = LS#shard.name,
+ {ok, ShardDb} = couch_db:open_int(ShardDbName, []),
+ case couch_db:open_doc(ShardDb, DocId, []) of
+ {ok, LocalPurgeDoc} ->
+ couch_db:update_doc(ShardDb,
+ LocalPurgeDoc#doc{deleted=true}, [?ADMIN_CTX]);
+ {not_found, _} ->
+ ok
+ end,
+ couch_db:close(ShardDb)
+ end, [], LocalShards)
+ end
+    end, DeadDirs).
diff --git a/src/dreyfus/src/dreyfus_fabric_group1.erl b/src/dreyfus/src/dreyfus_fabric_group1.erl
new file mode 100644
index 000000000..2d530ca7e
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_fabric_group1.erl
@@ -0,0 +1,126 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_fabric_group1).
+
+-include("dreyfus.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-export([go/4]).
+
+-record(state, {
+ limit,
+ sort,
+ top_groups,
+ counters,
+ start_args,
+ replacements
+}).
+
+go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
+ {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
+ dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
+ go(DbName, DDoc, IndexName, QueryArgs);
+
+go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
+ DesignName = dreyfus_util:get_design_docid(DDoc),
+ dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
+ Shards = dreyfus_util:get_shards(DbName, QueryArgs),
+ Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group1, [DDoc,
+ IndexName, dreyfus_util:export(QueryArgs)]),
+ Replacements = fabric_view:get_shard_replacements(DbName, Workers),
+ Counters = fabric_dict:init(Workers, nil),
+ RexiMon = fabric_util:create_monitors(Workers),
+ State = #state{
+ limit = QueryArgs#index_query_args.grouping#grouping.limit,
+ sort = QueryArgs#index_query_args.grouping#grouping.sort,
+ top_groups = [],
+ counters = Counters,
+ start_args = [DDoc, IndexName, QueryArgs],
+ replacements = Replacements
+ },
+ try
+ rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
+ State, infinity, 1000 * 60 * 60)
+ after
+ rexi_monitor:stop(RexiMon),
+ fabric_util:cleanup(Workers)
+ end;
+go(DbName, DDoc, IndexName, OldArgs) ->
+ go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)).
+
+handle_message({ok, NewTopGroups}, Shard, State0) ->
+ State = upgrade_state(State0),
+ #state{top_groups=TopGroups, limit=Limit, sort=Sort} = State,
+ case fabric_dict:lookup_element(Shard, State#state.counters) of
+ undefined ->
+ %% already heard from someone else in this range
+ {ok, State};
+ nil ->
+ C1 = fabric_dict:store(Shard, ok, State#state.counters),
+ C2 = fabric_view:remove_overlapping_shards(Shard, C1),
+ MergedTopGroups = merge_top_groups(TopGroups, make_sortable(Shard, NewTopGroups), Limit, Sort),
+ State1 = State#state{
+ counters=C2,
+ top_groups=MergedTopGroups
+ },
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, State1};
+ false ->
+ {stop, remove_sortable(MergedTopGroups)}
+ end
+ end;
+
+handle_message(Error, Worker, State0) ->
+ State = upgrade_state(State0),
+ case dreyfus_fabric:handle_error_message(Error, Worker,
+ State#state.counters, State#state.replacements,
+ group1, State#state.start_args) of
+ {ok, Counters} ->
+ {ok, State#state{counters=Counters}};
+ {new_refs, NewRefs, NewCounters, NewReplacements} ->
+ NewState = State#state{
+ counters = NewCounters,
+ replacements = NewReplacements
+ },
+ {new_refs, NewRefs, NewState};
+ Else ->
+ Else
+ end.
+
+merge_top_groups(TopGroupsA, TopGroupsB, Limit, Sort) ->
+ MergedGroups0 = TopGroupsA ++ TopGroupsB,
+ GNs = lists:usort([N || #sortable{item={N,_}} <- MergedGroups0]),
+ MergedGroups = [merge_top_group(Sort, [S || #sortable{item={N,_}}=S <- MergedGroups0, N =:= GN]) || GN <- GNs],
+ lists:sublist(dreyfus_util:sort(Sort, MergedGroups), Limit).
+
+merge_top_group(_Sort, [Group]) ->
+ Group;
+merge_top_group(Sort, [_, _] = Groups) ->
+ hd(dreyfus_util:sort(Sort, Groups)).
+
+make_sortable(Shard, TopGroups) ->
+ [#sortable{item=G, order=Order, shard=Shard} || {_Name, Order}=G <- TopGroups].
+
+remove_sortable(Sortables) ->
+ [Item || #sortable{item=Item} <- Sortables].
+
+upgrade_state({state, Limit, Sort, TopGroups, Counters}) ->
+ #state{limit=Limit, sort=Sort, top_groups=TopGroups, counters=Counters,
+ replacements=[]};
+upgrade_state(#state{}=State) ->
+ State.
diff --git a/src/dreyfus/src/dreyfus_fabric_group2.erl b/src/dreyfus/src/dreyfus_fabric_group2.erl
new file mode 100644
index 000000000..1239f8b74
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_fabric_group2.erl
@@ -0,0 +1,155 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_fabric_group2).
+
+-include("dreyfus.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-export([go/4]).
+
+-record(state, {
+ limit,
+ sort,
+ total_hits,
+ total_grouped_hits,
+ top_groups,
+ counters,
+ start_args,
+ replacements
+}).
+
+go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
+ {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
+ dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
+ go(DbName, DDoc, IndexName, QueryArgs);
+
+go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
+ DesignName = dreyfus_util:get_design_docid(DDoc),
+ dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
+ Shards = dreyfus_util:get_shards(DbName, QueryArgs),
+ Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group2,
+ [DDoc, IndexName, dreyfus_util:export(QueryArgs)]),
+ Replacements = fabric_view:get_shard_replacements(DbName, Workers),
+ Counters = fabric_dict:init(Workers, nil),
+ RexiMon = fabric_util:create_monitors(Workers),
+ State = #state{
+ limit = QueryArgs#index_query_args.limit,
+ sort = QueryArgs#index_query_args.sort,
+ total_hits = 0,
+ total_grouped_hits = 0,
+ top_groups = [],
+ counters = Counters,
+ start_args = [DDoc, IndexName, QueryArgs],
+ replacements = Replacements
+ },
+ try
+ rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
+ State, infinity, 1000 * 60 * 60)
+ after
+ rexi_monitor:stop(RexiMon),
+ fabric_util:cleanup(Workers)
+ end;
+go(DbName, DDoc, IndexName, OldArgs) ->
+ go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)).
+
+
+handle_message({ok, NewTotalHits, NewTotalGroupedHits, NewTopGroups},
+ Shard, State0) ->
+ State = upgrade_state(State0),
+ #state{total_hits=TotalHits, total_grouped_hits=TotalGroupedHits,
+ top_groups=TopGroups, limit=Limit, sort=Sort} = State,
+ case fabric_dict:lookup_element(Shard, State#state.counters) of
+ undefined ->
+ %% already heard from someone else in this range
+ {ok, State};
+ nil ->
+ C1 = fabric_dict:store(Shard, ok, State#state.counters),
+ C2 = fabric_view:remove_overlapping_shards(Shard, C1),
+ MergedTotalHits = NewTotalHits + TotalHits,
+ MergedTotalGroupedHits = NewTotalGroupedHits + TotalGroupedHits,
+ Sortable = make_sortable(Shard, NewTopGroups),
+ MergedTopGroups = merge_top_groups(TopGroups, Sortable, Limit, Sort),
+ State1 = State#state{
+ counters=C2,
+ total_hits=MergedTotalHits,
+ total_grouped_hits=MergedTotalGroupedHits,
+ top_groups=MergedTopGroups
+ },
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, State1};
+ false ->
+ {stop, {MergedTotalHits, MergedTotalGroupedHits,
+ remove_sortable(MergedTopGroups)}}
+ end
+ end;
+
+handle_message(Error, Worker, State0) ->
+ State = upgrade_state(State0),
+ case dreyfus_fabric:handle_error_message(Error, Worker,
+ State#state.counters, State#state.replacements,
+ group2, State#state.start_args) of
+ {ok, Counters} ->
+ {ok, State#state{counters=Counters}};
+ {new_refs, NewRefs, NewCounters, NewReplacements} ->
+ NewState = State#state{
+ counters = NewCounters,
+ replacements = NewReplacements
+ },
+ {new_refs, NewRefs, NewState};
+ Else ->
+ Else
+ end.
+
+merge_top_groups([], TopGroups, _Limit, _Sort) ->
+ TopGroups;
+merge_top_groups(TopGroupsA, TopGroupsB, Limit, Sort) ->
+ lists:zipwith(fun(A,B) -> merge_top_group(A, B, Limit, Sort) end,
+ TopGroupsA,
+ TopGroupsB).
+
+merge_top_group({Name, TotalA, HitsA}, {Name, TotalB, HitsB}, Limit, Sort) ->
+ MergedHits = lists:sublist(dreyfus_util:sort(Sort, HitsA ++ HitsB), Limit),
+ {Name, TotalA + TotalB, MergedHits}.
+
+
+make_sortable(Shard, TopGroups) ->
+ [make_sortable_group(Shard, TopGroup) || TopGroup <- TopGroups].
+
+make_sortable_group(Shard, {Name, TotalHits, Hits}) ->
+ {Name, TotalHits, [make_sortable_hit(Shard, Hit) || Hit <- Hits]}.
+
+make_sortable_hit(Shard, Hit) ->
+ #sortable{item=Hit, order=Hit#hit.order, shard=Shard}.
+
+remove_sortable(SortableGroups) ->
+ [remove_sortable_group(G) || G <- SortableGroups].
+
+remove_sortable_group({Name, TotalHits, SortableHits}) ->
+ {Name, TotalHits, [remove_sortable_hit(H) || H <- SortableHits]}.
+
+remove_sortable_hit(SortableHit) ->
+ SortableHit#sortable.item.
+
+upgrade_state({state, Limit, Sort, TotalHits, TotalGroupedHits,
+ TopGroups, Counters}) ->
+ #state{limit = Limit, sort = Sort, total_hits = TotalHits,
+ total_grouped_hits = TotalGroupedHits,
+ top_groups = TopGroups, counters = Counters,
+ replacements = []};
+upgrade_state(#state{} = State) ->
+ State.
diff --git a/src/dreyfus/src/dreyfus_fabric_info.erl b/src/dreyfus/src/dreyfus_fabric_info.erl
new file mode 100644
index 000000000..27eec8065
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_fabric_info.erl
@@ -0,0 +1,108 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_fabric_info).
+
+-include("dreyfus.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-export([go/4]).
+
+go(DbName, DDocId, IndexName, InfoLevel) when is_binary(DDocId) ->
+ {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", DDocId/binary>>, []),
+ dreyfus_util:maybe_deny_index(DbName, DDocId, IndexName),
+ go(DbName, DDoc, IndexName, InfoLevel);
+
+go(DbName, DDoc, IndexName, InfoLevel) ->
+ DesignName = dreyfus_util:get_design_docid(DDoc),
+ dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
+ Shards = mem3:shards(DbName),
+ Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, InfoLevel, [DDoc, IndexName]),
+ RexiMon = fabric_util:create_monitors(Shards),
+ Acc0 = {fabric_dict:init(Workers, nil), []},
+ try
+ fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, {Counters, Acc}) ->
+ case fabric_util:remove_down_workers(Counters, NodeRef) of
+ {ok, NewCounters} ->
+ {ok, {NewCounters, Acc}};
+ error ->
+ {error, {nodedown, <<"progress not possible">>}}
+ end;
+
+handle_message({rexi_EXIT, Reason}, Worker, {Counters, Acc}) ->
+ NewCounters = fabric_dict:erase(Worker, Counters),
+ case fabric_view:is_progress_possible(NewCounters) of
+ true ->
+ {ok, {NewCounters, Acc}};
+ false ->
+ {error, Reason}
+ end;
+
+handle_message({ok, Info}, Worker, {Counters, Acc}) ->
+ case fabric_dict:lookup_element(Worker, Counters) of
+ undefined ->
+ % already heard from someone else in this range
+ {ok, {Counters, Acc}};
+ nil ->
+ C1 = fabric_dict:store(Worker, ok, Counters),
+ C2 = fabric_view:remove_overlapping_shards(Worker, C1),
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, {C2, [Info|Acc]}};
+ false ->
+ {stop, merge_results(lists:flatten([Info|Acc]))}
+ end
+ end;
+
+handle_message({error, Reason}, Worker, {Counters, Acc}) ->
+ NewCounters = fabric_dict:erase(Worker, Counters),
+ case fabric_view:is_progress_possible(NewCounters) of
+ true ->
+ {ok, {NewCounters, Acc}};
+ false ->
+ {error, Reason}
+ end;
+handle_message({'EXIT', _}, Worker, {Counters, Acc}) ->
+ NewCounters = fabric_dict:erase(Worker, Counters),
+ case fabric_view:is_progress_possible(NewCounters) of
+ true ->
+ {ok, {NewCounters, Acc}};
+ false ->
+ {error, {nodedown, <<"progress not possible">>}}
+ end.
+
+merge_results(Info) ->
+ Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
+ orddict:new(), Info),
+ orddict:fold(fun
+ (disk_size, X, Acc) ->
+ [{disk_size, lists:sum(X)} | Acc];
+ (doc_count, X, Acc) ->
+ [{doc_count, lists:sum(X)} | Acc];
+ (doc_del_count, X, Acc) ->
+ [{doc_del_count, lists:sum(X)} | Acc];
+ (committed_seq, X, Acc) ->
+ [{committed_seq, lists:sum(X)} | Acc];
+ (pending_seq, X, Acc) ->
+ [{pending_seq, lists:sum(X)} | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Dict).
diff --git a/src/dreyfus/src/dreyfus_fabric_search.erl b/src/dreyfus/src/dreyfus_fabric_search.erl
new file mode 100644
index 000000000..acf7a83ec
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_fabric_search.erl
@@ -0,0 +1,265 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_fabric_search).
+
+-include("dreyfus.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-export([go/4]).
+
+-record(state, {
+ limit,
+ sort,
+ top_docs,
+ counters,
+ start_args,
+ replacements
+}).
+
+go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
+ {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>,
+ [ejson_body]),
+ dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
+ go(DbName, DDoc, IndexName, QueryArgs);
+
+go(DbName, DDoc, IndexName, #index_query_args{bookmark=nil}=QueryArgs) ->
+ DesignName = dreyfus_util:get_design_docid(DDoc),
+ dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
+ Shards = dreyfus_util:get_shards(DbName, QueryArgs),
+ Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search,
+ [DDoc, IndexName, dreyfus_util:export(QueryArgs)]),
+ Counters = fabric_dict:init(Workers, nil),
+ go(DbName, DDoc, IndexName, QueryArgs, Counters, Counters);
+
+go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
+ Bookmark0 = try dreyfus_bookmark:unpack(DbName, QueryArgs)
+ catch
+ _:_ ->
+ throw({bad_request, "Invalid bookmark parameter supplied"})
+ end,
+ Shards = dreyfus_util:get_shards(DbName, QueryArgs),
+ LiveNodes = [node() | nodes()],
+ LiveShards = [S || #shard{node=Node} = S <- Shards, lists:member(Node, LiveNodes)],
+ Bookmark1 = dreyfus_bookmark:add_missing_shards(Bookmark0, LiveShards),
+ Counters0 = lists:flatmap(fun({#shard{name=Name, node=N} = Shard, After}) ->
+ QueryArgs1 = dreyfus_util:export(QueryArgs#index_query_args{
+ bookmark = After
+ }),
+ case lists:member(Shard, LiveShards) of
+ true ->
+ Ref = rexi:cast(N, {dreyfus_rpc, search,
+ [Name, DDoc, IndexName, QueryArgs1]}),
+ [Shard#shard{ref = Ref}];
+ false ->
+ lists:map(fun(#shard{name=Name2, node=N2} = NewShard) ->
+ Ref = rexi:cast(N2, {dreyfus_rpc, search,
+ [Name2, DDoc, IndexName, QueryArgs1]}),
+ NewShard#shard{ref = Ref}
+ end, find_replacement_shards(Shard, LiveShards))
+ end
+ end, Bookmark1),
+ Counters = fabric_dict:init(Counters0, nil),
+ QueryArgs2 = QueryArgs#index_query_args{
+ bookmark = Bookmark1
+ },
+ go(DbName, DDoc, IndexName, QueryArgs2, Counters, Bookmark1);
+go(DbName, DDoc, IndexName, OldArgs) ->
+ go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)).
+
+go(DbName, DDoc, IndexName, QueryArgs, Counters, Bookmark) ->
+ {Workers, _} = lists:unzip(Counters),
+ #index_query_args{
+ limit = Limit,
+ sort = Sort,
+ raw_bookmark = RawBookmark
+ } = QueryArgs,
+ Replacements = fabric_view:get_shard_replacements(DbName, Workers),
+ State = #state{
+ limit = Limit,
+ sort = Sort,
+ top_docs = #top_docs{total_hits=0,hits=[]},
+ counters = Counters,
+ start_args = [DDoc, IndexName, QueryArgs],
+ replacements = Replacements
+ },
+ RexiMon = fabric_util:create_monitors(Workers),
+ try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
+ State, infinity, 1000 * 60 * 60) of
+ {ok, Result} ->
+ #state{top_docs=TopDocs} = Result,
+ #top_docs{total_hits=TotalHits, hits=Hits,
+ counts=Counts, ranges=Ranges} = TopDocs,
+ case RawBookmark of
+ true ->
+ {ok, Bookmark, TotalHits, Hits, Counts, Ranges};
+ false ->
+ Bookmark1 = dreyfus_bookmark:update(Sort, Bookmark, Hits),
+ Hits1 = remove_sortable(Hits),
+ {ok, Bookmark1, TotalHits, Hits1, Counts, Ranges}
+ end;
+ {error, Reason} ->
+ {error, Reason}
+ after
+ rexi_monitor:stop(RexiMon),
+ fabric_util:cleanup(Workers)
+ end.
+
+handle_message({ok, #top_docs{}=NewTopDocs}, Shard, State0) ->
+ State = upgrade_state(State0),
+ #state{top_docs=TopDocs, limit=Limit, sort=Sort} = State,
+ case fabric_dict:lookup_element(Shard, State#state.counters) of
+ undefined ->
+ %% already heard from someone else in this range
+ {ok, State};
+ nil ->
+ C1 = fabric_dict:store(Shard, ok, State#state.counters),
+ C2 = fabric_view:remove_overlapping_shards(Shard, C1),
+ Sortable = make_sortable(Shard, NewTopDocs),
+ MergedTopDocs = merge_top_docs(TopDocs, Sortable, Limit, Sort),
+ State1 = State#state{
+ counters=C2,
+ top_docs=MergedTopDocs
+ },
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, State1};
+ false ->
+ {stop, State1}
+ end
+ end;
+
+% upgrade clause
+handle_message({ok, {top_docs, UpdateSeq, TotalHits, Hits}}, Shard, State) ->
+ TopDocs = #top_docs{
+ update_seq = UpdateSeq,
+ total_hits = TotalHits,
+ hits = Hits},
+ handle_message({ok, TopDocs}, Shard, State);
+
+handle_message(Error, Worker, State0) ->
+ State = upgrade_state(State0),
+ case dreyfus_fabric:handle_error_message(Error, Worker,
+ State#state.counters, State#state.replacements,
+ search, State#state.start_args) of
+ {ok, Counters} ->
+ {ok, State#state{counters=Counters}};
+ {new_refs, NewRefs, NewCounters, NewReplacements} ->
+ NewState = State#state{
+ counters = NewCounters,
+ replacements = NewReplacements
+ },
+ {new_refs, NewRefs, NewState};
+ Else ->
+ Else
+ end.
+
+find_replacement_shards(#shard{range=Range}, AllShards) ->
+ [Shard || Shard <- AllShards, Shard#shard.range =:= Range].
+
+make_sortable(Shard, #top_docs{}=TopDocs) ->
+ Hits = make_sortable(Shard, TopDocs#top_docs.hits),
+ TopDocs#top_docs{hits=Hits};
+make_sortable(Shard, List) when is_list(List) ->
+ make_sortable(Shard, List, []).
+
+make_sortable(_, [], Acc) ->
+ lists:reverse(Acc);
+make_sortable(Shard, [#hit{}=Hit|Rest], Acc) ->
+ make_sortable(Shard, Rest, [#sortable{item=Hit, order=Hit#hit.order, shard=Shard} | Acc]).
+
+remove_sortable(List) ->
+ remove_sortable(List, []).
+
+remove_sortable([], Acc) ->
+ lists:reverse(Acc);
+remove_sortable([#sortable{item=Item} | Rest], Acc) ->
+ remove_sortable(Rest, [Item | Acc]).
+
+merge_top_docs(#top_docs{}=TopDocsA, #top_docs{}=TopDocsB, Limit, Sort) ->
+ MergedTotal = sum_element(#top_docs.total_hits, TopDocsA, TopDocsB),
+ MergedHits = lists:sublist(dreyfus_util:sort(Sort,
+ TopDocsA#top_docs.hits ++ TopDocsB#top_docs.hits), Limit),
+ MergedCounts = merge_facets(TopDocsA#top_docs.counts, TopDocsB#top_docs.counts),
+ MergedRanges = merge_facets(TopDocsA#top_docs.ranges, TopDocsB#top_docs.ranges),
+ #top_docs{total_hits=MergedTotal, hits=MergedHits,
+ counts=MergedCounts, ranges=MergedRanges}.
+
+merge_facets(undefined, undefined) ->
+ undefined;
+merge_facets(undefined, Facets) ->
+ sort_facets(Facets);
+merge_facets(Facets, undefined) ->
+ sort_facets(Facets);
+merge_facets(FacetsA, FacetsB) ->
+ merge_facets_int(sort_facets(FacetsA), sort_facets(FacetsB)).
+
+merge_facets_int([], []) ->
+ [];
+merge_facets_int(FacetsA, []) ->
+ FacetsA;
+merge_facets_int([], FacetsB) ->
+ FacetsB;
+merge_facets_int([{KA, _, _}=A | RA], [{KB, _, _} | _]=FB) when KA < KB ->
+ [A | merge_facets_int(RA, FB)];
+merge_facets_int([{KA, VA, CA} | RA], [{KB, VB, CB} | RB]) when KA =:= KB ->
+ [{KA, VA+VB, merge_facets_int(CA, CB)} | merge_facets_int(RA, RB)];
+merge_facets_int([{KA, _, _} | _]=FA, [{KB, _, _}=B | RB]) when KA > KB ->
+ [B | merge_facets_int(FA, RB)].
+
+sort_facets([]) ->
+ [];
+sort_facets(Facets) ->
+ lists:sort(lists:map(fun({K, V, C}) -> {K, V, sort_facets(C)} end,
+ Facets)).
+
+sum_element(N, T1, T2) ->
+ element(N, T1) + element(N, T2).
+
+upgrade_state({state, Limit, Sort, TopDocs, Counters}) ->
+ #state{limit=Limit, sort=Sort, top_docs=TopDocs, counters=Counters,
+ replacements=[]};
+upgrade_state(#state{}=State) ->
+ State.
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+merge_facets_test() ->
+ % empty list is a no-op
+ ?assertEqual([{foo, 1.0, []}], merge_facets([{foo, 1.0, []}], [])),
+
+ % one level, one key
+ ?assertEqual([{foo, 3.0, []}],
+ merge_facets([{foo, 1.0, []}],
+ [{foo, 2.0, []}])),
+
+ % one level, two keys
+ ?assertEqual([{bar, 6.0, []}, {foo, 9.0, []}],
+ merge_facets([{foo, 1.0, []}, {bar, 2.0, []}],
+ [{bar, 4.0, []}, {foo, 8.0, []}])),
+
+ % multi level, multi keys
+ ?assertEqual([{foo, 2.0, [{bar, 2.0, []}]}],
+ merge_facets([{foo, 1.0, [{bar, 1.0, []}]}],
+ [{foo, 1.0, [{bar, 1.0, []}]}])),
+
+ ?assertEqual([{foo, 5.0, [{bar, 7.0, [{bar, 1.0, []}, {baz, 3.0, []}, {foo, 6.5, []}]}]}],
+ merge_facets([{foo, 1.0, [{bar, 2.0, [{baz, 3.0, []}, {foo, 0.5, []}]}]}],
+ [{foo, 4.0, [{bar, 5.0, [{foo, 6.0, []}, {bar, 1.0, []}]}]}])).
+
+
+-endif.
diff --git a/src/dreyfus/src/dreyfus_httpd.erl b/src/dreyfus/src/dreyfus_httpd.erl
new file mode 100644
index 000000000..5c9db80d1
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_httpd.erl
@@ -0,0 +1,600 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_httpd).
+
+-export([handle_search_req/3, handle_info_req/3, handle_disk_size_req/3,
+ handle_cleanup_req/2, handle_analyze_req/1]).
+
+-include("dreyfus.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-import(chttpd, [send_method_not_allowed/2, send_json/2, send_json/3,
+ send_error/2]).
+
+handle_search_req(Req, Db, DDoc) ->
+ handle_search_req(Req, Db, DDoc, 0, 500).
+
+handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req
+ ,Db, DDoc, RetryCount, RetryPause)
+ when Method == 'GET'; Method == 'POST' ->
+ DbName = couch_db:name(Db),
+ Start = os:timestamp(),
+ QueryArgs = #index_query_args{
+ include_docs = IncludeDocs,
+ grouping = Grouping
+ } = parse_index_params(Req, Db),
+ validate_search_restrictions(Db, DDoc, QueryArgs),
+ Response = case Grouping#grouping.by of
+ nil ->
+ case dreyfus_fabric_search:go(DbName, DDoc, IndexName, QueryArgs) of
+ {ok, Bookmark0, TotalHits, Hits0} -> % legacy clause
+ Hits = hits_to_json(DbName, IncludeDocs, Hits0),
+ Bookmark = dreyfus_bookmark:pack(Bookmark0),
+ send_json(Req, 200, {[
+ {total_rows, TotalHits},
+ {bookmark, Bookmark},
+ {rows, Hits}
+ ]});
+ {ok, Bookmark0, TotalHits, Hits0, Counts0, Ranges0} ->
+ Hits = hits_to_json(DbName, IncludeDocs, Hits0),
+ Bookmark = dreyfus_bookmark:pack(Bookmark0),
+ Counts = case Counts0 of
+ undefined ->
+ [];
+ _ ->
+ [{counts, facets_to_json(Counts0)}]
+ end,
+ Ranges = case Ranges0 of
+ undefined ->
+ [];
+ _ ->
+ [{ranges, facets_to_json(Ranges0)}]
+ end,
+ send_json(Req, 200, {[
+ {total_rows, TotalHits},
+ {bookmark, Bookmark},
+ {rows, Hits}
+ ] ++ Counts ++ Ranges
+ });
+ {error, Reason} ->
+ handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
+ end;
+ _ ->
+ % ensure limit in group query >0
+ LimitValue = parse_positive_int_param("limit", QueryArgs#index_query_args.limit,
+ "max_limit", "200"),
+ UseNewApi = Grouping#grouping.new_api,
+ case dreyfus_fabric_group1:go(DbName, DDoc, IndexName, QueryArgs) of
+ {ok, []} ->
+ send_grouped_response(Req, {0, 0, []}, UseNewApi);
+ {ok, TopGroups} ->
+ QueryArgs1 = QueryArgs#index_query_args{grouping=Grouping#grouping{groups=TopGroups}},
+ case dreyfus_fabric_group2:go(DbName, DDoc,
+ IndexName, QueryArgs1) of
+ {ok, {TotalHits, TotalGroupedHits, Groups0}} ->
+ Groups = [group_to_json(DbName, IncludeDocs, Group, UseNewApi) || Group <- Groups0],
+ send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi);
+ {error, Reason} ->
+ handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
+ end;
+ {error, Reason} ->
+ handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
+ end
+ end,
+ RequestTime = timer:now_diff(os:timestamp(), Start) div 1000,
+ couch_stats:update_histogram([dreyfus, httpd, search], RequestTime),
+ Response;
+handle_search_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
+ send_method_not_allowed(Req, "GET,POST");
+handle_search_req(Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
+ send_error(Req, {bad_request, "path not recognized"}).
+
+handle_info_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req
+ ,Db, #doc{id=Id}=DDoc) ->
+ DbName = couch_db:name(Db),
+ case dreyfus_fabric_info:go(DbName, DDoc, IndexName, info) of
+ {ok, IndexInfoList} ->
+ send_json(Req, 200, {[
+ {name, <<Id/binary,"/",IndexName/binary>>},
+ {search_index, {IndexInfoList}}
+ ]});
+ {error, Reason} ->
+ send_error(Req, Reason)
+ end;
+handle_info_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) ->
+ send_method_not_allowed(Req, "GET");
+handle_info_req(Req, _Db, _DDoc) ->
+ send_error(Req, {bad_request, "path not recognized"}).
+
+handle_disk_size_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req, Db, #doc{id=Id}=DDoc) ->
+ DbName = couch_db:name(Db),
+ case dreyfus_fabric_info:go(DbName, DDoc, IndexName, disk_size) of
+ {ok, IndexInfoList} ->
+ send_json(Req, 200, {[
+ {name, <<Id/binary,"/",IndexName/binary>>},
+ {search_index, {IndexInfoList}}
+ ]});
+ {error, Reason} ->
+ send_error(Req, Reason)
+ end;
+handle_disk_size_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) ->
+ send_method_not_allowed(Req, "GET");
+handle_disk_size_req(Req, _Db, _DDoc) ->
+ send_error(Req, {bad_request, "path not recognized"}).
+
+handle_cleanup_req(#httpd{method='POST'}=Req, Db) ->
+ ok = dreyfus_fabric_cleanup:go(couch_db:name(Db)),
+ send_json(Req, 202, {[{ok, true}]});
+handle_cleanup_req(Req, _Db) ->
+ send_method_not_allowed(Req, "POST").
+
+handle_analyze_req(#httpd{method='GET'}=Req) ->
+ Analyzer = couch_httpd:qs_value(Req, "analyzer"),
+ Text = couch_httpd:qs_value(Req, "text"),
+ analyze(Req, Analyzer, Text);
+handle_analyze_req(#httpd{method='POST'}=Req) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ {Fields} = chttpd:json_body_obj(Req),
+ Analyzer = couch_util:get_value(<<"analyzer">>, Fields),
+ Text = couch_util:get_value(<<"text">>, Fields),
+ analyze(Req, Analyzer, Text);
+handle_analyze_req(Req) ->
+ send_method_not_allowed(Req, "GET,POST").
+
+analyze(Req, Analyzer, Text) ->
+ case Analyzer of
+ undefined ->
+ throw({bad_request, "analyzer parameter is mandatory"});
+ _ when is_list(Analyzer) ->
+ ok;
+ _ when is_binary(Analyzer) ->
+ ok;
+ {[_|_]} ->
+ ok;
+ _ ->
+ throw({bad_request, "analyzer parameter must be a string or an object"})
+ end,
+ case Text of
+ undefined ->
+ throw({bad_request, "text parameter is mandatory"});
+ _ when is_list(Text) ->
+ ok;
+ _ when is_binary(Text) ->
+ ok;
+ _ ->
+ throw({bad_request, "text parameter must be a string"})
+ end,
+ case clouseau_rpc:analyze(couch_util:to_binary(Analyzer),
+ couch_util:to_binary(Text)) of
+ {ok, Tokens} ->
+ send_json(Req, 200, {[{tokens, Tokens}]});
+ {error, Reason} ->
+ send_error(Req, Reason)
+ end.
+
+parse_index_params(#httpd{method='GET'}=Req, Db) ->
+ IndexParams = lists:flatmap(fun({K, V}) -> parse_index_param(K, V) end,
+ chttpd:qs(Req)),
+ parse_index_params(IndexParams, Db);
+parse_index_params(#httpd{method='POST'}=Req, Db) ->
+ {JsonBody} = chttpd:json_body_obj(Req),
+ QSEntry = case chttpd:qs_value(Req, "partition") of
+ undefined -> [];
+ StrVal -> [{<<"partition">>, ?l2b(StrVal)}]
+ end,
+ IndexParams = lists:flatmap(fun({K, V}) ->
+ parse_json_index_param(K, V)
+ end, QSEntry ++ JsonBody),
+ ensure_unique_partition(IndexParams),
+ parse_index_params(IndexParams, Db);
+parse_index_params(IndexParams, Db) ->
+ DefaultLimit = case fabric_util:is_partitioned(Db) of
+ true ->
+ list_to_integer(config:get("dreyfus", "limit_partitions", "2000"));
+ false ->
+ list_to_integer(config:get("dreyfus", "limit", "25"))
+ end,
+ Args = #index_query_args{limit=DefaultLimit},
+ lists:foldl(fun({K, V}, Args2) ->
+ validate_index_query(K, V, Args2)
+ end, Args, IndexParams).
+
+validate_index_query(q, Value, Args) ->
+ Args#index_query_args{q=Value};
+validate_index_query(partition, Value, Args) ->
+ Args#index_query_args{partition=Value};
+validate_index_query(stale, Value, Args) ->
+ Args#index_query_args{stale=Value};
+validate_index_query(limit, Value, Args) ->
+ Args#index_query_args{limit=Value};
+validate_index_query(include_docs, Value, Args) ->
+ Args#index_query_args{include_docs=Value};
+validate_index_query(include_fields, Value, Args) ->
+ Args#index_query_args{include_fields=Value};
+validate_index_query(bookmark, Value, Args) ->
+ Args#index_query_args{bookmark=Value};
+validate_index_query(sort, Value, Args) ->
+ Args#index_query_args{sort=Value};
+validate_index_query(group_by, Value, #index_query_args{grouping=Grouping}=Args) ->
+ Args#index_query_args{grouping=Grouping#grouping{by=Value, new_api=false}};
+validate_index_query(group_field, Value, #index_query_args{grouping=Grouping}=Args) ->
+ Args#index_query_args{grouping=Grouping#grouping{by=Value, new_api=true}};
+validate_index_query(group_sort, Value, #index_query_args{grouping=Grouping}=Args) ->
+ Args#index_query_args{grouping=Grouping#grouping{sort=Value}};
+validate_index_query(group_limit, Value, #index_query_args{grouping=Grouping}=Args) ->
+ Args#index_query_args{grouping=Grouping#grouping{limit=Value}};
+validate_index_query(stable, Value, Args) ->
+ Args#index_query_args{stable=Value};
+validate_index_query(counts, Value, Args) ->
+ Args#index_query_args{counts=Value};
+validate_index_query(ranges, Value, Args) ->
+ Args#index_query_args{ranges=Value};
+validate_index_query(drilldown, Value, Args) ->
+ DrillDown = Args#index_query_args.drilldown,
+ Args#index_query_args{drilldown=[Value|DrillDown]};
+validate_index_query(highlight_fields, Value, Args) ->
+ Args#index_query_args{highlight_fields=Value};
+validate_index_query(highlight_pre_tag, Value, Args) ->
+ Args#index_query_args{highlight_pre_tag=Value};
+validate_index_query(highlight_post_tag, Value, Args) ->
+ Args#index_query_args{highlight_post_tag=Value};
+validate_index_query(highlight_number, Value, Args) ->
+ Args#index_query_args{highlight_number=Value};
+validate_index_query(highlight_size, Value, Args) ->
+ Args#index_query_args{highlight_size=Value};
+validate_index_query(extra, _Value, Args) ->
+ Args.
+
+parse_index_param("", _) ->
+ [];
+parse_index_param("q", Value) ->
+ [{q, ?l2b(Value)}];
+parse_index_param("query", Value) ->
+ [{q, ?l2b(Value)}];
+parse_index_param("partition", Value) ->
+ [{partition, ?l2b(Value)}];
+parse_index_param("bookmark", Value) ->
+ [{bookmark, ?l2b(Value)}];
+parse_index_param("sort", Value) ->
+ [{sort, ?JSON_DECODE(Value)}];
+parse_index_param("limit", Value) ->
+ [{limit, ?JSON_DECODE(Value)}];
+parse_index_param("stale", "ok") ->
+ [{stale, ok}];
+parse_index_param("stale", _Value) ->
+ throw({query_parse_error, <<"stale only available as stale=ok">>});
+parse_index_param("include_docs", Value) ->
+ [{include_docs, parse_bool_param("include_docs", Value)}];
+parse_index_param("group_by", Value) ->
+ [{group_by, ?l2b(Value)}];
+parse_index_param("group_field", Value) ->
+ [{group_field, ?l2b(Value)}];
+parse_index_param("group_sort", Value) ->
+ [{group_sort, ?JSON_DECODE(Value)}];
+parse_index_param("group_limit", Value) ->
+ [{group_limit, parse_positive_int_param("group_limit", Value, "max_group_limit", "200")}];
+parse_index_param("stable", Value) ->
+ [{stable, parse_bool_param("stable", Value)}];
+parse_index_param("include_fields", Value) ->
+ [{include_fields, ?JSON_DECODE(Value)}];
+parse_index_param("counts", Value) ->
+ [{counts, ?JSON_DECODE(Value)}];
+parse_index_param("ranges", Value) ->
+ [{ranges, ?JSON_DECODE(Value)}];
+parse_index_param("drilldown", Value) ->
+ [{drilldown, ?JSON_DECODE(Value)}];
+parse_index_param("highlight_fields", Value) ->
+ [{highlight_fields, ?JSON_DECODE(Value)}];
+parse_index_param("highlight_pre_tag", Value) ->
+ [{highlight_pre_tag, ?JSON_DECODE(Value)}];
+parse_index_param("highlight_post_tag", Value) ->
+ [{highlight_post_tag, ?JSON_DECODE(Value)}];
+parse_index_param("highlight_number", Value) ->
+ [{highlight_number, parse_positive_int_param2("highlight_number", Value)}];
+parse_index_param("highlight_size", Value) ->
+ [{highlight_size, parse_positive_int_param2("highlight_size", Value)}];
+parse_index_param(Key, Value) ->
+ [{extra, {Key, Value}}].
+
+parse_json_index_param(<<"q">>, Value) ->
+ [{q, Value}];
+parse_json_index_param(<<"query">>, Value) ->
+ [{q, Value}];
+parse_json_index_param(<<"partition">>, Value) ->
+ [{partition, Value}];
+parse_json_index_param(<<"bookmark">>, Value) ->
+ [{bookmark, Value}];
+parse_json_index_param(<<"sort">>, Value) ->
+ [{sort, Value}];
+parse_json_index_param(<<"limit">>, Value) ->
+ [{limit, Value}];
+parse_json_index_param(<<"stale">>, <<"ok">>) ->
+ [{stale, ok}];
+parse_json_index_param(<<"include_docs">>, Value) when is_boolean(Value) ->
+ [{include_docs, Value}];
+parse_json_index_param(<<"group_by">>, Value) ->
+ [{group_by, Value}];
+parse_json_index_param(<<"group_field">>, Value) ->
+ [{group_field, Value}];
+parse_json_index_param(<<"group_sort">>, Value) ->
+ [{group_sort, Value}];
+parse_json_index_param(<<"group_limit">>, Value) ->
+ [{group_limit, parse_positive_int_param("group_limit", Value, "max_group_limit", "200")}];
+parse_json_index_param(<<"stable">>, Value) ->
+ [{stable, parse_bool_param("stable", Value)}];
+parse_json_index_param(<<"include_fields">>, Value) ->
+ [{include_fields, Value}];
+parse_json_index_param(<<"counts">>, Value) ->
+ [{counts, Value}];
+parse_json_index_param(<<"ranges">>, Value) ->
+ [{ranges, Value}];
+parse_json_index_param(<<"drilldown">>, Value) ->
+ [{drilldown, Value}];
+parse_json_index_param(<<"highlight_fields">>, Value) ->
+ [{highlight_fields, Value}];
+parse_json_index_param(<<"highlight_pre_tag">>, Value) ->
+ [{highlight_pre_tag, Value}];
+parse_json_index_param(<<"highlight_pos_tag">>, Value) ->
+ [{highlight_post_tag, Value}];
+parse_json_index_param(<<"highlight_number">>, Value) ->
+ [{highlight_number, parse_positive_int_param2("highlight_number", Value)}];
+parse_json_index_param(<<"highlight_size">>, Value) ->
+ [{highlight_size, parse_positive_int_param2("highlight_size", Value)}];
+parse_json_index_param(Key, Value) ->
+ [{extra, {Key, Value}}].
+
+%% VV copied from chttpd_view.erl
+
+parse_bool_param(_, Val) when is_boolean(Val) ->
+ Val;
+parse_bool_param(_, "true") -> true;
+parse_bool_param(_, "false") -> false;
+parse_bool_param(Name, Val) ->
+ Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
+ throw({query_parse_error, ?l2b(Msg)}).
+
+parse_int_param(_, Val) when is_integer(Val) ->
+ Val;
+parse_int_param(Name, Val) ->
+ case (catch list_to_integer(Val)) of
+ IntVal when is_integer(IntVal) ->
+ IntVal;
+ _ ->
+ Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
+ throw({query_parse_error, ?l2b(Msg)})
+ end.
+
+parse_positive_int_param(Name, Val, Prop, Default) ->
+ MaximumVal = list_to_integer(
+ config:get("dreyfus", Prop, Default)),
+ case parse_int_param(Name, Val) of
+ IntVal when IntVal > MaximumVal ->
+ Fmt = "Value for ~s is too large, must not exceed ~p",
+ Msg = io_lib:format(Fmt, [Name, MaximumVal]),
+ throw({query_parse_error, ?l2b(Msg)});
+ IntVal when IntVal > 0 ->
+ IntVal;
+ IntVal when IntVal =< 0 ->
+ Fmt = "~s must be greater than zero",
+ Msg = io_lib:format(Fmt, [Name]),
+ throw({query_parse_error, ?l2b(Msg)});
+ _ ->
+ Fmt = "Invalid value for ~s: ~p",
+ Msg = io_lib:format(Fmt, [Name, Val]),
+ throw({query_parse_error, ?l2b(Msg)})
+ end.
+
+parse_positive_int_param2(Name, Val) ->
+ case parse_int_param(Name, Val) of
+ IntVal when IntVal > 0 ->
+ IntVal;
+ IntVal when IntVal =< 0 ->
+ Fmt = "~s must be greater than zero",
+ Msg = io_lib:format(Fmt, [Name]),
+ throw({query_parse_error, ?l2b(Msg)});
+ _ ->
+ Fmt = "Invalid value for ~s: ~p",
+ Msg = io_lib:format(Fmt, [Name, Val]),
+ throw({query_parse_error, ?l2b(Msg)})
+ end.
+
+parse_non_negative_int_param(Name, Val, Prop, Default) ->
+ MaximumVal = list_to_integer(
+ config:get("dreyfus", Prop, Default)),
+ case parse_int_param(Name, Val) of
+ IntVal when IntVal > MaximumVal ->
+ Fmt = "Value for ~s is too large, must not exceed ~p",
+ Msg = io_lib:format(Fmt, [Name, MaximumVal]),
+ throw({query_parse_error, ?l2b(Msg)});
+ IntVal when IntVal >= 0 ->
+ IntVal;
+ IntVal when IntVal < 0 ->
+ Fmt = "~s must be greater than or equal to zero",
+ Msg = io_lib:format(Fmt, [Name]),
+ throw({query_parse_error, ?l2b(Msg)});
+ _ ->
+ Fmt = "Invalid value for ~s: ~p",
+ Msg = io_lib:format(Fmt, [Name, Val]),
+ throw({query_parse_error, ?l2b(Msg)})
+ end.
+
+
+ensure_unique_partition(IndexParams) ->
+ Partitions = lists:filter(fun({Key, _Val}) ->
+ Key == partition
+ end, IndexParams),
+ case length(lists:usort(Partitions)) > 1 of
+ true ->
+ Msg = <<"Multiple conflicting values for `partition` provided">>,
+ throw({bad_request, Msg});
+ false ->
+ ok
+ end.
+
+
+validate_search_restrictions(Db, DDoc, Args) ->
+ #index_query_args{
+ q = Query,
+ partition = Partition,
+ grouping = Grouping,
+ limit = Limit
+ } = Args,
+ #grouping{
+ by = GroupBy
+ } = Grouping,
+
+ case Query of
+ undefined ->
+ Msg1 = <<"Query must include a 'q' or 'query' argument">>,
+ throw({query_parse_error, Msg1});
+ _ ->
+ ok
+ end,
+
+ DbPartitioned = fabric_util:is_partitioned(Db),
+ ViewPartitioned = get_view_partition_option(DDoc, DbPartitioned),
+
+ case not DbPartitioned andalso is_binary(Partition) of
+ true ->
+ Msg2 = <<"`partition` not supported on this index">>,
+ throw({bad_request, Msg2});
+ false ->
+ ok
+ end,
+
+ case {ViewPartitioned, is_binary(Partition)} of
+ {false, false} ->
+ ok;
+ {true, true} ->
+ ok;
+ {true, false} ->
+ Msg3 = <<"`partition` parameter is mandatory "
+ "for queries to this index.">>,
+ throw({bad_request, Msg3});
+ {false, true} ->
+ Msg4 = <<"`partition` not supported on this index">>,
+ throw({bad_request, Msg4})
+ end,
+
+ case DbPartitioned of
+ true ->
+ MaxLimit = config:get("dreyfus", "max_limit", "2000"),
+ parse_non_negative_int_param(
+ "limit", Limit, "max_limit_partitions", MaxLimit);
+ false ->
+ MaxLimit = config:get("dreyfus", "max_limit", "200"),
+ parse_non_negative_int_param("limit", Limit, "max_limit", MaxLimit)
+ end,
+
+ case GroupBy /= nil andalso is_binary(Partition) of
+ true ->
+ Msg5 = <<"`group_by` and `partition` are incompatible">>,
+ throw({bad_request, Msg5});
+ false ->
+ ok
+ end.
+
+
+get_view_partition_option(#doc{body = {Props}}, Default) ->
+ {Options} = couch_util:get_value(<<"options">>, Props, {[]}),
+ couch_util:get_value(<<"partitioned">>, Options, Default).
+
+
+hits_to_json(DbName, IncludeDocs, Hits) ->
+ {Ids, HitData} = lists:unzip(lists:map(fun get_hit_data/1, Hits)),
+ chttpd_stats:incr_rows(length(Hits)),
+ if IncludeDocs ->
+ chttpd_stats:incr_reads(length(Hits)),
+ {ok, JsonDocs} = dreyfus_fabric:get_json_docs(DbName, Ids),
+ lists:zipwith(fun(Hit, {Id, Doc}) ->
+ case Hit of
+ {Id, Order, Fields} ->
+ {[{id, Id}, {order, Order}, {fields, {Fields}}, Doc]};
+ {Id, Order, Fields, Highlights} ->
+ {[{id, Id}, {order, Order}, {fields, {Fields}},
+ {highlights, {Highlights}}, Doc]}
+ end
+ end, HitData, JsonDocs);
+
+ true ->
+ lists:map(fun(Hit) ->
+ case Hit of
+ {Id, Order, Fields} ->
+ {[{id, Id}, {order, Order}, {fields, {Fields}}]};
+ {Id, Order, Fields, Highlights} ->
+ {[{id, Id}, {order, Order}, {fields, {Fields}}, {highlights, {Highlights}}]}
+ end
+ end, HitData)
+ end.
+
+get_hit_data(Hit) ->
+ Id = couch_util:get_value(<<"_id">>, Hit#hit.fields),
+ Fields = lists:keydelete(<<"_id">>, 1, Hit#hit.fields),
+ case couch_util:get_value(<<"_highlights">>, Hit#hit.fields) of
+ undefined ->
+ {Id, {Id, Hit#hit.order, Fields}};
+ Highlights ->
+ Fields0 = lists:keydelete(<<"_highlights">>, 1, Fields),
+ {Id, {Id, Hit#hit.order, Fields0, Highlights}}
+ end.
+
+group_to_json(DbName, IncludeDocs, {Name, TotalHits, Hits}, UseNewApi) ->
+ {TotalHitsKey, HitsKey} = case UseNewApi of
+ true -> {total_rows, rows};
+ false -> {total_hits, hits}
+ end,
+ {[{by, Name},
+ {TotalHitsKey, TotalHits},
+ {HitsKey, hits_to_json(DbName, IncludeDocs, Hits)}]}.
+
+facets_to_json(Facets) ->
+ {[facet_to_json(F) || F <- Facets]}.
+
+facet_to_json({K, V, []}) ->
+ {hd(K), V};
+facet_to_json({K0, _V0, C0}) ->
+ C2 = [{tl(K1), V1, C1} || {K1, V1, C1} <- C0],
+ {hd(K0), facets_to_json(C2)}.
+
+send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi) ->
+ GroupResponsePairs = case UseNewApi of
+ true -> [{total_rows, TotalHits}, {groups, Groups}];
+ false -> [{total_hits, TotalHits}, {total_grouped_hits, TotalGroupedHits}, {groups, Groups}]
+ end,
+ send_json(Req, 200, {GroupResponsePairs}).
+
+handle_error(Req, Db, DDoc, RetryCount, RetryPause, {exit, _} = Err) ->
+ backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause, Err);
+handle_error(Req, Db, DDoc, RetryCount, RetryPause, {{normal, _}, _} = Err) ->
+ backoff_and_retry(Req, Db, DDoc, RetryPause, RetryCount, Err);
+handle_error(Req, _Db, _DDoc, _RetryCount, _RetryPause, Reason) ->
+ send_error(Req, Reason).
+
+backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause, Error) ->
+ RetryLimit = list_to_integer(config:get("dreyfus", "retry_limit", "5")),
+ case RetryCount > RetryLimit of
+ true ->
+ case Error of
+ {exit, noconnection} ->
+ SvcName = config:get("dreyfus", "name", "clouseau@127.0.0.1"),
+ ErrMsg = "Could not connect to the Clouseau Java service at " ++ SvcName,
+ send_error(Req, {ou_est_clouseau, ErrMsg});
+ _ ->
+ send_error(Req, timeout)
+ end;
+ false ->
+ timer:sleep(RetryPause),
+ handle_search_req(Req, Db, DDoc, RetryCount + 1, RetryPause * 2)
+ end.
diff --git a/src/dreyfus/src/dreyfus_httpd_handlers.erl b/src/dreyfus/src/dreyfus_httpd_handlers.erl
new file mode 100644
index 000000000..bf2be23b1
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_httpd_handlers.erl
@@ -0,0 +1,29 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_httpd_handlers).
+
+-export([url_handler/1, db_handler/1, design_handler/1]).
+
+url_handler(<<"_search_analyze">>) -> fun dreyfus_httpd:handle_analyze_req/1;
+url_handler(_) -> no_match.
+
+db_handler(<<"_search_cleanup">>) -> fun dreyfus_httpd:handle_cleanup_req/2;
+db_handler(_) -> no_match.
+
+design_handler(<<"_search">>) -> fun dreyfus_httpd:handle_search_req/3;
+design_handler(<<"_search_info">>) -> fun dreyfus_httpd:handle_info_req/3;
+design_handler(<<"_search_disk_size">>) -> fun dreyfus_httpd:handle_disk_size_req/3;
+design_handler(_) -> no_match.
diff --git a/src/dreyfus/src/dreyfus_index.erl b/src/dreyfus/src/dreyfus_index.erl
new file mode 100644
index 000000000..e33a208ee
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_index.erl
@@ -0,0 +1,367 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+%% A dreyfus_index gen_server is linked to its clouseau twin.
+
+-module(dreyfus_index).
+-behaviour(gen_server).
+-vsn(1).
+-include_lib("couch/include/couch_db.hrl").
+-include("dreyfus.hrl").
+
+
+% public api.
+-export([start_link/2, design_doc_to_index/2, await/2, search/2, info/1,
+ group1/2, group2/2,
+ design_doc_to_indexes/1]).
+
+% gen_server api.
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+% private definitions.
+-record(state, {
+ dbname,
+ index,
+ updater_pid=nil,
+ index_pid=nil,
+ waiting_list=[]
+}).
+
+% exported for callback.
+-export([search_int/2, group1_int/2, group2_int/2, info_int/1]).
+
+% public functions.
+start_link(DbName, Index) ->
+ proc_lib:start_link(?MODULE, init, [{DbName, Index}]).
+
+await(Pid, MinSeq) ->
+ MFA = {gen_server, call, [Pid, {await, MinSeq}, infinity]},
+ dreyfus_util:time([index, await], MFA).
+
+search(Pid0, QueryArgs) ->
+ Pid = to_index_pid(Pid0),
+ MFA = {?MODULE, search_int, [Pid, QueryArgs]},
+ dreyfus_util:time([index, search], MFA).
+
+group1(Pid0, QueryArgs) ->
+ Pid = to_index_pid(Pid0),
+ MFA = {?MODULE, group1_int, [Pid, QueryArgs]},
+ dreyfus_util:time([index, group1], MFA).
+
+group2(Pid0, QueryArgs) ->
+ Pid = to_index_pid(Pid0),
+ MFA = {?MODULE, group2_int, [Pid, QueryArgs]},
+ dreyfus_util:time([index, group2], MFA).
+
+info(Pid0) ->
+ Pid = to_index_pid(Pid0),
+ MFA = {?MODULE, info_int, [Pid]},
+ dreyfus_util:time([index, info], MFA).
+
+%% We either have a dreyfus_index gen_server pid or the remote
+%% clouseau pid.
+to_index_pid(Pid) ->
+ case node(Pid) == node() of
+ true -> gen_server:call(Pid, get_index_pid, infinity);
+ false -> Pid
+ end.
+
+design_doc_to_indexes(#doc{body={Fields}}=Doc) ->
+ RawIndexes = couch_util:get_value(<<"indexes">>, Fields, {[]}),
+ case RawIndexes of
+ {IndexList} when is_list(IndexList) ->
+ {IndexNames, _} = lists:unzip(IndexList),
+ lists:flatmap(
+ fun(IndexName) ->
+ case (catch design_doc_to_index(Doc, IndexName)) of
+ {ok, #index{}=Index} -> [Index];
+ _ -> []
+ end
+ end,
+ IndexNames);
+ _ -> []
+ end.
+
+% gen_server functions.
+
+init({DbName, Index}) ->
+ process_flag(trap_exit, true),
+ case open_index(DbName, Index) of
+ {ok, Pid, Seq} ->
+ State=#state{
+ dbname=DbName,
+ index=Index#index{current_seq=Seq, dbname=DbName},
+ index_pid=Pid
+ },
+ case couch_db:open_int(DbName, []) of
+ {ok, Db} ->
+ try couch_db:monitor(Db) after couch_db:close(Db) end,
+ dreyfus_util:maybe_create_local_purge_doc(Db, Pid, Index),
+ proc_lib:init_ack({ok, self()}),
+ gen_server:enter_loop(?MODULE, [], State);
+ Error ->
+ proc_lib:init_ack(Error)
+ end;
+ Error ->
+ proc_lib:init_ack(Error)
+ end.
+
+handle_call({await, RequestSeq}, From,
+ #state{
+ index=#index{dbname=DbName,name=IdxName,ddoc_id=DDocId,current_seq=Seq}=Index,
+ index_pid=IndexPid,
+ updater_pid=nil,
+ waiting_list=WaitList
+ }=State) when RequestSeq > Seq ->
+ DbName2 = mem3:dbname(DbName),
+ <<"_design/", GroupId/binary>> = DDocId,
+ NewState = case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of
+ false ->
+ UpPid = spawn_link(fun() ->
+ dreyfus_index_updater:update(IndexPid,Index)
+ end),
+ State#state{
+ updater_pid=UpPid,
+ waiting_list=[{From,RequestSeq}|WaitList]
+ };
+ _ ->
+ couch_log:notice("Index Blocked from Updating - db: ~p,"
+ " ddocid: ~p name: ~p", [DbName, DDocId, IdxName]),
+ State
+ end,
+ {noreply, NewState};
+handle_call({await, RequestSeq}, _From,
+ #state{index=#index{current_seq=Seq}}=State) when RequestSeq =< Seq ->
+ {reply, {ok, State#state.index_pid, Seq}, State};
+handle_call({await, RequestSeq}, From, #state{waiting_list=WaitList}=State) ->
+ {noreply, State#state{
+ waiting_list=[{From,RequestSeq}|WaitList]
+ }};
+
+handle_call(get_index_pid, _From, State) -> % upgrade
+ {reply, State#state.index_pid, State};
+
+handle_call({search, QueryArgs0}, _From, State) -> % obsolete
+ Reply = search_int(State#state.index_pid, QueryArgs0),
+ {reply, Reply, State};
+
+handle_call({group1, QueryArgs0}, _From, State) -> % obsolete
+ Reply = group1_int(State#state.index_pid, QueryArgs0),
+ {reply, Reply, State};
+
+handle_call({group2, QueryArgs0}, _From, State) -> % obsolete
+ Reply = group2_int(State#state.index_pid, QueryArgs0),
+ {reply, Reply, State};
+
+handle_call(info, _From, State) -> % obsolete
+ Reply = info_int(State#state.index_pid),
+ {reply, Reply, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info({'EXIT', FromPid, {updated, NewSeq}},
+ #state{
+ index=#index{dbname=DbName,name=IdxName,ddoc_id=DDocId}=Index0,
+ index_pid=IndexPid,
+ updater_pid=UpPid,
+ waiting_list=WaitList
+ }=State) when UpPid == FromPid ->
+ Index = Index0#index{current_seq=NewSeq},
+ case reply_with_index(IndexPid, Index, WaitList) of
+ [] ->
+ {noreply, State#state{index=Index,
+ updater_pid=nil,
+ waiting_list=[]
+ }};
+ StillWaiting ->
+ DbName2 = mem3:dbname(DbName),
+ <<"_design/", GroupId/binary>> = DDocId,
+ Pid = case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of
+ true ->
+ couch_log:notice("Index Blocked from Updating - db: ~p, ddocid: ~p"
+ " name: ~p", [DbName, GroupId, IdxName]),
+ nil;
+ false ->
+ spawn_link(fun() ->
+ dreyfus_index_updater:update(IndexPid, Index)
+ end)
+ end,
+ {noreply, State#state{index=Index,
+ updater_pid=Pid,
+ waiting_list=StillWaiting
+ }}
+ end;
+handle_info({'EXIT', _, {updated, _}}, State) ->
+ {noreply, State};
+handle_info({'EXIT', FromPid, Reason}, #state{
+ index=Index,
+ index_pid=IndexPid,
+ waiting_list=WaitList
+ }=State) when FromPid == IndexPid ->
+ couch_log:notice(
+ "index for ~p closed with reason ~p", [index_name(Index), Reason]),
+ [gen_server:reply(Pid, {error, Reason}) || {Pid, _} <- WaitList],
+ {stop, normal, State};
+handle_info({'EXIT', FromPid, Reason}, #state{
+ index=Index,
+ updater_pid=UpPid,
+ waiting_list=WaitList
+ }=State) when FromPid == UpPid ->
+ couch_log:info("Shutting down index server ~p, updater ~p closing w/ reason ~w",
+ [index_name(Index), UpPid, Reason]),
+ [gen_server:reply(Pid, {error, Reason}) || {Pid, _} <- WaitList],
+ {stop, normal, State};
+handle_info({'EXIT', Pid, Reason}, State) ->
+ % probably dreyfus_index_manager.
+ couch_log:notice("Unknown pid ~p closed with reason ~p", [Pid, Reason]),
+ {stop, normal, State};
+handle_info({'DOWN',_,_,Pid,Reason}, #state{
+ index=Index,
+ waiting_list=WaitList
+ }=State) ->
+ couch_log:info("Shutting down index server ~p, db ~p closing w/ reason ~w",
+ [index_name(Index), Pid, Reason]),
+ [gen_server:reply(P, {error, Reason}) || {P, _} <- WaitList],
+ {stop, normal, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+% private functions.
+
+open_index(DbName, #index{analyzer=Analyzer, sig=Sig}) ->
+ Path = <<DbName/binary,"/",Sig/binary>>,
+ case clouseau_rpc:open_index(self(), Path, Analyzer) of
+ {ok, Pid} ->
+ case clouseau_rpc:get_update_seq(Pid) of
+ {ok, Seq} ->
+ {ok, Pid, Seq};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+design_doc_to_index(#doc{id=Id,body={Fields}}, IndexName) ->
+ Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
+ {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
+ InvalidDDocError = {invalid_design_doc,
+ <<"index `", IndexName/binary, "` must have parameter `index`">>},
+ case lists:keyfind(IndexName, 1, RawIndexes) of
+ false ->
+ {error, {not_found, <<IndexName/binary, " not found.">>}};
+ {IndexName, {Index}} ->
+ Analyzer = couch_util:get_value(<<"analyzer">>, Index, <<"standard">>),
+ case couch_util:get_value(<<"index">>, Index) of
+ undefined ->
+ {error, InvalidDDocError};
+ Def ->
+ Sig = ?l2b(couch_util:to_hex(crypto:hash(md5,
+ term_to_binary({Analyzer, Def})))),
+ {ok, #index{
+ analyzer=Analyzer,
+ ddoc_id=Id,
+ def=Def,
+ def_lang=Language,
+ name=IndexName,
+ sig=Sig}}
+ end;
+ _ ->
+ {error, InvalidDDocError}
+ end.
+
+reply_with_index(IndexPid, Index, WaitList) ->
+ reply_with_index(IndexPid, Index, WaitList, []).
+
+reply_with_index(_IndexPid, _Index, [], Acc) ->
+ Acc;
+reply_with_index(IndexPid, #index{current_seq=IndexSeq}=Index, [{Pid, Seq}|Rest], Acc) when Seq =< IndexSeq ->
+ gen_server:reply(Pid, {ok, IndexPid, IndexSeq}),
+ reply_with_index(IndexPid, Index, Rest, Acc);
+reply_with_index(IndexPid, Index, [{Pid, Seq}|Rest], Acc) ->
+ reply_with_index(IndexPid, Index, Rest, [{Pid, Seq}|Acc]).
+
+index_name(#index{dbname=DbName,ddoc_id=DDocId,name=IndexName}) ->
+ <<DbName/binary, " ", DDocId/binary, " ", IndexName/binary>>.
+
+args_to_proplist(#index_query_args{} = Args) ->
+ [
+ {'query', Args#index_query_args.q},
+ {partition, Args#index_query_args.partition},
+ {limit, Args#index_query_args.limit},
+ {refresh, Args#index_query_args.stale =:= false},
+ {'after', Args#index_query_args.bookmark},
+ {sort, Args#index_query_args.sort},
+ {include_fields, Args#index_query_args.include_fields},
+ {counts, Args#index_query_args.counts},
+ {ranges, Args#index_query_args.ranges},
+ {drilldown, Args#index_query_args.drilldown},
+ {highlight_fields, Args#index_query_args.highlight_fields},
+ {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
+ {highlight_post_tag, Args#index_query_args.highlight_post_tag},
+ {highlight_number, Args#index_query_args.highlight_number},
+ {highlight_size, Args#index_query_args.highlight_size}
+ ].
+
+args_to_proplist2(#index_query_args{} = Args) ->
+ [
+ {'query', Args#index_query_args.q},
+ {field, Args#index_query_args.grouping#grouping.by},
+ {refresh, Args#index_query_args.stale =:= false},
+ {groups, Args#index_query_args.grouping#grouping.groups},
+ {group_sort, Args#index_query_args.grouping#grouping.sort},
+ {sort, Args#index_query_args.sort},
+ {limit, Args#index_query_args.limit},
+ {include_fields, Args#index_query_args.include_fields},
+ {highlight_fields, Args#index_query_args.highlight_fields},
+ {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
+ {highlight_post_tag, Args#index_query_args.highlight_post_tag},
+ {highlight_number, Args#index_query_args.highlight_number},
+ {highlight_size, Args#index_query_args.highlight_size}
+ ].
+
+search_int(Pid, QueryArgs0) ->
+ QueryArgs = dreyfus_util:upgrade(QueryArgs0),
+ Props = args_to_proplist(QueryArgs),
+ clouseau_rpc:search(Pid, Props).
+
+group1_int(Pid, QueryArgs0) ->
+ QueryArgs = dreyfus_util:upgrade(QueryArgs0),
+ #index_query_args{
+ q = Query,
+ stale = Stale,
+ grouping = #grouping{
+ by = GroupBy,
+ offset = Offset,
+ limit = Limit,
+ sort = Sort
+ }
+ } = QueryArgs,
+ clouseau_rpc:group1(Pid, Query, GroupBy, Stale =:= false, Sort,
+ Offset, Limit).
+
+group2_int(Pid, QueryArgs0) ->
+ QueryArgs = dreyfus_util:upgrade(QueryArgs0),
+ Props = args_to_proplist2(QueryArgs),
+ clouseau_rpc:group2(Pid, Props).
+
+info_int(Pid) ->
+ clouseau_rpc:info(Pid).
diff --git a/src/dreyfus/src/dreyfus_index_manager.erl b/src/dreyfus/src/dreyfus_index_manager.erl
new file mode 100644
index 000000000..47f254243
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_index_manager.erl
@@ -0,0 +1,153 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_index_manager).
+-behaviour(gen_server).
+-vsn(1).
+-include_lib("couch/include/couch_db.hrl").
+-include("dreyfus.hrl").
+
+-define(BY_SIG, dreyfus_by_sig).
+-define(BY_PID, dreyfus_by_pid).
+
+% public api.
+-export([start_link/0, get_index/2, get_disk_size/2]).
+
+% gen_server api.
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([handle_db_event/3]).
+
+% public functions.
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+get_index(DbName, Index) ->
+ gen_server:call(?MODULE, {get_index, DbName, Index}, infinity).
+
+get_disk_size(DbName, #index{sig=Sig}) ->
+ Path = <<DbName/binary, "/", Sig/binary>>,
+ clouseau_rpc:disk_size(Path).
+
+% gen_server functions.
+
+init([]) ->
+ ets:new(?BY_SIG, [set, private, named_table]),
+ ets:new(?BY_PID, [set, private, named_table]),
+ couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]),
+ process_flag(trap_exit, true),
+ {ok, nil}.
+
+handle_call({get_index, DbName, #index{sig=Sig}=Index}, From, State) ->
+ case ets:lookup(?BY_SIG, {DbName, Sig}) of
+ [] ->
+ Pid = spawn_link(fun() -> new_index(DbName, Index) end),
+ ets:insert(?BY_PID, {Pid, opening, {DbName, Sig}}),
+ ets:insert(?BY_SIG, {{DbName,Sig}, [From]}),
+ {noreply, State};
+ [{_, WaitList}] when is_list(WaitList) ->
+ ets:insert(?BY_SIG, {{DbName, Sig}, [From | WaitList]}),
+ {noreply, State};
+ [{_, ExistingPid}] ->
+ {reply, {ok, ExistingPid}, State}
+ end;
+
+handle_call({open_ok, DbName, Sig, NewPid}, {OpenerPid, _}, State) ->
+ link(NewPid),
+ [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}),
+ [gen_server:reply(From, {ok, NewPid}) || From <- WaitList],
+ ets:delete(?BY_PID, OpenerPid),
+ add_to_ets(NewPid, DbName, Sig),
+ {reply, ok, State};
+
+handle_call({open_error, DbName, Sig, Error}, {OpenerPid, _}, State) ->
+ [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}),
+ [gen_server:reply(From, Error) || From <- WaitList],
+ ets:delete(?BY_PID, OpenerPid),
+ ets:delete(?BY_SIG, {DbName, Sig}),
+ {reply, ok, State}.
+
+handle_cast({cleanup, DbName}, State) ->
+ clouseau_rpc:cleanup(DbName),
+ {noreply, State};
+
+handle_cast({rename, DbName}, State) ->
+ clouseau_rpc:rename(DbName),
+ {noreply, State}.
+
+handle_info({'EXIT', FromPid, Reason}, State) ->
+ case ets:lookup(?BY_PID, FromPid) of
+ [] ->
+ if Reason =/= normal ->
+ couch_log:error("Exit on non-updater process: ~p", [Reason]),
+ exit(Reason);
+ true -> ok
+ end;
+ % Using Reason /= normal to force a match error
+ % if we didn't delete the Pid in a handle_call
+ % message for some reason.
+ [{_, opening, {DbName, Sig}}] when Reason /= normal ->
+ Msg = {open_error, DbName, Sig, Reason},
+ {reply, ok, _} = handle_call(Msg, {FromPid, nil}, State);
+ [{_, {DbName, Sig}}] ->
+ delete_from_ets(FromPid, DbName, Sig)
+ end,
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, nil, _Extra) ->
+ {ok, nil}.
+
+% private functions
+
+handle_db_event(DbName, created, _St) ->
+ gen_server:cast(?MODULE, {cleanup, DbName}),
+ {ok, nil};
+handle_db_event(DbName, deleted, _St) ->
+ RecoveryEnabled = config:get_boolean("couchdb",
+ "enable_database_recovery", false),
+ case RecoveryEnabled of
+ true ->
+ gen_server:cast(?MODULE, {rename, DbName});
+ false ->
+ gen_server:cast(?MODULE, {cleanup, DbName})
+ end,
+
+ {ok, nil};
+handle_db_event(_DbName, _Event, _St) ->
+ {ok, nil}.
+
+new_index(DbName, #index{sig=Sig}=Index) ->
+ case (catch dreyfus_index:start_link(DbName, Index)) of
+ {ok, NewPid} ->
+ Msg = {open_ok, DbName, Sig, NewPid},
+ ok = gen_server:call(?MODULE, Msg, infinity),
+ unlink(NewPid);
+ Error ->
+ Msg = {open_error, DbName, Sig, Error},
+ ok = gen_server:call(?MODULE, Msg, infinity)
+ end.
+
+add_to_ets(Pid, DbName, Sig) ->
+ true = ets:insert(?BY_PID, {Pid, {DbName, Sig}}),
+ true = ets:insert(?BY_SIG, {{DbName, Sig}, Pid}).
+
+delete_from_ets(Pid, DbName, Sig) ->
+ true = ets:delete(?BY_PID, Pid),
+ true = ets:delete(?BY_SIG, {DbName, Sig}).
+
diff --git a/src/dreyfus/src/dreyfus_index_updater.erl b/src/dreyfus/src/dreyfus_index_updater.erl
new file mode 100644
index 000000000..3720cb63c
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_index_updater.erl
@@ -0,0 +1,181 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_index_updater).
+-include_lib("couch/include/couch_db.hrl").
+-include("dreyfus.hrl").
+
+-export([update/2, load_docs/2]).
+
+-import(couch_query_servers, [get_os_process/1, ret_os_process/1, proc_prompt/2]).
+
+update(IndexPid, Index) ->
+ #index{
+ current_seq = CurSeq,
+ dbname = DbName,
+ ddoc_id = DDocId,
+ name = IndexName
+ } = Index,
+ erlang:put(io_priority, {search, DbName, IndexName}),
+ {ok, Db} = couch_db:open_int(DbName, []),
+ try
+ TotalUpdateChanges = couch_db:count_changes_since(Db, CurSeq),
+ TotalPurgeChanges = count_pending_purged_docs_since(Db, IndexPid),
+ TotalChanges = TotalUpdateChanges + TotalPurgeChanges,
+
+ couch_task_status:add_task([
+ {type, search_indexer},
+ {database, DbName},
+ {design_document, DDocId},
+ {index, IndexName},
+ {progress, 0},
+ {changes_done, 0},
+ {total_changes, TotalChanges}
+ ]),
+
+ %% update status every half second
+ couch_task_status:set_update_frequency(500),
+
+ %ExcludeIdRevs is [{Id1, Rev1}, {Id2, Rev2}, ...]
+ %The Rev is the final Rev, not purged Rev.
+ {ok, ExcludeIdRevs} = purge_index(Db, IndexPid, Index),
+ %% compute on all docs modified since we last computed.
+
+ NewCurSeq = couch_db:get_update_seq(Db),
+ Proc = get_os_process(Index#index.def_lang),
+ try
+ true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]),
+ EnumFun = fun ?MODULE:load_docs/2,
+ [Changes] = couch_task_status:get([changes_done]),
+ Acc0 = {Changes, IndexPid, Db, Proc, TotalChanges, now(), ExcludeIdRevs},
+ {ok, _} = couch_db:fold_changes(Db, CurSeq, EnumFun, Acc0, []),
+ ok = clouseau_rpc:commit(IndexPid, NewCurSeq)
+ after
+ ret_os_process(Proc)
+ end,
+ exit({updated, NewCurSeq})
+ after
+ couch_db:close(Db)
+ end.
+
+load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime, ExcludeIdRevs}=Acc) ->
+ couch_task_status:update([{changes_done, I}, {progress, (I * 100) div Total}]),
+ DI = couch_doc:to_doc_info(FDI),
+ #doc_info{id=Id, high_seq=Seq, revs=[#rev_info{rev=Rev}|_]} = DI,
+ %check if it is processed in purge_index to avoid update the index again.
+ case lists:member({Id, Rev}, ExcludeIdRevs) of
+ true -> ok;
+ false -> update_or_delete_index(IndexPid, Db, DI, Proc)
+ end,
+ %% Force a commit every minute
+ case timer:now_diff(Now = now(), LastCommitTime) >= 60000000 of
+ true ->
+ ok = clouseau_rpc:commit(IndexPid, Seq),
+ {ok, {I+1, IndexPid, Db, Proc, Total, Now, ExcludeIdRevs}};
+ false ->
+ {ok, setelement(1, Acc, I+1)}
+ end.
+
+purge_index(Db, IndexPid, Index) ->
+ {ok, IdxPurgeSeq} = clouseau_rpc:get_purge_seq(IndexPid),
+ Proc = get_os_process(Index#index.def_lang),
+ try
+ true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]),
+ FoldFun = fun({PurgeSeq, _UUID, Id, _Revs}, {Acc, _}) ->
+ Acc0 = case couch_db:get_full_doc_info(Db, Id) of
+ not_found ->
+ ok = clouseau_rpc:delete(IndexPid, Id),
+ Acc;
+ FDI ->
+ DI = couch_doc:to_doc_info(FDI),
+ #doc_info{id=Id, revs=[#rev_info{rev=Rev}|_]} = DI,
+ case lists:member({Id, Rev}, Acc) of
+ true -> Acc;
+ false ->
+ update_or_delete_index(IndexPid, Db, DI, Proc),
+ [{Id, Rev} | Acc]
+ end
+ end,
+ update_task(1),
+ {ok, {Acc0, PurgeSeq}}
+ end,
+
+ {ok, {ExcludeList, NewPurgeSeq}} = couch_db:fold_purge_infos(
+ Db, IdxPurgeSeq, FoldFun, {[], 0}, []),
+ clouseau_rpc:set_purge_seq(IndexPid, NewPurgeSeq),
+ update_local_doc(Db, Index, NewPurgeSeq),
+ {ok, ExcludeList}
+ after
+ ret_os_process(Proc)
+ end.
+
+count_pending_purged_docs_since(Db, IndexPid) ->
+ DbPurgeSeq = couch_db:get_purge_seq(Db),
+ {ok, IdxPurgeSeq} = clouseau_rpc:get_purge_seq(IndexPid),
+ DbPurgeSeq - IdxPurgeSeq.
+
+update_or_delete_index(IndexPid, Db, DI, Proc) ->
+ #doc_info{id=Id, revs=[#rev_info{deleted=Del}|_]} = DI,
+ case Del of
+ true ->
+ ok = clouseau_rpc:delete(IndexPid, Id);
+ false ->
+ case maybe_skip_doc(Db, Id) of
+ true ->
+ ok;
+ false ->
+ {ok, Doc} = couch_db:open_doc(Db, DI, []),
+ Json = couch_doc:to_json_obj(Doc, []),
+ [Fields|_] = proc_prompt(Proc, [<<"index_doc">>, Json]),
+ Fields1 = [list_to_tuple(Field) || Field <- Fields],
+ Fields2 = maybe_add_partition(Db, Id, Fields1),
+ case Fields2 of
+ [] -> ok = clouseau_rpc:delete(IndexPid, Id);
+ _ -> ok = clouseau_rpc:update(IndexPid, Id, Fields2)
+ end
+ end
+ end.
+
+update_local_doc(Db, Index, PurgeSeq) ->
+ DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig),
+ DocContent = dreyfus_util:get_local_purge_doc_body(Db, DocId, PurgeSeq, Index),
+ couch_db:update_doc(Db, DocContent, []).
+
+update_task(NumChanges) ->
+ [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
+ Changes2 = Changes + NumChanges,
+ Progress = case Total of
+ 0 ->
+ 0;
+ _ ->
+ (Changes2 * 100) div Total
+ end,
+ couch_task_status:update([{progress, Progress}, {changes_done, Changes2}]).
+
+maybe_skip_doc(Db, <<"_design/", _/binary>>) ->
+ couch_db:is_partitioned(Db);
+maybe_skip_doc(_Db, _Id) ->
+ false.
+
+maybe_add_partition(_Db, _Id, []) ->
+ [];
+maybe_add_partition(Db, Id, Fields) ->
+ case couch_db:is_partitioned(Db) of
+ true ->
+ Partition = couch_partition:from_docid(Id),
+ [{<<"_partition">>, Partition, {[]}} | Fields];
+ false ->
+ Fields
+ end.
diff --git a/src/dreyfus/src/dreyfus_plugin_couch_db.erl b/src/dreyfus/src/dreyfus_plugin_couch_db.erl
new file mode 100644
index 000000000..b9f48ba74
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_plugin_couch_db.erl
@@ -0,0 +1,26 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(dreyfus_plugin_couch_db).
+
+-export([
+ is_valid_purge_client/2,
+ on_compact/2
+]).
+
+
+is_valid_purge_client(DbName, Props) ->
+ dreyfus_util:verify_index_exists(DbName, Props).
+
+
+on_compact(DbName, DDocs) ->
+ dreyfus_util:ensure_local_purge_docs(DbName, DDocs).
diff --git a/src/dreyfus/src/dreyfus_rpc.erl b/src/dreyfus/src/dreyfus_rpc.erl
new file mode 100644
index 000000000..5542bd029
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_rpc.erl
@@ -0,0 +1,130 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_rpc).
+-include_lib("couch/include/couch_db.hrl").
+-include("dreyfus.hrl").
+-import(couch_query_servers, [get_os_process/1, ret_os_process/1, proc_prompt/2]).
+
+% public api.
+-export([search/4, group1/4, group2/4, info/3, disk_size/3]).
+
+% private callback
+-export([call/5, info_int/3]).
+
+search(DbName, DDoc, IndexName, QueryArgs) ->
+ MFA = {?MODULE, call, [search, DbName, DDoc, IndexName, QueryArgs]},
+ dreyfus_util:time([rpc, search], MFA).
+
+group1(DbName, DDoc, IndexName, QueryArgs) ->
+ MFA = {?MODULE, call, [group1, DbName, DDoc, IndexName, QueryArgs]},
+ dreyfus_util:time([rpc, group1], MFA).
+
+group2(DbName, DDoc, IndexName, QueryArgs) ->
+ MFA = {?MODULE, call, [group2, DbName, DDoc, IndexName, QueryArgs]},
+ dreyfus_util:time([rpc, group2], MFA).
+
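+% Shard-local entry point, invoked over rexi: open (or create) the shard,
+% resolve the named index from the design doc, wait for it to reach the
+% minimum seq implied by the stale option, then run Fun against it.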
+call(Fun, DbName, DDoc, IndexName, QueryArgs0) ->
+ QueryArgs = dreyfus_util:upgrade(QueryArgs0),
+ erlang:put(io_priority, {search, DbName}),
+ check_interactive_mode(),
+ {ok, Db} = get_or_create_db(DbName, []),
+ #index_query_args{
+ stale = Stale
+ } = QueryArgs,
+ {_LastSeq, MinSeq} = calculate_seqs(Db, Stale),
+ case dreyfus_index:design_doc_to_index(DDoc, IndexName) of
+ {ok, Index} ->
+ case dreyfus_index_manager:get_index(DbName, Index) of
+ {ok, Pid} ->
+ case dreyfus_index:await(Pid, MinSeq) of
+ {ok, IndexPid, _Seq} ->
+ Result = dreyfus_index:Fun(IndexPid, QueryArgs),
+ rexi:reply(Result);
+ % obsolete clauses, remove after upgrade
+ ok ->
+ Result = dreyfus_index:Fun(Pid, QueryArgs),
+ rexi:reply(Result);
+ {ok, _Seq} ->
+ Result = dreyfus_index:Fun(Pid, QueryArgs),
+ rexi:reply(Result);
+ Error ->
+ rexi:reply(Error)
+ end;
+ Error ->
+ rexi:reply(Error)
+ end;
+ Error ->
+ rexi:reply(Error)
+ end.
+
+info(DbName, DDoc, IndexName) ->
+ MFA = {?MODULE, info_int, [DbName, DDoc, IndexName]},
+ dreyfus_util:time([rpc, info], MFA).
+
+info_int(DbName, DDoc, IndexName) ->
+ erlang:put(io_priority, {search, DbName}),
+ check_interactive_mode(),
+ case dreyfus_index:design_doc_to_index(DDoc, IndexName) of
+ {ok, Index} ->
+ case dreyfus_index_manager:get_index(DbName, Index) of
+ {ok, Pid} ->
+ Result = dreyfus_index:info(Pid),
+ rexi:reply(Result);
+ Error ->
+ rexi:reply(Error)
+ end;
+ Error ->
+ rexi:reply(Error)
+ end.
+
+disk_size(DbName, DDoc, IndexName) ->
+ erlang:put(io_priority, {search, DbName}),
+ check_interactive_mode(),
+ case dreyfus_index:design_doc_to_index(DDoc, IndexName) of
+ {ok, Index} ->
+ Result = dreyfus_index_manager:get_disk_size(DbName, Index),
+ rexi:reply(Result);
+ Error ->
+ rexi:reply(Error)
+ end.
+
+get_or_create_db(DbName, Options) ->
+ case couch_db:open_int(DbName, Options) of
+ {not_found, no_db_file} ->
+ couch_log:warning("~p creating ~s", [?MODULE, DbName]),
+ couch_server:create(DbName, Options);
+ Else ->
+ Else
+ end.
+
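+% stale=ok and stale=update_after accept whatever has been indexed so far
+% (minimum seq 0); otherwise the query waits for the current update seq.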
+calculate_seqs(Db, Stale) ->
+ LastSeq = couch_db:get_update_seq(Db),
+ if
+ Stale == ok orelse Stale == update_after ->
+ {LastSeq, 0};
+ true ->
+ {LastSeq, LastSeq}
+ end.
+
+check_interactive_mode() ->
+ case config:get("couchdb", "maintenance_mode", "false") of
+ "true" ->
+ % Do this to avoid log spam from rexi_server
+ rexi:reply({rexi_EXIT, {maintenance_mode, node()}}),
+ exit(normal);
+ _ ->
+ ok
+ end.
diff --git a/src/dreyfus/src/dreyfus_sup.erl b/src/dreyfus/src/dreyfus_sup.erl
new file mode 100644
index 000000000..d855a822e
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_sup.erl
@@ -0,0 +1,32 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_sup).
+-behaviour(supervisor).
+
+-export([start_link/0, init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init(_Args) ->
+ Children = [
+ child(dreyfus_index_manager)
+ ],
+ {ok, {{one_for_one,10,1},
+ couch_epi:register_service(dreyfus_epi, Children)}}.
+
+child(Child) ->
+ {Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}.
diff --git a/src/dreyfus/src/dreyfus_util.erl b/src/dreyfus/src/dreyfus_util.erl
new file mode 100644
index 000000000..ae3133e7d
--- /dev/null
+++ b/src/dreyfus/src/dreyfus_util.erl
@@ -0,0 +1,418 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+
+-module(dreyfus_util).
+
+-include("dreyfus.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-export([get_shards/2, sort/2, upgrade/1, export/1, time/2]).
+-export([in_black_list/1, in_black_list/3, maybe_deny_index/3]).
+-export([get_design_docid/1]).
+-export([
+ ensure_local_purge_docs/2,
+ get_value_from_options/2,
+ get_local_purge_doc_id/1,
+ get_local_purge_doc_body/4,
+ maybe_create_local_purge_doc/2,
+ maybe_create_local_purge_doc/3,
+ get_signature_from_idxdir/1,
+ verify_index_exists/2
+]).
+
+
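+% Choose the shards to query: only the shards holding the partition key for
+% partitioned queries, and "ushards" (one preferred replica per range) when
+% stale or stable results are acceptable, otherwise every replica.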
+get_shards(DbName, #index_query_args{partition = nil} = Args) ->
+ case use_ushards(Args) of
+ true ->
+ mem3:ushards(DbName);
+ false ->
+ mem3:shards(DbName)
+ end;
+get_shards(DbName, #index_query_args{partition = Partition} = Args) ->
+ PartitionId = couch_partition:shard_key(Partition),
+ case use_ushards(Args) of
+ true ->
+ mem3:ushards(DbName, PartitionId);
+ false ->
+ mem3:shards(DbName, PartitionId)
+ end;
+get_shards(DbName, Args) ->
+ get_shards(DbName, upgrade(Args)).
+
+use_ushards(#index_query_args{stale=ok}) ->
+ true;
+use_ushards(#index_query_args{stable=true}) ->
+ true;
+use_ushards(#index_query_args{}) ->
+ false.
+
+-spec sort(Order :: relevance | [any()], [#sortable{}]) -> [#sortable{}].
+sort(Sort, List0) ->
+ {List1, Stash} = stash_items(List0),
+ List2 = lists:sort(fun(A, B) -> sort(Sort, A, B) end, List1),
+ unstash_items(List2, Stash).
+
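+% Replace each item payload with a unique reference while sorting (the
+% comparisons only inspect the order and shard fields), then restore the
+% payloads from the stash afterwards.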
+stash_items(List) ->
+ lists:unzip([stash_item(Item) || Item <- List]).
+
+stash_item(Item) ->
+ Ref = make_ref(),
+ {Item#sortable{item=Ref}, {Ref, Item#sortable.item}}.
+
+unstash_items(List, Stash) ->
+ [unstash_item(Item, Stash) || Item <- List].
+
+unstash_item(Stashed, Stash) ->
+ {_, Item} = lists:keyfind(Stashed#sortable.item, 1, Stash),
+ Stashed#sortable{item=Item}.
+
+-spec sort(Order :: relevance | [any()], #sortable{}, #sortable{}) -> boolean().
+sort(relevance, #sortable{}=A, #sortable{}=B) ->
+ sort2(pad([<<"-">>], <<"">>, length(A#sortable.order)), A, B);
+sort(Sort, #sortable{}=A, #sortable{}=B) when is_binary(Sort) ->
+ sort2(pad([Sort], <<"">>, length(A#sortable.order)), A, B);
+sort(Sort, #sortable{}=A, #sortable{}=B) when is_list(Sort) ->
+ sort2(pad(Sort, <<"">>, length(A#sortable.order)), A, B).
+
+-spec sort2([any()], #sortable{}, #sortable{}) -> boolean().
+sort2([<<"-",_/binary>>|_], #sortable{order=[A|_]}, #sortable{order=[B|_]}) when A =/= B ->
+ A > B;
+sort2([_|_], #sortable{order=[A|_]}, #sortable{order=[B|_]}) when A =/= B ->
+ A < B;
+sort2([], #sortable{shard=#shard{range=A}}, #sortable{shard=#shard{range=B}}) ->
+ % arbitrary tie-breaker
+ A =< B;
+sort2([_|Rest], #sortable{order=[_|RestA]}=SortableA, #sortable{order=[_|RestB]}=SortableB) ->
+ sort2(Rest, SortableA#sortable{order=RestA}, SortableB#sortable{order=RestB}).
+
+pad(List, _Padding, Length) when length(List) >= Length ->
+ List;
+pad(List, Padding, Length) ->
+ pad(List ++ [Padding], Padding, Length).
+
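+% Accept #index_query_args records sent by older nodes in a mixed-version
+% cluster; each tuple clause matches a previous shape of the record.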
+upgrade(#index_query_args{}=Args) ->
+ Args;
+upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
+ Sort, Grouping, Stable}) ->
+ #index_query_args{
+ q = Query,
+ limit = Limit,
+ stale = Stale,
+ include_docs = IncludeDocs,
+ bookmark = Bookmark,
+ sort = Sort,
+ grouping = Grouping,
+ stable = Stable};
+upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
+ Sort, Grouping, Stable, Counts, Ranges, Drilldown}) ->
+ #index_query_args{
+ q = Query,
+ limit = Limit,
+ stale = Stale,
+ include_docs = IncludeDocs,
+ bookmark = Bookmark,
+ sort = Sort,
+ grouping = Grouping,
+ stable = Stable,
+ counts=Counts,
+ ranges = Ranges,
+ drilldown = Drilldown};
+upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
+ Sort, Grouping, Stable, Counts, Ranges, Drilldown,
+ IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag,
+ HighlightNumber, HighlightSize}) ->
+ #index_query_args{
+ q = Query,
+ limit = Limit,
+ stale = Stale,
+ include_docs = IncludeDocs,
+ bookmark = Bookmark,
+ sort = Sort,
+ grouping = Grouping,
+ stable = Stable,
+ counts = Counts,
+ ranges = Ranges,
+ drilldown = Drilldown,
+ include_fields = IncludeFields,
+ highlight_fields = HighlightFields,
+ highlight_pre_tag = HighlightPreTag,
+ highlight_post_tag = HighlightPostTag,
+ highlight_number = HighlightNumber,
+ highlight_size = HighlightSize
+ };
+upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
+ Sort, Grouping, Stable, Counts, Ranges, Drilldown,
+ IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag,
+ HighlightNumber, HighlightSize, RawBookmark}) ->
+ #index_query_args{
+ q = Query,
+ limit = Limit,
+ stale = Stale,
+ include_docs = IncludeDocs,
+ bookmark = Bookmark,
+ sort = Sort,
+ grouping = Grouping,
+ stable = Stable,
+ counts = Counts,
+ ranges = Ranges,
+ drilldown = Drilldown,
+ include_fields = IncludeFields,
+ highlight_fields = HighlightFields,
+ highlight_pre_tag = HighlightPreTag,
+ highlight_post_tag = HighlightPostTag,
+ highlight_number = HighlightNumber,
+ highlight_size = HighlightSize,
+ raw_bookmark = RawBookmark
+ }.
+
+export(#index_query_args{partition = nil, counts = nil, ranges = nil,
+ drilldown = [], include_fields = nil, highlight_fields = nil} = Args) ->
+ % Ensure existing searches work during the upgrade by creating an
+ % #index_query_args record in the old format
+ {index_query_args,
+ Args#index_query_args.q,
+ Args#index_query_args.limit,
+ Args#index_query_args.stale,
+ Args#index_query_args.include_docs,
+ Args#index_query_args.bookmark,
+ Args#index_query_args.sort,
+ Args#index_query_args.grouping,
+ Args#index_query_args.stable
+ };
+export(#index_query_args{partition = nil, include_fields = nil,
+ highlight_fields = nil} = Args) ->
+ {index_query_args,
+ Args#index_query_args.q,
+ Args#index_query_args.limit,
+ Args#index_query_args.stale,
+ Args#index_query_args.include_docs,
+ Args#index_query_args.bookmark,
+ Args#index_query_args.sort,
+ Args#index_query_args.grouping,
+ Args#index_query_args.stable,
+ Args#index_query_args.counts,
+ Args#index_query_args.ranges,
+ Args#index_query_args.drilldown
+ };
+export(#index_query_args{partition = nil} = Args) ->
+ {index_query_args,
+ Args#index_query_args.q,
+ Args#index_query_args.limit,
+ Args#index_query_args.stale,
+ Args#index_query_args.include_docs,
+ Args#index_query_args.bookmark,
+ Args#index_query_args.sort,
+ Args#index_query_args.grouping,
+ Args#index_query_args.stable,
+ Args#index_query_args.counts,
+ Args#index_query_args.ranges,
+ Args#index_query_args.drilldown,
+ Args#index_query_args.include_fields,
+ Args#index_query_args.highlight_fields,
+ Args#index_query_args.highlight_pre_tag,
+ Args#index_query_args.highlight_post_tag,
+ Args#index_query_args.highlight_number,
+ Args#index_query_args.highlight_size,
+ Args#index_query_args.raw_bookmark
+ };
+export(QueryArgs) ->
+ QueryArgs.
+
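+% Apply the MFA and record its wall-clock duration, in milliseconds, in the
+% [dreyfus | Metric] histogram, even when the call throws.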
+time(Metric, {M, F, A}) when is_list(Metric) ->
+ Start = os:timestamp(),
+ try
+ erlang:apply(M, F, A)
+ after
+ Length = timer:now_diff(os:timestamp(), Start) / 1000,
+ couch_stats:update_histogram([dreyfus | Metric], Length)
+ end.
+
+in_black_list(DbName, GroupId, IndexName) when is_binary(DbName),
+ is_binary(GroupId), is_binary(IndexName) ->
+ in_black_list(?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName));
+in_black_list(DbName, GroupId, IndexName) when is_list(DbName),
+ is_list(GroupId), is_list(IndexName) ->
+ in_black_list(lists:flatten([DbName, ".", GroupId, ".", IndexName]));
+in_black_list(_DbName, _GroupId, _IndexName) ->
+ false.
+
+in_black_list(IndexEntry) when is_list(IndexEntry) ->
+ case dreyfus_config:get(IndexEntry) of
+ undefined -> false;
+ _ -> true
+ end;
+in_black_list(_IndexEntry) ->
+ false.
+
+maybe_deny_index(DbName, GroupId, IndexName) ->
+ case in_black_list(DbName, GroupId, IndexName) of
+ true ->
+ Reason = ?l2b(io_lib:format("Index <~s, ~s, ~s>, is BlackListed",
+ [?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName)])),
+            throw({bad_request, Reason});
+ _ ->
+ ok
+ end.
+
+get_design_docid(#doc{id = <<"_design/", DesignName/binary>>}) ->
+ DesignName.
+
+get_value_from_options(Key, Options) ->
+ case couch_util:get_value(Key, Options) of
+ undefined ->
+ Reason = binary_to_list(Key) ++ " must exist in Options.",
+ throw({bad_request, Reason});
+ Value -> Value
+ end.
+
+ensure_local_purge_docs(DbName, DDocs) ->
+ couch_util:with_db(DbName, fun(Db) ->
+ lists:foreach(fun(DDoc) ->
+ #doc{body = {Props}} = DDoc,
+ case couch_util:get_value(<<"indexes">>, Props) of
+ undefined -> false;
+ _ ->
+ try dreyfus_index:design_doc_to_indexes(DDoc) of
+ SIndexes -> ensure_local_purge_doc(Db, SIndexes)
+ catch _:_ ->
+ ok
+ end
+ end
+ end, DDocs)
+ end).
+
+ensure_local_purge_doc(Db, SIndexes) ->
+ if SIndexes =/= [] ->
+ lists:map(fun(SIndex) ->
+ maybe_create_local_purge_doc(Db, SIndex)
+ end, SIndexes);
+ true -> ok end.
+
+maybe_create_local_purge_doc(Db, Index) ->
+ DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig),
+ case couch_db:open_doc(Db, DocId) of
+ {not_found, _} ->
+ DbPurgeSeq = couch_db:get_purge_seq(Db),
+ DocContent = dreyfus_util:get_local_purge_doc_body(
+ Db, DocId, DbPurgeSeq, Index),
+ couch_db:update_doc(Db, DocContent, []);
+ _ ->
+ ok
+ end.
+
+maybe_create_local_purge_doc(Db, IndexPid, Index) ->
+ DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig),
+ case couch_db:open_doc(Db, DocId) of
+ {not_found, _} ->
+ DbPurgeSeq = couch_db:get_purge_seq(Db),
+ clouseau_rpc:set_purge_seq(IndexPid, DbPurgeSeq),
+ DocContent = dreyfus_util:get_local_purge_doc_body(
+ Db, DocId, DbPurgeSeq, Index),
+ couch_db:update_doc(Db, DocContent, []);
+ _ ->
+ ok
+ end.
+
+get_local_purge_doc_id(Sig) ->
+ ?l2b(?LOCAL_DOC_PREFIX ++ "purge-" ++ "dreyfus-" ++ Sig).
+
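+% An index directory is named after the index signature; return the last
+% path component if it is purely hexadecimal, otherwise undefined.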
+get_signature_from_idxdir(IdxDir) ->
+ IdxDirList = filename:split(IdxDir),
+ Sig = lists:last(IdxDirList),
+ case [Ch || Ch <- Sig, not (((Ch >= $0) and (Ch =< $9))
+ orelse ((Ch >= $a) and (Ch =< $f))
+ orelse ((Ch >= $A) and (Ch =< $F)))] == [] of
+ true -> Sig;
+ false -> undefined
+ end.
+
+get_local_purge_doc_body(Db, LocalDocId, PurgeSeq, Index) ->
+ #index{
+ name = IdxName,
+ ddoc_id = DDocId,
+ sig = Sig
+ } = Index,
+ {Mega, Secs, _} = os:timestamp(),
+ NowSecs = Mega * 1000000 + Secs,
+ JsonList = {[
+ {<<"_id">>, LocalDocId},
+ {<<"purge_seq">>, PurgeSeq},
+ {<<"updated_on">>, NowSecs},
+ {<<"indexname">>, IdxName},
+ {<<"ddoc_id">>, DDocId},
+ {<<"signature">>, Sig},
+ {<<"type">>, <<"dreyfus">>}
+ ]},
+ couch_doc:from_json_obj(JsonList).
+
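+% Purge-client validation: true only when the design doc named in the local
+% purge doc still exists and still yields an index with the recorded
+% signature.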
+verify_index_exists(DbName, Props) ->
+ try
+ Type = couch_util:get_value(<<"type">>, Props),
+ if Type =/= <<"dreyfus">> -> false; true ->
+ DDocId = couch_util:get_value(<<"ddoc_id">>, Props),
+ IndexName = couch_util:get_value(<<"indexname">>, Props),
+ Sig = couch_util:get_value(<<"signature">>, Props),
+ couch_util:with_db(DbName, fun(Db) ->
+ case couch_db:get_design_doc(Db, DDocId) of
+ {ok, #doc{} = DDoc} ->
+ {ok, IdxState} = dreyfus_index:design_doc_to_index(
+ DDoc, IndexName),
+ IdxState#index.sig == Sig;
+ {not_found, _} ->
+ false
+ end
+ end)
+ end
+ catch _:_ ->
+ false
+ end.
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+-define(SORT(T, L), lists:sort(fun(A, B) -> sort(T, A, B) end, L)).
+-define(ASC, <<"">>).
+-define(DESC, <<"-">>).
+
+%% use proper for this...
+
+empty_test() ->
+ ?assertEqual([], ?SORT([], [])).
+
+primary_asc_test() ->
+ ?assertMatch([#sortable{order=[1]}, #sortable{order=[2]}],
+ ?SORT([?ASC], [#sortable{order=[2]}, #sortable{order=[1]}])).
+
+primary_desc_test() ->
+ ?assertMatch([#sortable{order=[2]}, #sortable{order=[1]}],
+ ?SORT([?DESC], [#sortable{order=[1]}, #sortable{order=[2]}])).
+
+secondary_asc_test() ->
+ ?assertMatch([#sortable{order=[1, 1]}, #sortable{order=[1, 2]}],
+ ?SORT([?ASC, ?ASC], [#sortable{order=[1, 2]}, #sortable{order=[1, 1]}])).
+
+secondary_desc_test() ->
+ ?assertMatch([#sortable{order=[1, 2]}, #sortable{order=[1, 1]}],
+ ?SORT([?DESC, ?DESC], [#sortable{order=[1, 1]}, #sortable{order=[1, 2]}])).
+
+stash_test() ->
+ {Stashed, Stash} = stash_items([#sortable{order=foo, item=bar}]),
+ First = hd(Stashed),
+ ?assert(is_reference(First#sortable.item)),
+ Unstashed = hd(unstash_items(Stashed, Stash)),
+ ?assertEqual(Unstashed#sortable.item, bar).
+
+-endif.
diff --git a/src/dreyfus/test/dreyfus_blacklist_await_test.erl b/src/dreyfus/test/dreyfus_blacklist_await_test.erl
new file mode 100644
index 000000000..28a5e7f30
--- /dev/null
+++ b/src/dreyfus/test/dreyfus_blacklist_await_test.erl
@@ -0,0 +1,76 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(dreyfus_blacklist_await_test).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("dreyfus/include/dreyfus.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-define(DDOC_ID, <<"_design/black_list_doc">>).
+-define(INDEX_NAME, <<"my_index">>).
+-define(DBNAME, <<"mydb">>).
+-define(TIMEOUT, 1000).
+
+start() ->
+ test_util:start_couch([dreyfus]).
+
+stop(_) ->
+ test_util:stop_couch([dreyfus]).
+
+setup() ->
+ ok = meck:new(couch_log),
+ ok = meck:expect(couch_log, notice, fun(_Fmt, _Args) ->
+ ?debugFmt(_Fmt, _Args)
+ end).
+
+teardown(_) ->
+ ok = meck:unload(couch_log).
+
+dreyfus_blacklist_await_test_() ->
+ {
+ "dreyfus black_list_doc await tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun do_not_await_1/0
+ ]
+ }
+ }
+ }.
+
+do_not_await_1() ->
+ ok = meck:new(dreyfus_index, [passthrough]),
+ Denied = lists:flatten([?b2l(?DBNAME), ".", "black_list_doc", ".",
+ "my_index"]),
+ config:set("dreyfus_blacklist", Denied, "true"),
+ dreyfus_test_util:wait_config_change(Denied, "true"),
+ Index = #index{dbname=?DBNAME, name=?INDEX_NAME, ddoc_id=?DDOC_ID},
+ State = create_state(?DBNAME, Index, nil, nil, []),
+ Msg = "Index Blocked from Updating - db: ~p, ddocid: ~p name: ~p",
+ Return = wait_log_message(Msg, fun() ->
+ {noreply, NewState} = dreyfus_index:handle_call({await, 1},
+ self(), State)
+ end),
+ ?assertEqual(Return, ok).
+
+wait_log_message(Fmt, Fun) ->
+ ok = meck:reset(couch_log),
+ Fun(),
+ ok = meck:wait(couch_log, '_', [Fmt, '_'], 5000).
+
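+% Build a raw tuple standing in for dreyfus_index's internal #state record.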
+create_state(DbName, Index, UPid, IPid, WList) ->
+ {state, DbName, Index, UPid, IPid, WList}.
diff --git a/src/dreyfus/test/dreyfus_blacklist_request_test.erl b/src/dreyfus/test/dreyfus_blacklist_request_test.erl
new file mode 100644
index 000000000..8e5598ae1
--- /dev/null
+++ b/src/dreyfus/test/dreyfus_blacklist_request_test.erl
@@ -0,0 +1,96 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(dreyfus_blacklist_request_test).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_log/include/couch_log.hrl").
+-include_lib("dreyfus/include/dreyfus.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-define(TIMEOUT, 1000).
+
+start() ->
+ test_util:start_couch([dreyfus]),
+ ok = meck:new(fabric, [passthrough]),
+ ok = meck:expect(fabric, open_doc, fun(_, _, _) ->
+ {ok, ddoc}
+ end).
+
+stop(_) ->
+ ok = meck:unload(fabric),
+ test_util:stop_couch([dreyfus]).
+
+setup() ->
+ ok.
+
+teardown(_) ->
+ ok.
+
+dreyfus_blacklist_request_test_() ->
+ {
+ "dreyfus blacklist request tests",
+ {
+ setup,
+ fun start/0, fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun deny_fabric_requests/0,
+ fun allow_fabric_request/0
+ ]
+ }
+ }
+ }.
+
+deny_fabric_requests() ->
+ Reason = <<"Index <mydb, myddocid, myindexname>, is BlackListed">>,
+ QueryArgs = #index_query_args{},
+ IndexQueryArgs = #index_query_args{},
+ DDoc = #doc{id = <<"_design/myddocid">>},
+ Denied = "mydb.myddocid.myindexname",
+ config:set("dreyfus_blacklist", Denied, "true"),
+ dreyfus_test_util:wait_config_change(Denied, "true"),
+ ?assertThrow({bad_request, Reason}, dreyfus_fabric_search:go(<<"mydb">>,
+ <<"myddocid">>, <<"myindexname">>, QueryArgs)),
+ ?assertThrow({bad_request, Reason}, dreyfus_fabric_group1:go(<<"mydb">>,
+ <<"myddocid">>, <<"myindexname">>, QueryArgs)),
+ ?assertThrow({bad_request, Reason}, dreyfus_fabric_group2:go(<<"mydb">>,
+ <<"myddocid">>, <<"myindexname">>, QueryArgs)),
+ ?assertThrow({bad_request, Reason}, dreyfus_fabric_info:go(<<"mydb">>,
+ <<"myddocid">>, <<"myindexname">>, QueryArgs)),
+ ?assertThrow({bad_request, Reason}, dreyfus_fabric_search:go(<<"mydb">>,
+ DDoc, <<"myindexname">>, IndexQueryArgs)),
+ ?assertThrow({bad_request, Reason}, dreyfus_fabric_group1:go(<<"mydb">>,
+ DDoc, <<"myindexname">>, IndexQueryArgs)),
+ ?assertThrow({bad_request, Reason}, dreyfus_fabric_group2:go(<<"mydb">>,
+ DDoc, <<"myindexname">>, IndexQueryArgs)),
+ ?assertThrow({bad_request, Reason}, dreyfus_fabric_info:go(<<"mydb">>,
+ DDoc, <<"myindexname">>, IndexQueryArgs)).
+
+allow_fabric_request() ->
+ ok = meck:new(dreyfus_fabric_search, [passthrough]),
+    % A second meck:expect/3 for the same function replaces the first, so
+    % both behaviours must live in one multi-clause fun for the passthrough
+    % (and thus the blacklist check) to actually run.
+    ok = meck:expect(dreyfus_fabric_search, go,
+        fun (A, GroupId, B, C) when is_binary(GroupId) ->
+                meck:passthrough([A, GroupId, B, C]);
+            (_, _, _, _) ->
+                ok
+        end),
+ Denied = "mydb2.myddocid2.myindexname2",
+ QueryArgs = #index_query_args{},
+ config:set("dreyfus_blacklist", Denied, "true"),
+ dreyfus_test_util:wait_config_change(Denied, "true"),
+ ?assertEqual(ok, dreyfus_fabric_search:go(<<"mydb">>,
+ <<"myddocid">>, <<"indexnotthere">>, QueryArgs)),
+ ok = meck:unload(dreyfus_fabric_search).
diff --git a/src/dreyfus/test/dreyfus_config_test.erl b/src/dreyfus/test/dreyfus_config_test.erl
new file mode 100644
index 000000000..775e49d7f
--- /dev/null
+++ b/src/dreyfus/test/dreyfus_config_test.erl
@@ -0,0 +1,71 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(dreyfus_config_test).
+
+
+-include_lib("couch_log/include/couch_log.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-define(TIMEOUT, 1000).
+
+
+start() ->
+ test_util:start_couch([dreyfus]).
+
+setup() ->
+ ok.
+
+teardown(_) ->
+ ok.
+
+dreyfus_config_test_() ->
+ {
+ "dreyfus config tests",
+ {
+ setup,
+ fun start/0, fun test_util:stop_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun check_black_list/0,
+ fun check_delete_from_blacklist/0
+ ]
+ }
+ }
+ }.
+
+check_black_list() ->
+ Index = "mydb.myddocid.myindexname",
+ Index2 = "mydb2.myddocid2.myindexname2",
+ Index3 = "mydb3.myddocid3.myindexname3",
+ ok = config:set("dreyfus_blacklist", Index, "true"),
+ ok = config:set("dreyfus_blacklist", Index2, "true"),
+ ok = config:set("dreyfus_blacklist", Index3, "true"),
+ dreyfus_test_util:wait_config_change(Index3, "true"),
+ FinalBl = [Index3, Index2, Index],
+ lists:foreach(fun (I) ->
+ ?assertEqual("true", dreyfus_config:get(I))
+ end, FinalBl).
+
+check_delete_from_blacklist() ->
+ Index = "mydb.myddocid.myindexname",
+ Index2 = "mydb2.myddocid2.myindexname2",
+ ok = config:set("dreyfus_blacklist", Index, "true"),
+ dreyfus_test_util:wait_config_change(Index, "true"),
+ ok = config:delete("dreyfus_blacklist", Index),
+ dreyfus_test_util:wait_config_change(Index, undefined),
+ ok = config:set("dreyfus_blacklist", Index2, "true"),
+ dreyfus_test_util:wait_config_change(Index2, "true"),
+ ?assertEqual(undefined, dreyfus_config:get(Index)),
+ ?assertEqual("true", dreyfus_config:get(Index2)).
diff --git a/src/dreyfus/test/dreyfus_purge_test.erl b/src/dreyfus/test/dreyfus_purge_test.erl
new file mode 100644
index 000000000..5fa4bc90f
--- /dev/null
+++ b/src/dreyfus/test/dreyfus_purge_test.erl
@@ -0,0 +1,867 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(dreyfus_purge_test).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("dreyfus/include/dreyfus.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+
+-export([test_purge_single/0, test_purge_multiple/0, test_purge_multiple2/0,
+ test_purge_conflict/0, test_purge_conflict2/0, test_purge_conflict3/0, test_purge_conflict4/0,
+ test_purge_update/0, test_purge_update2/0,
+ test_delete/0, test_delete_purge_conflict/0, test_delete_conflict/0,
+ test_all/0]).
+-export([test_verify_index_exists1/0, test_verify_index_exists2/0, test_verify_index_exists_failed/0,
+ test_local_doc/0, test_delete_local_doc/0, test_purge_search/0]).
+
+-compile(export_all).
+
+test_all() ->
+ test_purge_single(),
+ test_purge_multiple(),
+ test_purge_multiple2(),
+ test_purge_conflict(),
+ test_purge_conflict2(),
+ test_purge_conflict3(),
+ test_purge_conflict4(),
+ test_purge_update(),
+ test_purge_update2(),
+ test_delete(),
+ test_delete_purge_conflict(),
+ test_delete_conflict(),
+ test_verify_index_exists1(),
+ test_verify_index_exists2(),
+ test_verify_index_exists_failed(),
+ test_delete_local_doc(),
+ test_local_doc(),
+ test_purge_search(),
+ ok.
+
+test_purge_single() ->
+ DbName = db_name(),
+ create_db_docs(DbName),
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount1, 1),
+ purge_docs(DbName, [<<"apple">>]),
+ {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount2, 0),
+ delete_db(DbName),
+ ok.
+
+test_purge_multiple() ->
+ Query = <<"color:red">>,
+
+ %create the db and docs
+ DbName = db_name(),
+ create_db_docs(DbName),
+
+ %first search request
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query),
+
+ ?assertEqual(HitCount1, 5),
+
+ %purge 5 docs
+ purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, <<"haw">>,
+ <<"strawberry">>]),
+
+ %second search request
+ {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query),
+
+ ?assertEqual(HitCount2, 0),
+
+ %delete the db
+ delete_db(DbName),
+ ok.
+
+test_purge_multiple2() ->
+ %create the db and docs
+ DbName = db_name(),
+ create_db_docs(DbName),
+
+ Query = <<"color:red">>,
+
+ %first search request
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query),
+
+ ?assertEqual(HitCount1, 5),
+
+ %purge 2 docs
+ purge_docs(DbName, [<<"apple">>, <<"tomato">>]),
+
+ %second search request
+ {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query),
+
+ ?assertEqual(HitCount2, 3),
+
+ %purge 2 docs
+ purge_docs(DbName, [<<"cherry">>, <<"haw">>]),
+
+ %third search request
+ {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, Query),
+
+ ?assertEqual(HitCount3, 1),
+
+ %delete the db
+ delete_db(DbName),
+ ok.
+
+test_purge_conflict() ->
+ %create dbs and docs
+ SourceDbName = db_name(),
+ timer:sleep(2000),
+ TargetDbName = db_name(),
+
+ create_db_docs(SourceDbName),
+ create_db_docs(TargetDbName, <<"green">>),
+
+ %first search
+ {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount1 + GreenHitCount1),
+
+ %do replicate and make conflicted docs
+ {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
+ SourceDbName, TargetDbName), [?ADMIN_CTX]),
+
+ %%check doc version
+ wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>], 2, 5),
+
+ %second search
+ {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount2 + GreenHitCount2),
+
+ purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, <<"haw">>,
+ <<"strawberry">>]),
+
+ %third search
+ {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(TargetDbName,
+ <<"color:red">>),
+ {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(TargetDbName,
+ <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount3 + GreenHitCount3),
+ ?assertEqual(RedHitCount2, GreenHitCount3),
+ ?assertEqual(GreenHitCount2, RedHitCount3),
+
+ delete_db(SourceDbName),
+ delete_db(TargetDbName),
+ ok.
+
+test_purge_conflict2() ->
+ %create dbs and docs
+ SourceDbName = db_name(),
+ timer:sleep(2000),
+ TargetDbName = db_name(),
+
+ create_db_docs(SourceDbName),
+ create_db_docs(TargetDbName, <<"green">>),
+
+ %first search
+ {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(TargetDbName,
+ <<"color:red">>),
+ {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(TargetDbName,
+ <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount1 + GreenHitCount1),
+
+ %do replicate and make conflicted docs
+ {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
+ SourceDbName, TargetDbName), [?ADMIN_CTX]),
+
+ wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>], 2, 5),
+
+ %second search
+ {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount2 + GreenHitCount2),
+
+ purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>]),
+ purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>]),
+
+ %third search
+ {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(0, RedHitCount3 + GreenHitCount3),
+
+ delete_db(SourceDbName),
+ delete_db(TargetDbName),
+ ok.
+
+
+test_purge_conflict3() ->
+ %create dbs and docs
+ SourceDbName = db_name(),
+ timer:sleep(2000),
+ TargetDbName = db_name(),
+
+ create_db_docs(SourceDbName),
+ create_db_docs(TargetDbName, <<"green">>),
+
+ %first search
+ {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount1 + GreenHitCount1),
+
+ %do replicate and make conflicted docs
+ {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
+ SourceDbName, TargetDbName), [?ADMIN_CTX]),
+
+ %%check doc version
+ wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>], 2, 5),
+
+ %second search
+ {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount2 + GreenHitCount2),
+
+ purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>]),
+
+ %third search
+ {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount3 + GreenHitCount3),
+ ?assertEqual(RedHitCount2, GreenHitCount3),
+ ?assertEqual(GreenHitCount2, RedHitCount3),
+
+ purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>]),
+ {ok, _, RedHitCount4, _, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount4, _, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(0, RedHitCount4 + GreenHitCount4),
+
+ delete_db(SourceDbName),
+ delete_db(TargetDbName),
+ ok.
+
+test_purge_conflict4() ->
+ %create dbs and docs
+ SourceDbName = db_name(),
+ timer:sleep(2000),
+ TargetDbName = db_name(),
+
+ create_db_docs(SourceDbName, <<"green">>),
+ create_db_docs(TargetDbName, <<"red">>),
+
+ %first search
+ {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount1 + GreenHitCount1),
+
+ %do replicate and make conflicted docs
+ {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
+ SourceDbName, TargetDbName), [?ADMIN_CTX]),
+
+ %%check doc version
+ wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>], 2, 5),
+
+ %second search
+ {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount2 + GreenHitCount2),
+
+ purge_docs_with_all_revs(TargetDbName, [<<"apple">>, <<"tomato">>,
+ <<"cherry">>, <<"haw">>, <<"strawberry">>]),
+
+ %third search
+ {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(0, RedHitCount3 + GreenHitCount3),
+
+ delete_db(SourceDbName),
+ delete_db(TargetDbName),
+ ok.
+
+test_purge_update() ->
+ %create the db and docs
+ DbName = db_name(),
+ create_db_docs(DbName),
+
+ QueryRed = <<"color:red">>,
+ QueryGreen = <<"color:green">>,
+
+ %first search request
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, QueryRed),
+
+ ?assertEqual(HitCount1, 5),
+
+ %update doc
+ Rev = get_rev(DbName, <<"apple">>),
+ Doc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"apple">>},
+ {<<"_rev">>, couch_doc:rev_to_str(Rev)},
+ {<<"color">>, <<"green">>},
+ {<<"size">>, 8}
+ ]}),
+ {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
+
+ %second search request
+ {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, QueryRed),
+ {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, QueryGreen),
+
+ % 4 red and 1 green
+ ?assertEqual(HitCount2, 4),
+ ?assertEqual(HitCount3, 1),
+
+ % purge 2 docs, 1 red and 1 green
+ purge_docs(DbName, [<<"apple">>, <<"tomato">>]),
+
+ % third search request
+ {ok, _, HitCount4, _, _, _} = dreyfus_search(DbName, QueryRed),
+ {ok, _, HitCount5, _, _, _} = dreyfus_search(DbName, QueryGreen),
+
+ % 3 red and 0 green
+ ?assertEqual(HitCount4, 3),
+ ?assertEqual(HitCount5, 0),
+
+ delete_db(DbName),
+ ok.
+
+test_purge_update2() ->
+ %create the db and docs
+ DbName = db_name(),
+ create_db_docs(DbName),
+
+ Query1 = <<"size:1">>,
+ Query1000 = <<"size:1000">>,
+
+ %first search request
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query1),
+ {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query1000),
+
+ ?assertEqual(HitCount1, 5),
+ ?assertEqual(HitCount2, 0),
+
+    % Update the doc 999 times; this takes about 30 seconds.
+ update_doc(DbName, <<"apple">>, 999),
+
+ %second search request
+ {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, Query1),
+ {ok, _, HitCount4, _, _, _} = dreyfus_search(DbName, Query1000),
+
+ % 4 value(1) and 1 value(1000)
+ ?assertEqual(HitCount3, 4),
+ ?assertEqual(HitCount4, 1),
+
+ % purge doc
+ purge_docs(DbName, [<<"apple">>]),
+
+ % third search request
+ {ok, _, HitCount5, _, _, _} = dreyfus_search(DbName, Query1),
+ {ok, _, HitCount6, _, _, _} = dreyfus_search(DbName, Query1000),
+
+ % 4 value(1) and 0 value(1000)
+ ?assertEqual(HitCount5, 4),
+ ?assertEqual(HitCount6, 0),
+
+ delete_db(DbName),
+ ok.
+
+test_delete() ->
+ DbName = db_name(),
+ create_db_docs(DbName),
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount1, 1),
+ ok = delete_docs(DbName, [<<"apple">>]),
+ {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount2, 0),
+ delete_db(DbName),
+ ok.
+
+test_delete_conflict() ->
+ %create dbs and docs
+ SourceDbName = db_name(),
+ timer:sleep(2000),
+ TargetDbName = db_name(),
+
+ create_db_docs(SourceDbName),
+ create_db_docs(TargetDbName, <<"green">>),
+
+ %first search
+ {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount1 + GreenHitCount1),
+
+ %do replicate and make conflicted docs
+ {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
+ SourceDbName, TargetDbName), [?ADMIN_CTX]),
+
+ wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>], 2, 5),
+
+ %second search
+ {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount2 + GreenHitCount2),
+
+ %delete docs
+ delete_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>]),
+
+ %third search
+ {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount3 + GreenHitCount3),
+ ?assertEqual(RedHitCount2, GreenHitCount3),
+ ?assertEqual(GreenHitCount2, RedHitCount3),
+
+ delete_db(SourceDbName),
+ delete_db(TargetDbName),
+ ok.
+
+test_delete_purge_conflict() ->
+ %create dbs and docs
+ SourceDbName = db_name(),
+ timer:sleep(2000),
+ TargetDbName = db_name(),
+
+ create_db_docs(SourceDbName),
+ create_db_docs(TargetDbName, <<"green">>),
+
+ %first search
+ {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount1 + GreenHitCount1),
+
+ %do replicate and make conflicted docs
+ {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
+ SourceDbName, TargetDbName), [?ADMIN_CTX]),
+
+ wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>], 2, 5),
+
+ %second search
+ {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(5, RedHitCount2 + GreenHitCount2),
+
+ %purge docs
+ purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>]),
+
+ %delete docs
+ delete_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"haw">>, <<"strawberry">>]),
+
+ %third search
+ {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
+ TargetDbName, <<"color:red">>),
+ {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
+ TargetDbName, <<"color:green">>),
+
+ ?assertEqual(RedHitCount3, 0),
+ ?assertEqual(GreenHitCount3, 0),
+
+ delete_db(SourceDbName),
+ delete_db(TargetDbName),
+ ok.
+
+test_local_doc() ->
+ DbName = db_name(),
+ create_db_docs(DbName),
+
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount1, 1),
+ purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+ <<"strawberry">>]),
+ {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount2, 0),
+
+ %get local doc
+ [Sig|_] = get_sigs(DbName),
+ LocalId = dreyfus_util:get_local_purge_doc_id(Sig),
+ LocalShards = mem3:local_shards(DbName),
+ PurgeSeqs = lists:map(fun(Shard) ->
+ {ok, Db} = couch_db:open_int(Shard#shard.name, [?ADMIN_CTX]),
+ {ok, LDoc} = couch_db:open_doc(Db, LocalId, []),
+ {Props} = couch_doc:to_json_obj(LDoc, []),
+ dreyfus_util:get_value_from_options(<<"updated_on">>, Props),
+ PurgeSeq = dreyfus_util:get_value_from_options(<<"purge_seq">>, Props),
+ Type = dreyfus_util:get_value_from_options(<<"type">>, Props),
+ ?assertEqual(<<"dreyfus">>, Type),
+ couch_db:close(Db),
+ PurgeSeq
+ end, LocalShards),
+ ?assertEqual(lists:sum(PurgeSeqs), 4),
+
+ delete_db(DbName),
+ ok.
+
+test_verify_index_exists1() ->
+ DbName = db_name(),
+ create_db_docs(DbName),
+
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount1, 1),
+
+ ok = purge_docs(DbName, [<<"apple">>]),
+
+ {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount2, 0),
+
+ ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
+ [ShardDbName | _Rest ] = ShardNames,
+ {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
+ {ok, LDoc} = couch_db:open_doc(Db,
+ dreyfus_util:get_local_purge_doc_id(
+ <<"49e82c2a910b1046b55cc45ad058a7ee">>), []
+ ),
+ #doc{body = {Props}} = LDoc,
+ ?assertEqual(true, dreyfus_util:verify_index_exists(ShardDbName, Props)),
+ delete_db(DbName),
+ ok.
+
+test_verify_index_exists2() ->
+ DbName = db_name(),
+ create_db_docs(DbName),
+
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount1, 1),
+
+ ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
+ [ShardDbName | _Rest ] = ShardNames,
+ {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
+ {ok, LDoc} = couch_db:open_doc(Db,
+ dreyfus_util:get_local_purge_doc_id(
+ <<"49e82c2a910b1046b55cc45ad058a7ee">>), []
+ ),
+ #doc{body = {Props}} = LDoc,
+ ?assertEqual(true, dreyfus_util:verify_index_exists(ShardDbName, Props)),
+
+ delete_db(DbName),
+ ok.
+
+test_verify_index_exists_failed() ->
+ DbName = db_name(),
+ create_db_docs(DbName),
+
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount1, 1),
+
+ ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
+ [ShardDbName | _Rest ] = ShardNames,
+ {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
+ {ok, LDoc} = couch_db:open_doc(Db,
+ dreyfus_util:get_local_purge_doc_id(
+ <<"49e82c2a910b1046b55cc45ad058a7ee">>), []
+ ),
+ #doc{body = {Options}} = LDoc,
+ OptionsDbErr = [
+ {<<"indexname">>,
+ dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
+ {<<"ddoc_id">>,
+ dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
+ {<<"signature">>,
+ dreyfus_util:get_value_from_options(<<"signature">>, Options)}
+ ],
+ ?assertEqual(false, dreyfus_util:verify_index_exists(
+ ShardDbName, OptionsDbErr)),
+
+ OptionsIdxErr = [
+ {<<"indexname">>, <<"someindex">>},
+ {<<"ddoc_id">>,
+ dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
+ {<<"signature">>,
+ dreyfus_util:get_value_from_options(<<"signature">>, Options)}
+ ],
+ ?assertEqual(false, dreyfus_util:verify_index_exists(
+ ShardDbName, OptionsIdxErr)),
+
+ OptionsDDocErr = [
+ {<<"indexname">>,
+ dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
+ {<<"ddoc_id">>,
+ <<"somedesigndoc">>},
+ {<<"signature">>,
+ dreyfus_util:get_value_from_options(<<"signature">>, Options)}
+ ],
+ ?assertEqual(false, dreyfus_util:verify_index_exists(
+ ShardDbName, OptionsDDocErr)),
+
+ OptionsSigErr = [
+ {<<"indexname">>,
+ dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
+ {<<"ddoc_id">>,
+ dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
+ {<<"signature">>,
+ <<"12345678901234567890123456789012">>}
+ ],
+ ?assertEqual(false, dreyfus_util:verify_index_exists(
+ ShardDbName, OptionsSigErr)),
+
+ delete_db(DbName),
+ ok.
+
+test_delete_local_doc() ->
+ DbName = db_name(),
+ create_db_docs(DbName),
+
+ {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount1, 1),
+
+ ok = purge_docs(DbName, [<<"apple">>]),
+
+ {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+ ?assertEqual(HitCount2, 0),
+
+ LDocId = dreyfus_util:get_local_purge_doc_id(
+ <<"49e82c2a910b1046b55cc45ad058a7ee">>),
+ ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
+ [ShardDbName | _Rest ] = ShardNames,
+ {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
+ {ok, _} = couch_db:open_doc(Db, LDocId, []),
+
+ delete_docs(DbName, [<<"_design/search">>]),
+ io:format("DbName ~p~n", [DbName]),
+ ?debugFmt("Converting ... ~n~p~n", [DbName]),
+
+
+ dreyfus_fabric_cleanup:go(DbName),
+ {ok, Db2} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
+ {not_found, _} = couch_db:open_doc(Db2, LDocId, []),
+
+ delete_db(DbName),
+ ok.
+
+test_purge_search() ->
+ DbName = db_name(),
+ create_db_docs(DbName),
+ purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"haw">>]),
+ {ok, _, HitCount, _, _, _} = dreyfus_search(DbName, <<"color:red">>),
+ ?assertEqual(HitCount, 2),
+ delete_db(DbName),
+ ok.
+
+%private API
+db_name() ->
+    Nums = tuple_to_list(os:timestamp()),
+ Prefix = "test-db",
+ Suffix = lists:concat([integer_to_list(Num) || Num <- Nums]),
+ list_to_binary(Prefix ++ "-" ++ Suffix).
+
+purge_docs(DBName, DocIds) ->
+ IdsRevs = [{DocId, [get_rev(DBName, DocId)]} || DocId <- DocIds],
+ {ok, _} = fabric:purge_docs(DBName, IdsRevs, []),
+ ok.
+
+purge_docs_with_all_revs(DBName, DocIds) ->
+ IdsRevs = [{DocId, get_revs(DBName, DocId)} || DocId <- DocIds],
+ {ok, _} = fabric:purge_docs(DBName, IdsRevs, []),
+ ok.
+
+dreyfus_search(DbName, KeyWord) ->
+ QueryArgs = #index_query_args{q = KeyWord},
+ {ok, DDoc} = fabric:open_doc(DbName, <<"_design/search">>, []),
+ dreyfus_fabric_search:go(DbName, DDoc, <<"index">>, QueryArgs).
+
+create_db_docs(DbName) ->
+ create_db(DbName),
+ create_docs(DbName, 5, <<"red">>).
+
+create_db_docs(DbName, Color) ->
+ create_db(DbName),
+ create_docs(DbName, 5, Color).
+
+create_docs(DbName, Count, Color) ->
+ {ok, _} = fabric:update_docs(DbName, make_docs(Count, Color), [?ADMIN_CTX]),
+ {ok, _} = fabric:update_doc(DbName, make_design_doc(dreyfus), [?ADMIN_CTX]).
+
+create_db(DbName) ->
+ ok = fabric:create_db(DbName, [?ADMIN_CTX, {q, 1}]).
+
+delete_db(DbName) ->
+ ok = fabric:delete_db(DbName, [?ADMIN_CTX]).
+
+make_docs(Count, Color) ->
+ [make_doc(I, Color) || I <- lists:seq(1, Count)].
+
+make_doc(Id, Color) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, get_value(Id)},
+ {<<"color">>, Color},
+ {<<"size">>, 1}
+ ]}).
+
+get_value(Key) ->
+ case Key of
+ 1 -> <<"apple">>;
+ 2 -> <<"tomato">>;
+ 3 -> <<"cherry">>;
+ 4 -> <<"strawberry">>;
+ 5 -> <<"haw">>;
+ 6 -> <<"carrot">>;
+ 7 -> <<"pitaya">>;
+ 8 -> <<"grape">>;
+ 9 -> <<"date">>;
+ 10 -> <<"watermelon">>
+ end.
+
+make_design_doc(dreyfus) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/search">>},
+ {<<"language">>, <<"javascript">>},
+ {<<"indexes">>, {[
+ {<<"index">>, {[
+ {<<"analyzer">>, <<"standard">>},
+ {<<"index">>, <<
+ "function (doc) { \n"
+ " index(\"default\", doc._id);\n"
+ " if(doc.color) {\n"
+ " index(\"color\", doc.color);\n"
+ " }\n"
+ " if(doc.size) {\n"
+ " index(\"size\", doc.size);\n"
+ " }\n"
+ "}"
+ >>}
+ ]}}
+ ]}}
+ ]}).
+
+make_replicate_doc(SourceDbName, TargetDbName) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary("replicate_fm_" ++
+ binary_to_list(SourceDbName) ++ "_to_" ++ binary_to_list(TargetDbName))},
+ {<<"source">>, list_to_binary("http://localhost:15984/" ++ SourceDbName)},
+ {<<"target">>, list_to_binary("http://localhost:15984/" ++ TargetDbName)}
+ ]}).
+
+get_rev(DbName, DocId) ->
+ FDI = fabric:get_full_doc_info(DbName, DocId, []),
+ #doc_info{revs = [#rev_info{} = PrevRev | _]} = couch_doc:to_doc_info(FDI),
+ PrevRev#rev_info.rev.
+
+get_revs(DbName, DocId) ->
+ FDI = fabric:get_full_doc_info(DbName, DocId, []),
+ #doc_info{ revs = Revs } = couch_doc:to_doc_info(FDI),
+ [Rev#rev_info.rev || Rev <- Revs].
+
+update_doc(_, _, 0) ->
+ ok;
+update_doc(DbName, DocId, Times) ->
+ Rev = get_rev(DbName, DocId),
+ Doc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"apple">>},
+ {<<"_rev">>, couch_doc:rev_to_str(Rev)},
+ {<<"size">>, 1001 - Times}
+ ]}),
+ {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
+ update_doc(DbName, DocId, Times-1).
+
+delete_docs(DbName, DocIds) ->
+ lists:foreach(
+ fun(DocId) -> ok = delete_doc(DbName, DocId) end,
+ DocIds
+ ).
+
+delete_doc(DbName, DocId) ->
+ Rev = get_rev(DbName, DocId),
+ DDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, DocId},
+ {<<"_rev">>, couch_doc:rev_to_str(Rev)},
+ {<<"_deleted">>, true}
+ ]}),
+ {ok, _} = fabric:update_doc(DbName, DDoc, [?ADMIN_CTX]),
+ ok.
+
+wait_for_replicate(_, _, _, 0) ->
+ couch_log:notice("[~p] wait time out", [?MODULE]),
+ ok;
+wait_for_replicate(DbName, DocIds, ExpectRevCount, TimeOut)
+    when is_list(DocIds) ->
+    [wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut) || DocId <- DocIds];
+wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut) ->
+    FDI = fabric:get_full_doc_info(DbName, DocId, []),
+    #doc_info{ revs = Revs } = couch_doc:to_doc_info(FDI),
+    case erlang:length(Revs) of
+        ExpectRevCount ->
+            couch_log:notice("[~p] expected rev count reached, time used:~p, DocId:~p",
+                [?MODULE, 5-TimeOut, DocId]),
+            ok;
+        _ ->
+            timer:sleep(1000),
+            wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut-1)
+    end,
+ ok.
+
+get_sigs(DbName) ->
+ {ok, DesignDocs} = fabric:design_docs(DbName),
+ lists:usort(lists:flatmap(fun active_sigs/1,
+ [couch_doc:from_json_obj(DD) || DD <- DesignDocs])).
+
+active_sigs(#doc{body={Fields}}=Doc) ->
+ {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
+ {IndexNames, _} = lists:unzip(RawIndexes),
+ [begin
+ {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
+ Index#index.sig
+ end || IndexName <- IndexNames].
diff --git a/src/dreyfus/test/dreyfus_test_util.erl b/src/dreyfus/test/dreyfus_test_util.erl
new file mode 100644
index 000000000..631bc1047
--- /dev/null
+++ b/src/dreyfus/test/dreyfus_test_util.erl
@@ -0,0 +1,13 @@
+-module(dreyfus_test_util).
+
+-compile(export_all).
+
+-include_lib("couch/include/couch_db.hrl").
+
+wait_config_change(Key, Value) ->
+ test_util:wait(fun() ->
+ case dreyfus_config:get(Key) of
+ Value -> ok;
+ _ -> wait
+ end
+ end).
diff --git a/src/dreyfus/test/elixir/mix.exs b/src/dreyfus/test/elixir/mix.exs
new file mode 100644
index 000000000..9b0f642dd
--- /dev/null
+++ b/src/dreyfus/test/elixir/mix.exs
@@ -0,0 +1,30 @@
+defmodule Foo.Mixfile do
+ use Mix.Project
+
+ def project do
+ [
+ app: :foo,
+ version: "0.1.0",
+ elixir: "~> 1.5",
+ start_permanent: Mix.env == :prod,
+ deps: deps()
+ ]
+ end
+
+ # Run "mix help compile.app" to learn about applications.
+ def application do
+ [
+ extra_applications: [:logger]
+ ]
+ end
+
+ # Run "mix help deps" to learn about dependencies.
+ defp deps do
+ [
+ # {:dep_from_hexpm, "~> 0.3.0"},
+ {:httpotion, "~> 3.0"},
+ {:jiffy, "~> 0.14.11"}
+ # {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"},
+ ]
+ end
+end
diff --git a/src/dreyfus/test/elixir/mix.lock b/src/dreyfus/test/elixir/mix.lock
new file mode 100644
index 000000000..ed51e5312
--- /dev/null
+++ b/src/dreyfus/test/elixir/mix.lock
@@ -0,0 +1,5 @@
+%{
+ "httpotion": {:hex, :httpotion, "3.1.0", "14d20d9b0ce4e86e253eb91e4af79e469ad949f57a5d23c0a51b2f86559f6589", [:mix], [{:ibrowse, "~> 4.4", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"},
+ "ibrowse": {:hex, :ibrowse, "4.4.1", "2b7d0637b0f8b9b4182de4bd0f2e826a4da2c9b04898b6e15659ba921a8d6ec2", [:rebar3], [], "hexpm"},
+ "jiffy": {:hex, :jiffy, "0.14.13", "225a9a35e26417832c611526567194b4d3adc4f0dfa5f2f7008f4684076f2a01", [:rebar3], [], "hexpm"},
+}
diff --git a/src/dreyfus/test/elixir/run b/src/dreyfus/test/elixir/run
new file mode 100755
index 000000000..66a5947b7
--- /dev/null
+++ b/src/dreyfus/test/elixir/run
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+cd "$(dirname "$0")"
+mix deps.get
+mix test --trace
diff --git a/src/dreyfus/test/elixir/test/partition_search_test.exs b/src/dreyfus/test/elixir/test/partition_search_test.exs
new file mode 100644
index 000000000..052a41ad1
--- /dev/null
+++ b/src/dreyfus/test/elixir/test/partition_search_test.exs
@@ -0,0 +1,219 @@
+defmodule PartitionSearchTest do
+ use CouchTestCase
+
+ @moduletag :search
+
+ @moduledoc """
+ Test Partition functionality with search
+ """
+
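+  # Ten docs split across two partition keys: even ids under pk1, odd under pk2.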
+ def create_search_docs(db_name, pk1 \\ "foo", pk2 \\ "bar") do
+ docs = for i <- 1..10 do
+ id = if rem(i, 2) == 0 do
+ "#{pk1}:#{i}"
+ else
+ "#{pk2}:#{i}"
+ end
+ %{
+ :_id => id,
+ :value => i,
+ :some => "field"
+ }
+ end
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs}, query: %{w: 3})
+ assert resp.status_code == 201
+ end
+
+ def create_ddoc(db_name, opts \\ %{}) do
+    index_fn = "function(doc) {\n  if (doc.some) {\n    index('some', doc.some);\n  }\n}"
+ default_ddoc = %{
+ indexes: %{
+ books: %{
+ analyzer: %{name: "standard"},
+          index: index_fn
+ }
+ }
+ }
+
+ ddoc = Enum.into(opts, default_ddoc)
+
+ resp = Couch.put("/#{db_name}/_design/library", body: ddoc)
+ assert resp.status_code == 201
+ assert Map.has_key?(resp.body, "ok") == true
+ end
+
+  def get_ids(resp) do
+ %{:body => %{"rows" => rows}} = resp
+ Enum.map(rows, fn row -> row["id"] end)
+ end
+
+ @tag :with_partitioned_db
+ test "Simple query returns partitioned search results", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_partition/foo/_design/library/_search/books"
+ resp = Couch.get(url, query: %{q: "some:field"})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["foo:10", "foo:2", "foo:4", "foo:6", "foo:8"]
+
+ url = "/#{db_name}/_partition/bar/_design/library/_search/books"
+ resp = Couch.get(url, query: %{q: "some:field"})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["bar:1", "bar:3", "bar:5", "bar:7", "bar:9"]
+ end
+
+ @tag :with_partitioned_db
+ test "Only returns docs in partition not those in shard", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name, "foo", "bar42")
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_partition/foo/_design/library/_search/books"
+ resp = Couch.get(url, query: %{q: "some:field"})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["foo:10", "foo:2", "foo:4", "foo:6", "foo:8"]
+ end
+
+ @tag :with_partitioned_db
+ test "Works with bookmarks and limit", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_partition/foo/_design/library/_search/books"
+ resp = Couch.get(url, query: %{q: "some:field", limit: 3})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["foo:10", "foo:2", "foo:4"]
+
+ %{:body => %{"bookmark" => bookmark}} = resp
+
+ resp = Couch.get(url, query: %{q: "some:field", limit: 3, bookmark: bookmark})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["foo:6", "foo:8"]
+
+ resp = Couch.get(url, query: %{q: "some:field", limit: 2000, bookmark: bookmark})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["foo:6", "foo:8"]
+
+ resp = Couch.get(url, query: %{q: "some:field", limit: 2001, bookmark: bookmark})
+ assert resp.status_code == 400
+ end
+
+ @tag :with_db
+ test "Works with limit using POST for on non-partitioned db", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_design/library/_search/books"
+ resp = Couch.post(url, body: %{:q => "some:field", :limit => 1})
+ assert resp.status_code == 200
+ end
+
+ @tag :with_partitioned_db
+ test "Works with limit using POST for partitioned db", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_partition/foo/_design/library/_search/books"
+ resp = Couch.post(url, body: %{:q => "some:field", :limit => 1})
+ assert resp.status_code == 200
+ end
+
+ @tag :with_partitioned_db
+ test "Cannot do global query with partition view", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_design/library/_search/books"
+ resp = Couch.get(url, query: %{q: "some:field"})
+ assert resp.status_code == 400
+ %{:body => %{"reason" => reason}} = resp
+    assert Regex.match?(~r/mandatory for queries to this index\./, reason)
+ end
+
+ @tag :with_partitioned_db
+ test "Cannot do partition query with global search ddoc", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name, options: %{partitioned: false})
+
+ url = "/#{db_name}/_partition/foo/_design/library/_search/books"
+ resp = Couch.get(url, query: %{q: "some:field"})
+ assert resp.status_code == 400
+ %{:body => %{"reason" => reason}} = resp
+ assert reason == "`partition` not supported on this index"
+ end
+
+ @tag :with_db
+ test "normal search on non-partitioned dbs still work", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_design/library/_search/books"
+ resp = Couch.get(url, query: %{q: "some:field"})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["bar:1", "bar:5", "bar:9", "foo:2", "bar:3", "foo:4", "foo:6", "bar:7", "foo:8", "foo:10"]
+ end
+
+ @tag :with_db
+ test "normal search on non-partitioned dbs without limit", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_design/library/_search/books"
+ resp = Couch.get(url, query: %{q: "some:field"})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["bar:1", "bar:5", "bar:9", "foo:2", "bar:3", "foo:4", "foo:6", "bar:7", "foo:8", "foo:10"]
+ end
+
+ @tag :with_db
+ test "normal search on non-partitioned dbs with limit", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_design/library/_search/books"
+ resp = Couch.get(url, query: %{q: "some:field", limit: 3})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["bar:1", "bar:5", "bar:9"]
+ end
+
+ @tag :with_db
+ test "normal search on non-partitioned dbs with over limit", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_design/library/_search/books"
+ resp = Couch.get(url, query: %{q: "some:field", limit: 201})
+ assert resp.status_code == 400
+ end
+
+ @tag :with_partitioned_db
+ test "rejects conflicting partition values", context do
+ db_name = context[:db_name]
+ create_search_docs(db_name)
+ create_ddoc(db_name)
+
+ url = "/#{db_name}/_partition/foo/_design/library/_search/books"
+ resp = Couch.post(url, body: %{q: "some:field", partition: "bar"})
+ assert resp.status_code == 400
+ end
+end
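
The suite above exercises the /{db}/_partition/{partition_key}/_design/{ddoc}/_search/{index} endpoint and shows that partitioned responses keep the usual rows-plus-bookmark envelope. A rough sketch of the same GET issued from Erlang with ibrowse (host, port, database, and credentials are assumptions for a local dev node, not part of this change):

    %% Query a partitioned search index; page via the returned bookmark.
    Url = "http://adm:pass@127.0.0.1:15984/mydb/_partition/foo"
          "/_design/library/_search/books?q=some:field&limit=3",
    {ok, "200", _Headers, Body} = ibrowse:send_req(Url, [], get),
    {Props} = jiffy:decode(Body),
    Rows = couch_util:get_value(<<"rows">>, Props),
    Bookmark = couch_util:get_value(<<"bookmark">>, Props).
    %% Re-sending the request with ?bookmark=Bookmark returns the next page.
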
diff --git a/src/dreyfus/test/elixir/test/test_helper.exs b/src/dreyfus/test/elixir/test/test_helper.exs
new file mode 100644
index 000000000..6eb20e242
--- /dev/null
+++ b/src/dreyfus/test/elixir/test/test_helper.exs
@@ -0,0 +1,4 @@
+Code.require_file "../../../../couchdb/test/elixir/lib/couch.ex", __DIR__
+Code.require_file "../../../../couchdb/test/elixir/test/test_helper.exs", __DIR__
+Code.require_file "../../../../couchdb/test/elixir/test/support/couch_test_case.ex", __DIR__
+Code.require_file "../../../../couchdb/test/elixir/lib/couch/db_test.ex", __DIR__
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index c2c26958c..5d06a8fe3 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -182,7 +182,7 @@ from_ddoc(Db, {Props}) ->
_ ->
?MANGO_ERROR(invalid_query_ddoc_language)
end,
- IdxMods = case module_loaded(dreyfus_index) of
+ IdxMods = case clouseau_rpc:connected() of
true ->
[mango_idx_view, mango_idx_text];
false ->
@@ -268,7 +268,7 @@ cursor_mod(#idx{type = <<"json">>}) ->
cursor_mod(#idx{def = all_docs, type= <<"special">>}) ->
mango_cursor_special;
cursor_mod(#idx{type = <<"text">>}) ->
- case module_loaded(dreyfus_index) of
+ case clouseau_rpc:connected() of
true ->
mango_cursor_text;
false ->
@@ -281,7 +281,7 @@ idx_mod(#idx{type = <<"json">>}) ->
idx_mod(#idx{type = <<"special">>}) ->
mango_idx_special;
idx_mod(#idx{type = <<"text">>}) ->
- case module_loaded(dreyfus_index) of
+ case clouseau_rpc:connected() of
true ->
mango_idx_text;
false ->
@@ -309,7 +309,7 @@ get_idx_def(Opts) ->
get_idx_type(Opts) ->
case proplists:get_value(type, Opts) of
<<"json">> -> <<"json">>;
- <<"text">> -> case module_loaded(dreyfus_index) of
+ <<"text">> -> case clouseau_rpc:connected() of
true ->
<<"text">>;
false ->
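
The substitution above is behavioral, not cosmetic: module_loaded(dreyfus_index) stays true once the module has been loaded, even when no Clouseau (JVM/Lucene) node is attached, while clouseau_rpc:connected() reflects whether search can actually serve requests. Now that dreyfus ships by default, text indexes have to be gated on the live connection. The pattern, condensed into one hypothetical helper:

    %% Advertise text indexes only when Clouseau is reachable.
    available_idx_mods() ->
        case clouseau_rpc:connected() of
            true  -> [mango_idx_view, mango_idx_text];
            false -> [mango_idx_view]
        end.
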
diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl
index ab161469a..274ae11de 100644
--- a/src/mango/src/mango_native_proc.erl
+++ b/src/mango/src/mango_native_proc.erl
@@ -345,7 +345,7 @@ make_text_field_name([P | Rest], Type) ->
validate_index_info(IndexInfo) ->
- IdxTypes = case module_loaded(dreyfus_index) of
+ IdxTypes = case clouseau_rpc:connected() of
true ->
[mango_idx_view, mango_idx_text];
false ->
diff --git a/support/build_js.escript b/support/build_js.escript
index 0b3a859ef..e4cb282ee 100644
--- a/support/build_js.escript
+++ b/support/build_js.escript
@@ -20,6 +20,7 @@
main([]) ->
JsFiles = ["share/server/json2.js",
+ "share/server/dreyfus.js",
"share/server/filter.js",
"share/server/mimeparse.js",
"share/server/render.js",
@@ -30,6 +31,7 @@ main([]) ->
"share/server/loop.js"],
CoffeeFiles = ["share/server/json2.js",
+ "share/server/dreyfus.js",
"share/server/filter.js",
"share/server/mimeparse.js",
"share/server/render.js",