Diffstat (limited to 'src/fabric/src/fabric.erl')
-rw-r--r--  src/fabric/src/fabric.erl  587
1 file changed, 348 insertions, 239 deletions
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index aaaa7e011..5840cd63c 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -17,27 +17,57 @@
-include_lib("couch_mrview/include/couch_mrview.hrl").
% DBs
--export([all_dbs/0, all_dbs/1, create_db/1, create_db/2, delete_db/1,
- delete_db/2, get_db_info/1, get_doc_count/1, get_doc_count/2,
- set_revs_limit/3, set_security/2, set_security/3,
- get_revs_limit/1, get_security/1, get_security/2,
+-export([
+ all_dbs/0, all_dbs/1,
+ create_db/1, create_db/2,
+ delete_db/1,
+ delete_db/2,
+ get_db_info/1,
+ get_doc_count/1, get_doc_count/2,
+ set_revs_limit/3,
+ set_security/2, set_security/3,
+ get_revs_limit/1,
+ get_security/1, get_security/2,
get_all_security/1, get_all_security/2,
- get_purge_infos_limit/1, set_purge_infos_limit/3,
- compact/1, compact/2, get_partition_info/2]).
+ get_purge_infos_limit/1,
+ set_purge_infos_limit/3,
+ compact/1, compact/2,
+ get_partition_info/2
+]).
% Documents
--export([open_doc/3, open_revs/4, get_doc_info/3, get_full_doc_info/3,
- get_missing_revs/2, get_missing_revs/3, update_doc/3, update_docs/3,
- purge_docs/3, att_receiver/3]).
+-export([
+ open_doc/3,
+ open_revs/4,
+ get_doc_info/3,
+ get_full_doc_info/3,
+ get_missing_revs/2, get_missing_revs/3,
+ update_doc/3,
+ update_docs/3,
+ purge_docs/3,
+ att_receiver/3
+]).
% Views
--export([all_docs/4, all_docs/5, changes/4, query_view/3, query_view/4,
- query_view/6, query_view/7, get_view_group_info/2, end_changes/0]).
+-export([
+ all_docs/4, all_docs/5,
+ changes/4,
+ query_view/3, query_view/4, query_view/6, query_view/7,
+ get_view_group_info/2,
+ end_changes/0
+]).
% miscellany
--export([design_docs/1, reset_validation_funs/1, cleanup_index_files/0,
- cleanup_index_files/1, cleanup_index_files_all_nodes/1, dbname/1,
- inactive_index_files/1, db_uuids/1]).
+-export([
+ design_docs/1,
+ reset_validation_funs/1,
+ cleanup_index_files/0,
+ cleanup_index_files/1,
+ cleanup_index_files_all_nodes/1,
+ dbname/1,
+ inactive_index_files/1,
+ db_uuids/1
+]).
-include_lib("fabric/include/fabric.hrl").
@@ -54,19 +84,21 @@ all_dbs() ->
all_dbs(<<>>).
%% @doc returns a list of all database names
--spec all_dbs(Prefix::iodata()) -> {ok, [binary()]}.
+-spec all_dbs(Prefix :: iodata()) -> {ok, [binary()]}.
all_dbs(Prefix) when is_binary(Prefix) ->
Length = byte_size(Prefix),
- MatchingDbs = mem3:fold_shards(fun(#shard{dbname=DbName}, Acc) ->
- case DbName of
- <<Prefix:Length/binary, _/binary>> ->
- [DbName | Acc];
- _ ->
- Acc
- end
- end, []),
+ MatchingDbs = mem3:fold_shards(
+ fun(#shard{dbname = DbName}, Acc) ->
+ case DbName of
+ <<Prefix:Length/binary, _/binary>> ->
+ [DbName | Acc];
+ _ ->
+ Acc
+ end
+ end,
+ []
+ ),
{ok, lists:usort(MatchingDbs)};
-
%% @equiv all_dbs(list_to_binary(Prefix))
all_dbs(Prefix) when is_list(Prefix) ->
all_dbs(list_to_binary(Prefix)).
@@ -76,40 +108,39 @@ all_dbs(Prefix) when is_list(Prefix) ->
%% etc.
-spec get_db_info(dbname()) ->
{ok, [
- {instance_start_time, binary()} |
- {doc_count, non_neg_integer()} |
- {doc_del_count, non_neg_integer()} |
- {purge_seq, non_neg_integer()} |
- {compact_running, boolean()} |
- {disk_size, non_neg_integer()} |
- {disk_format_version, pos_integer()}
+ {instance_start_time, binary()}
+ | {doc_count, non_neg_integer()}
+ | {doc_del_count, non_neg_integer()}
+ | {purge_seq, non_neg_integer()}
+ | {compact_running, boolean()}
+ | {disk_size, non_neg_integer()}
+ | {disk_format_version, pos_integer()}
]}.
get_db_info(DbName) ->
fabric_db_info:go(dbname(DbName)).
%% @doc returns the size of a given partition
--spec get_partition_info(dbname(), Partition::binary()) ->
+-spec get_partition_info(dbname(), Partition :: binary()) ->
{ok, [
- {db_name, binary()} |
- {partition, binary()} |
- {doc_count, non_neg_integer()} |
- {doc_del_count, non_neg_integer()} |
- {sizes, json_obj()}
+ {db_name, binary()}
+ | {partition, binary()}
+ | {doc_count, non_neg_integer()}
+ | {doc_del_count, non_neg_integer()}
+ | {sizes, json_obj()}
]}.
get_partition_info(DbName, Partition) ->
fabric_db_partition_info:go(dbname(DbName), Partition).
-
%% @doc the number of docs in a database
%% @equiv get_doc_count(DbName, <<"_all_docs">>)
get_doc_count(DbName) ->
get_doc_count(DbName, <<"_all_docs">>).
%% @doc the number of design docs in a database
--spec get_doc_count(dbname(), Namespace::binary()) ->
- {ok, non_neg_integer() | null} |
- {error, atom()} |
- {error, atom(), any()}.
+-spec get_doc_count(dbname(), Namespace :: binary()) ->
+ {ok, non_neg_integer() | null}
+ | {error, atom()}
+ | {error, atom(), any()}.
get_doc_count(DbName, <<"_all_docs">>) ->
fabric_db_doc_count:go(dbname(DbName));
get_doc_count(DbName, <<"_design">>) ->
@@ -150,29 +181,38 @@ set_revs_limit(DbName, Limit, Options) when is_integer(Limit), Limit > 0 ->
-spec get_revs_limit(dbname()) -> pos_integer() | no_return().
get_revs_limit(DbName) ->
{ok, Db} = fabric_util:get_db(dbname(DbName), [?ADMIN_CTX]),
- try couch_db:get_revs_limit(Db) after catch couch_db:close(Db) end.
+ try
+ couch_db:get_revs_limit(Db)
+ after
+ catch couch_db:close(Db)
+ end.
%% @doc sets the readers/writers/admin permissions for a database
--spec set_security(dbname(), SecObj::json_obj()) -> ok.
+-spec set_security(dbname(), SecObj :: json_obj()) -> ok.
set_security(DbName, SecObj) ->
fabric_db_meta:set_security(dbname(DbName), SecObj, [?ADMIN_CTX]).
%% @doc sets the readers/writers/admin permissions for a database
--spec set_security(dbname(), SecObj::json_obj(), [option()]) -> ok.
+-spec set_security(dbname(), SecObj :: json_obj(), [option()]) -> ok.
set_security(DbName, SecObj, Options) ->
fabric_db_meta:set_security(dbname(DbName), SecObj, opts(Options)).
%% @doc sets the upper bound for the number of stored purge requests
-spec set_purge_infos_limit(dbname(), pos_integer(), [option()]) -> ok.
-set_purge_infos_limit(DbName, Limit, Options)
- when is_integer(Limit), Limit > 0 ->
+set_purge_infos_limit(DbName, Limit, Options) when
+ is_integer(Limit), Limit > 0
+->
fabric_db_meta:set_purge_infos_limit(dbname(DbName), Limit, opts(Options)).
%% @doc retrieves the upper bound for the number of stored purge requests
-spec get_purge_infos_limit(dbname()) -> pos_integer() | no_return().
get_purge_infos_limit(DbName) ->
{ok, Db} = fabric_util:get_db(dbname(DbName), [?ADMIN_CTX]),
- try couch_db:get_purge_infos_limit(Db) after catch couch_db:close(Db) end.
+ try
+ couch_db:get_purge_infos_limit(Db)
+ after
+ catch couch_db:close(Db)
+ end.
get_security(DbName) ->
get_security(DbName, [?ADMIN_CTX]).
@@ -181,80 +221,88 @@ get_security(DbName) ->
-spec get_security(dbname()) -> json_obj() | no_return().
get_security(DbName, Options) ->
{ok, Db} = fabric_util:get_db(dbname(DbName), opts(Options)),
- try couch_db:get_security(Db) after catch couch_db:close(Db) end.
+ try
+ couch_db:get_security(Db)
+ after
+ catch couch_db:close(Db)
+ end.
%% @doc retrieve the security object for all shards of a database
-spec get_all_security(dbname()) ->
- {ok, [{#shard{}, json_obj()}]} |
- {error, no_majority | timeout} |
- {error, atom(), any()}.
+ {ok, [{#shard{}, json_obj()}]}
+ | {error, no_majority | timeout}
+ | {error, atom(), any()}.
get_all_security(DbName) ->
get_all_security(DbName, []).
%% @doc retrieve the security object for all shards of a database
-spec get_all_security(dbname(), [option()]) ->
- {ok, [{#shard{}, json_obj()}]} |
- {error, no_majority | timeout} |
- {error, atom(), any()}.
+ {ok, [{#shard{}, json_obj()}]}
+ | {error, no_majority | timeout}
+ | {error, atom(), any()}.
get_all_security(DbName, Options) ->
fabric_db_meta:get_all_security(dbname(DbName), opts(Options)).
compact(DbName) ->
- [rexi:cast(Node, {fabric_rpc, compact, [Name]}) ||
- #shard{node=Node, name=Name} <- mem3:shards(dbname(DbName))],
+ [
+ rexi:cast(Node, {fabric_rpc, compact, [Name]})
+ || #shard{node = Node, name = Name} <- mem3:shards(dbname(DbName))
+ ],
ok.
compact(DbName, DesignName) ->
- [rexi:cast(Node, {fabric_rpc, compact, [Name, DesignName]}) ||
- #shard{node=Node, name=Name} <- mem3:shards(dbname(DbName))],
+ [
+ rexi:cast(Node, {fabric_rpc, compact, [Name, DesignName]})
+ || #shard{node = Node, name = Name} <- mem3:shards(dbname(DbName))
+ ],
ok.
% doc operations
%% @doc retrieve the doc with a given id
-spec open_doc(dbname(), docid(), [option()]) ->
- {ok, #doc{}} |
- {not_found, missing | deleted} |
- {timeout, any()} |
- {error, any()} |
- {error, any() | any()}.
+ {ok, #doc{}}
+ | {not_found, missing | deleted}
+ | {timeout, any()}
+ | {error, any()}
+ | {error, any() | any()}.
open_doc(DbName, Id, Options) ->
case proplists:get_value(doc_info, Options) of
- undefined ->
- fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options));
- Else ->
- {error, {invalid_option, {doc_info, Else}}}
+ undefined ->
+ fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options));
+ Else ->
+ {error, {invalid_option, {doc_info, Else}}}
end.
%% @doc retrieve a collection of revisions, possible all
-spec open_revs(dbname(), docid(), [revision()] | all, [option()]) ->
- {ok, [{ok, #doc{}} | {{not_found,missing}, revision()}]} |
- {timeout, any()} |
- {error, any()} |
- {error, any(), any()}.
+ {ok, [{ok, #doc{}} | {{not_found, missing}, revision()}]}
+ | {timeout, any()}
+ | {error, any()}
+ | {error, any(), any()}.
open_revs(DbName, Id, Revs, Options) ->
fabric_doc_open_revs:go(dbname(DbName), docid(Id), Revs, opts(Options)).
%% @doc Retrieves an information on a document with a given id
-spec get_doc_info(dbname(), docid(), [options()]) ->
- {ok, #doc_info{}} |
- {not_found, missing} |
- {timeout, any()} |
- {error, any()} |
- {error, any() | any()}.
+ {ok, #doc_info{}}
+ | {not_found, missing}
+ | {timeout, any()}
+ | {error, any()}
+ | {error, any() | any()}.
get_doc_info(DbName, Id, Options) ->
- Options1 = [doc_info|Options],
+ Options1 = [doc_info | Options],
fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options1)).
%% @doc Retrieves a full information on a document with a given id
-spec get_full_doc_info(dbname(), docid(), [options()]) ->
- {ok, #full_doc_info{}} |
- {not_found, missing | deleted} |
- {timeout, any()} |
- {error, any()} |
- {error, any() | any()}.
+ {ok, #full_doc_info{}}
+ | {not_found, missing | deleted}
+ | {timeout, any()}
+ | {error, any()}
+ | {error, any() | any()}.
get_full_doc_info(DbName, Id, Options) ->
- Options1 = [{doc_info, full}|Options],
+ Options1 = [{doc_info, full} | Options],
fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options1)).
%% @equiv get_missing_revs(DbName, IdsRevs, [])
@@ -262,7 +310,7 @@ get_missing_revs(DbName, IdsRevs) ->
get_missing_revs(DbName, IdsRevs, []).
%% @doc retrieve missing revisions for a list of `{Id, Revs}'
--spec get_missing_revs(dbname(),[{docid(), [revision()]}], [option()]) ->
+-spec get_missing_revs(dbname(), [{docid(), [revision()]}], [option()]) ->
{ok, [{docid(), any(), [any()]}]}.
get_missing_revs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
Sanitized = [idrevs(IdR) || IdR <- IdsRevs],
@@ -274,20 +322,20 @@ get_missing_revs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
{ok, any()} | any().
update_doc(DbName, Doc, Options) ->
case update_docs(DbName, [Doc], opts(Options)) of
- {ok, [{ok, NewRev}]} ->
- {ok, NewRev};
- {accepted, [{accepted, NewRev}]} ->
- {accepted, NewRev};
- {ok, [{{_Id, _Rev}, Error}]} ->
- throw(Error);
- {ok, [Error]} ->
- throw(Error);
- {ok, []} ->
- % replication success
- #doc{revs = {Pos, [RevId | _]}} = doc(DbName, Doc),
- {ok, {Pos, RevId}};
- {error, [Error]} ->
- throw(Error)
+ {ok, [{ok, NewRev}]} ->
+ {ok, NewRev};
+ {accepted, [{accepted, NewRev}]} ->
+ {accepted, NewRev};
+ {ok, [{{_Id, _Rev}, Error}]} ->
+ throw(Error);
+ {ok, [Error]} ->
+ throw(Error);
+ {ok, []} ->
+ % replication success
+ #doc{revs = {Pos, [RevId | _]}} = doc(DbName, Doc),
+ {ok, {Pos, RevId}};
+ {error, [Error]} ->
+ throw(Error)
end.
%% @doc update a list of docs
@@ -296,7 +344,8 @@ update_doc(DbName, Doc, Options) ->
update_docs(DbName, Docs0, Options) ->
try
Docs1 = docs(DbName, Docs0),
- fabric_doc_update:go(dbname(DbName), Docs1, opts(Options)) of
+ fabric_doc_update:go(dbname(DbName), Docs1, opts(Options))
+ of
{ok, Results} ->
{ok, Results};
{accepted, Results} ->
@@ -305,27 +354,34 @@ update_docs(DbName, Docs0, Options) ->
{error, Error};
Error ->
throw(Error)
- catch {aborted, PreCommitFailures} ->
- {aborted, PreCommitFailures}
+ catch
+ {aborted, PreCommitFailures} ->
+ {aborted, PreCommitFailures}
end.
-
%% @doc purge revisions for a list '{Id, Revs}'
%% returns {ok, {PurgeSeq, Results}}
-spec purge_docs(dbname(), [{docid(), [revision()]}], [option()]) ->
- {ok, [{Health, [revision()]}] | {error, any()}} when
+ {ok, [{Health, [revision()]}] | {error, any()}}
+when
Health :: ok | accepted.
purge_docs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
IdsRevs2 = [idrevs(IdRs) || IdRs <- IdsRevs],
fabric_doc_purge:go(dbname(DbName), IdsRevs2, opts(Options)).
-
%% @doc spawns a process to upload attachment data and
%% returns a fabric attachment receiver context tuple
%% with the spawned middleman process, an empty binary,
%% or exits with an error tuple {Error, Arg}
--spec att_receiver(#httpd{}, dbname(), Length :: undefined | chunked | pos_integer() |
- {unknown_transfer_encoding, any()}) ->
+-spec att_receiver(
+ #httpd{},
+ dbname(),
+ Length ::
+ undefined
+ | chunked
+ | pos_integer()
+ | {unknown_transfer_encoding, any()}
+) ->
{fabric_attachment_receiver, pid(), chunked | pos_integer()} | binary().
att_receiver(Req, DbName, Length) ->
fabric_doc_atts:receiver(Req, DbName, Length).
@@ -340,26 +396,28 @@ all_docs(DbName, Callback, Acc, QueryArgs) ->
%% "http://wiki.apache.org/couchdb/HTTP_Document_API#All_Documents">
%% all_docs</a> for details
-spec all_docs(
- dbname(), [{atom(), any()}], callback(), [] | tuple(),
- #mrargs{} | [option()]) ->
+ dbname(),
+ [{atom(), any()}],
+ callback(),
+ [] | tuple(),
+ #mrargs{} | [option()]
+) ->
{ok, any()} | {error, Reason :: term()}.
all_docs(DbName, Options, Callback, Acc0, #mrargs{} = QueryArgs) when
- is_function(Callback, 2) ->
+ is_function(Callback, 2)
+->
fabric_view_all_docs:go(dbname(DbName), opts(Options), QueryArgs, Callback, Acc0);
-
%% @doc convenience function that takes a keylist rather than a record
%% @equiv all_docs(DbName, Callback, Acc0, kl_to_query_args(QueryArgs))
all_docs(DbName, Options, Callback, Acc0, QueryArgs) ->
all_docs(DbName, Options, Callback, Acc0, kl_to_query_args(QueryArgs)).
-
--spec changes(dbname(), callback(), any(), #changes_args{} | [{atom(),any()}]) ->
+-spec changes(dbname(), callback(), any(), #changes_args{} | [{atom(), any()}]) ->
{ok, any()}.
-changes(DbName, Callback, Acc0, #changes_args{}=Options) ->
+changes(DbName, Callback, Acc0, #changes_args{} = Options) ->
Feed = Options#changes_args.feed,
fabric_view_changes:go(dbname(DbName), Feed, Options, Callback, Acc0);
-
%% @doc convenience function, takes keylist instead of record
%% @equiv changes(DbName, Callback, Acc0, kl_to_changes_args(Options))
changes(DbName, Callback, Acc0, Options) ->
@@ -375,22 +433,28 @@ query_view(DbName, DesignName, ViewName, QueryArgs) ->
Callback = fun default_callback/2,
query_view(DbName, DesignName, ViewName, Callback, [], QueryArgs).
-
%% @equiv query_view(DbName, DesignName, [],
%% ViewName, fun default_callback/2, [], QueryArgs)
query_view(DbName, DDoc, ViewName, Callback, Acc, QueryArgs) ->
query_view(DbName, [], DDoc, ViewName, Callback, Acc, QueryArgs).
-
%% @doc execute a given view.
%% There are many additional query args that can be passed to a view,
%% see <a href="http://wiki.apache.org/couchdb/HTTP_view_API#Querying_Options">
%% query args</a> for details.
--spec query_view(dbname(), [{atom(), any()}] | [],
- #doc{} | binary(), iodata(), callback(), any(), #mrargs{}) ->
+-spec query_view(
+ dbname(),
+ [{atom(), any()}] | [],
+ #doc{} | binary(),
+ iodata(),
+ callback(),
+ any(),
+ #mrargs{}
+) ->
any().
-query_view(Db, Options, GroupId, ViewName, Callback, Acc0, QueryArgs)
- when is_binary(GroupId) ->
+query_view(Db, Options, GroupId, ViewName, Callback, Acc0, QueryArgs) when
+ is_binary(GroupId)
+->
DbName = dbname(Db),
{ok, DDoc} = ddoc_cache:open(DbName, <<"_design/", GroupId/binary>>),
query_view(Db, Options, DDoc, ViewName, Callback, Acc0, QueryArgs);
@@ -398,13 +462,13 @@ query_view(Db, Options, DDoc, ViewName, Callback, Acc0, QueryArgs0) ->
DbName = dbname(Db),
View = name(ViewName),
case fabric_util:is_users_db(DbName) of
- true ->
- FakeDb = fabric_util:open_cluster_db(DbName, Options),
- couch_users_db:after_doc_read(DDoc, FakeDb);
- false ->
- ok
+ true ->
+ FakeDb = fabric_util:open_cluster_db(DbName, Options),
+ couch_users_db:after_doc_read(DDoc, FakeDb);
+ false ->
+ ok
end,
- {ok, #mrst{views=Views, language=Lang}} =
+ {ok, #mrst{views = Views, language = Lang}} =
couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
QueryArgs1 = couch_mrview_util:set_view_type(QueryArgs0, View, Views),
QueryArgs2 = fabric_util:validate_args(Db, DDoc, QueryArgs1),
@@ -437,24 +501,24 @@ query_view(Db, Options, DDoc, ViewName, Callback, Acc0, QueryArgs0) ->
%% is running and so forth
-spec get_view_group_info(dbname(), #doc{} | docid()) ->
{ok, [
- {signature, binary()} |
- {language, binary()} |
- {disk_size, non_neg_integer()} |
- {compact_running, boolean()} |
- {updater_running, boolean()} |
- {waiting_commit, boolean()} |
- {waiting_clients, non_neg_integer()} |
- {update_seq, pos_integer()} |
- {purge_seq, non_neg_integer()} |
- {sizes, [
- {active, non_neg_integer()} |
- {external, non_neg_integer()} |
- {file, non_neg_integer()}
- ]} |
- {updates_pending, [
- {minimum, non_neg_integer()} |
- {preferred, non_neg_integer()} |
- {total, non_neg_integer()}
+ {signature, binary()}
+ | {language, binary()}
+ | {disk_size, non_neg_integer()}
+ | {compact_running, boolean()}
+ | {updater_running, boolean()}
+ | {waiting_commit, boolean()}
+ | {waiting_clients, non_neg_integer()}
+ | {update_seq, pos_integer()}
+ | {purge_seq, non_neg_integer()}
+ | {sizes, [
+ {active, non_neg_integer()}
+ | {external, non_neg_integer()}
+ | {file, non_neg_integer()}
+ ]}
+ | {updates_pending, [
+ {minimum, non_neg_integer()}
+ | {preferred, non_neg_integer()}
+ | {total, non_neg_integer()}
]}
]}.
get_view_group_info(DbName, DesignId) ->
@@ -467,23 +531,25 @@ end_changes() ->
%% @doc retrieve all the design docs from a database
-spec design_docs(dbname()) -> {ok, [json_obj()]} | {error, Reason :: term()}.
design_docs(DbName) ->
- Extra = case get(io_priority) of
- undefined -> [];
- Else -> [{io_priority, Else}]
- end,
+ Extra =
+ case get(io_priority) of
+ undefined -> [];
+ Else -> [{io_priority, Else}]
+ end,
QueryArgs0 = #mrargs{
- include_docs=true,
- extra=Extra
+ include_docs = true,
+ extra = Extra
},
QueryArgs = set_namespace(<<"_design">>, QueryArgs0),
- Callback = fun({meta, _}, []) ->
- {ok, []};
- ({row, Props}, Acc) ->
- {ok, [couch_util:get_value(doc, Props) | Acc]};
- (complete, Acc) ->
- {ok, lists:reverse(Acc)};
- ({error, Reason}, _Acc) ->
- {error, Reason}
+ Callback = fun
+ ({meta, _}, []) ->
+ {ok, []};
+ ({row, Props}, Acc) ->
+ {ok, [couch_util:get_value(doc, Props) | Acc]};
+ (complete, Acc) ->
+ {ok, lists:reverse(Acc)};
+ ({error, Reason}, _Acc) ->
+ {error, Reason}
end,
fabric:all_docs(dbname(DbName), [?ADMIN_CTX], Callback, [], QueryArgs).
@@ -492,8 +558,10 @@ design_docs(DbName) ->
%% NOTE: This function probably doesn't belong here as part fo the API
-spec reset_validation_funs(dbname()) -> [reference()].
reset_validation_funs(DbName) ->
- [rexi:cast(Node, {fabric_rpc, reset_validation_funs, [Name]}) ||
- #shard{node=Node, name=Name} <- mem3:shards(DbName)].
+ [
+ rexi:cast(Node, {fabric_rpc, reset_validation_funs, [Name]})
+ || #shard{node = Node, name = Name} <- mem3:shards(DbName)
+ ].
%% @doc clean up index files for all Dbs
-spec cleanup_index_files() -> [ok].
@@ -504,15 +572,19 @@ cleanup_index_files() ->
%% @doc clean up index files for a specific db
-spec cleanup_index_files(dbname()) -> ok.
cleanup_index_files(DbName) ->
- try lists:foreach(
- fun(File) ->
- file:delete(File)
- end, inactive_index_files(DbName))
+ try
+ lists:foreach(
+ fun(File) ->
+ file:delete(File)
+ end,
+ inactive_index_files(DbName)
+ )
catch
error:Error ->
couch_log:error(
"~p:cleanup_index_files. Error: ~p",
- [?MODULE, Error]),
+ [?MODULE, Error]
+ ),
ok
end.
@@ -521,31 +593,48 @@ cleanup_index_files(DbName) ->
inactive_index_files(DbName) ->
{ok, DesignDocs} = fabric:design_docs(DbName),
- ActiveSigs = maps:from_list(lists:map(fun(#doc{id = GroupId}) ->
- {ok, Info} = fabric:get_view_group_info(DbName, GroupId),
- {binary_to_list(couch_util:get_value(signature, Info)), nil}
- end, [couch_doc:from_json_obj(DD) || DD <- DesignDocs])),
-
- FileList = lists:flatmap(fun(#shard{name = ShardName}) ->
- IndexDir = couch_index_util:index_dir(mrview, ShardName),
- filelib:wildcard([IndexDir, "/*"])
- end, mem3:local_shards(dbname(DbName))),
-
- if ActiveSigs =:= [] -> FileList; true ->
- %% <sig>.view and <sig>.compact.view where <sig> is in ActiveSigs
- %% will be excluded from FileList because they are active view
- %% files and should not be deleted.
- lists:filter(fun(FilePath) ->
- not maps:is_key(get_view_sig_from_filename(FilePath), ActiveSigs)
- end, FileList)
+ ActiveSigs = maps:from_list(
+ lists:map(
+ fun(#doc{id = GroupId}) ->
+ {ok, Info} = fabric:get_view_group_info(DbName, GroupId),
+ {binary_to_list(couch_util:get_value(signature, Info)), nil}
+ end,
+ [couch_doc:from_json_obj(DD) || DD <- DesignDocs]
+ )
+ ),
+
+ FileList = lists:flatmap(
+ fun(#shard{name = ShardName}) ->
+ IndexDir = couch_index_util:index_dir(mrview, ShardName),
+ filelib:wildcard([IndexDir, "/*"])
+ end,
+ mem3:local_shards(dbname(DbName))
+ ),
+
+ if
+ ActiveSigs =:= [] ->
+ FileList;
+ true ->
+ %% <sig>.view and <sig>.compact.view where <sig> is in ActiveSigs
+ %% will be excluded from FileList because they are active view
+ %% files and should not be deleted.
+ lists:filter(
+ fun(FilePath) ->
+ not maps:is_key(get_view_sig_from_filename(FilePath), ActiveSigs)
+ end,
+ FileList
+ )
end.
%% @doc clean up index files for a specific db on all nodes
-spec cleanup_index_files_all_nodes(dbname()) -> [reference()].
cleanup_index_files_all_nodes(DbName) ->
- lists:foreach(fun(Node) ->
- rexi:cast(Node, {?MODULE, cleanup_index_files, [DbName]})
- end, mem3:nodes()).
+ lists:foreach(
+ fun(Node) ->
+ rexi:cast(Node, {?MODULE, cleanup_index_files, [DbName]})
+ end,
+ mem3:nodes()
+ ).
%% some simple type validation and transcoding
dbname(DbName) when is_list(DbName) ->
@@ -555,12 +644,13 @@ dbname(DbName) when is_binary(DbName) ->
dbname(Db) ->
try
couch_db:name(Db)
- catch error:badarg ->
- erlang:error({illegal_database_name, Db})
+ catch
+ error:badarg ->
+ erlang:error({illegal_database_name, Db})
end.
%% @doc get db shard uuids
--spec db_uuids(dbname()) -> map().
+-spec db_uuids(dbname()) -> map().
db_uuids(DbName) ->
fabric_db_uuids:go(dbname(DbName)).
@@ -580,15 +670,16 @@ docs(_Db, Docs) ->
doc(_Db, #doc{} = Doc) ->
Doc;
doc(Db0, {_} = Doc) ->
- Db = case couch_db:is_db(Db0) of
- true ->
- Db0;
- false ->
- Shard = hd(mem3:shards(Db0)),
- Props = couch_util:get_value(props, Shard#shard.opts, []),
- {ok, Db1} = couch_db:clustered_db(Db0, [{props, Props}]),
- Db1
- end,
+ Db =
+ case couch_db:is_db(Db0) of
+ true ->
+ Db0;
+ false ->
+ Shard = hd(mem3:shards(Db0)),
+ Props = couch_util:get_value(props, Shard#shard.opts, []),
+ {ok, Db1} = couch_db:clustered_db(Db0, [{props, Props}]),
+ Db1
+ end,
couch_db:doc_from_json_obj_validate(Db, Doc);
doc(_Db, Doc) ->
erlang:error({illegal_doc_format, Doc}).
@@ -616,15 +707,15 @@ opts(Options) ->
add_option(Key, Options) ->
case couch_util:get_value(Key, Options) of
- undefined ->
- case erlang:get(Key) of
undefined ->
- Options;
- Value ->
- [{Key, Value} | Options]
- end;
- _ ->
- Options
+ case erlang:get(Key) of
+ undefined ->
+ Options;
+ Value ->
+ [{Key, Value} | Options]
+ end;
+ _ ->
+ Options
end.
default_callback(complete, Acc) ->
@@ -632,7 +723,7 @@ default_callback(complete, Acc) ->
default_callback(Row, Acc) ->
{ok, [Row | Acc]}.
-is_reduce_view(#mrargs{view_type=ViewType}) ->
+is_reduce_view(#mrargs{view_type = ViewType}) ->
ViewType =:= red;
is_reduce_view({Reduce, _, _}) ->
Reduce =:= red.
@@ -651,29 +742,38 @@ kl_to_query_args(KeyList) ->
%% note that record_info is only known at compile time
%% so the code must be written in this way. For each new
%% record type add a case clause
-lookup_index(Key,RecName) ->
+lookup_index(Key, RecName) ->
Indexes =
case RecName of
- changes_args ->
- lists:zip(record_info(fields, changes_args),
- lists:seq(2, record_info(size, changes_args)));
- mrargs ->
- lists:zip(record_info(fields, mrargs),
- lists:seq(2, record_info(size, mrargs)))
+ changes_args ->
+ lists:zip(
+ record_info(fields, changes_args),
+ lists:seq(2, record_info(size, changes_args))
+ );
+ mrargs ->
+ lists:zip(
+ record_info(fields, mrargs),
+ lists:seq(2, record_info(size, mrargs))
+ )
end,
couch_util:get_value(Key, Indexes).
%% @doc convert a keylist to record with given `RecName'
%% @see lookup_index
-kl_to_record(KeyList,RecName) ->
- Acc0 = case RecName of
- changes_args -> #changes_args{};
- mrargs -> #mrargs{}
- end,
- lists:foldl(fun({Key, Value}, Acc) ->
- Index = lookup_index(couch_util:to_existing_atom(Key),RecName),
- setelement(Index, Acc, Value)
- end, Acc0, KeyList).
+kl_to_record(KeyList, RecName) ->
+ Acc0 =
+ case RecName of
+ changes_args -> #changes_args{};
+ mrargs -> #mrargs{}
+ end,
+ lists:foldl(
+ fun({Key, Value}, Acc) ->
+ Index = lookup_index(couch_util:to_existing_atom(Key), RecName),
+ setelement(Index, Acc, Value)
+ end,
+ Acc0,
+ KeyList
+ ).
set_namespace(NS, #mrargs{extra = Extra} = Args) ->
Args#mrargs{extra = [{namespace, NS} | Extra]}.
@@ -686,11 +786,16 @@ get_view_sig_from_filename(FilePath) ->
update_doc_test_() ->
{
- "Update doc tests", {
- setup, fun setup/0, fun teardown/1,
- fun(Ctx) -> [
- should_throw_conflict(Ctx)
- ] end
+ "Update doc tests",
+ {
+ setup,
+ fun setup/0,
+ fun teardown/1,
+ fun(Ctx) ->
+ [
+ should_throw_conflict(Ctx)
+ ]
+ end
}
}.
@@ -699,35 +804,39 @@ should_throw_conflict(Doc) ->
?assertThrow(conflict, update_doc(<<"test-db">>, Doc, []))
end).
-
setup() ->
Doc = #doc{
id = <<"test_doc">>,
- revs = {3, [<<5,68,252,180,43,161,216,223,26,119,71,219,212,229,
- 159,113>>]},
- body = {[{<<"foo">>,<<"asdf">>},{<<"author">>,<<"tom">>}]},
- atts = [], deleted = false, meta = []
+ revs = {3, [<<5, 68, 252, 180, 43, 161, 216, 223, 26, 119, 71, 219, 212, 229, 159, 113>>]},
+ body = {[{<<"foo">>, <<"asdf">>}, {<<"author">>, <<"tom">>}]},
+ atts = [],
+ deleted = false,
+ meta = []
},
ok = application:ensure_started(config),
ok = meck:expect(mem3, shards, fun(_, _) -> [] end),
ok = meck:expect(mem3, quorum, fun(_) -> 1 end),
ok = meck:expect(rexi, cast, fun(_, _) -> ok end),
- ok = meck:expect(rexi_utils, recv,
+ ok = meck:expect(
+ rexi_utils,
+ recv,
fun(_, _, _, _, _, _) ->
{ok, {error, [{Doc, conflict}]}}
- end),
- ok = meck:expect(couch_util, reorder_results,
+ end
+ ),
+ ok = meck:expect(
+ couch_util,
+ reorder_results,
fun(_, [{_, Res}]) ->
[Res]
- end),
+ end
+ ),
ok = meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
ok = meck:expect(rexi_monitor, stop, fun(_) -> ok end),
Doc.
-
teardown(_) ->
meck:unload(),
ok = application:stop(config).
-
-endif.