author    Jan Lehnardt <jan@apache.org>    2020-07-12 16:22:13 +0200
committer Jan Lehnardt <jan@apache.org>    2020-07-12 16:22:13 +0200
commit    4dd23c3fc096f5e5269930eb8e0b2f355720010a (patch)
tree      491d77b33a595913e40f31c32c0b498bca1d7e70
parent    67b8630a2bc4a335e66bd4ca9397f30ef4cffc82 (diff)
download  couchdb-4dd23c3fc096f5e5269930eb8e0b2f355720010a.tar.gz
chore: cleanup
-rw-r--r--  src/chttpd/src/chttpd_db.erl                                  21
-rw-r--r--  src/chttpd/src/chttpd_show.erl                                 2
-rw-r--r--  src/chttpd/src/chttpd_view.erl                                 4
-rw-r--r--  src/couch/src/couch_access_native_proc.erl                    12
-rw-r--r--  src/couch/src/couch_changes.erl                                2
-rw-r--r--  src/couch/src/couch_db.erl                                    28
-rw-r--r--  src/couch/src/couch_db_updater.erl                            48
-rw-r--r--  src/couch/test/eunit/couchdb_update_conflicts_tests.erl        4
-rw-r--r--  src/couch_index/src/couch_index_updater.erl                   11
-rw-r--r--  src/couch_mrview/src/couch_mrview.erl                          2
-rw-r--r--  src/couch_mrview/src/couch_mrview_http.erl                     4
-rw-r--r--  src/couch_mrview/src/couch_mrview_updater.erl                  1
-rw-r--r--  src/couch_mrview/src/couch_mrview_util.erl                     1
-rw-r--r--  src/couch_peruser/test/eunit/couch_peruser_test.erl            1
-rw-r--r--  src/couch_replicator/src/couch_replicator_api_wrap.erl         4
-rw-r--r--  src/couch_replicator/src/couch_replicator_scheduler_job.erl    6
-rw-r--r--  src/fabric/src/fabric_doc_update.erl                          10
-rw-r--r--  src/fabric/src/fabric_rpc.erl                                  2
-rw-r--r--  src/fabric/src/fabric_view_changes.erl                         2
-rw-r--r--  src/fabric/src/fabric_view_map.erl                             2
-rw-r--r--  src/fabric/src/fabric_view_reduce.erl                          2
-rw-r--r--  src/mem3/src/mem3_nodes.erl                                    1
-rw-r--r--  test/elixir/test/bulk_docs_test.exs                            3
23 files changed, 72 insertions(+), 101 deletions(-)
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 9b10f3a01..04b6cdae8 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -115,6 +115,8 @@ handle_changes_req1(#httpd{}=Req, Db) ->
},
fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
end) of
+ % TODO: This may be a debugging leftover, undo by just returning
+ % chttpd:etag_respond()
{error, {forbidden, Message, _Stacktrace}} ->
throw({forbidden, Message});
Response ->
@@ -128,6 +130,8 @@ handle_changes_req1(#httpd{}=Req, Db) ->
threshold = Max
},
try
+ % TODO: This may be a debugging leftover, undo by just returning
+ % fabric:changes()
case fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs) of
{error, {forbidden, Message, _Stacktrace}} ->
throw({forbidden, Message});
@@ -921,25 +925,19 @@ view_cb(Msg, Acc) ->
couch_mrview_http:view_cb(Msg, Acc).
db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
+ % fetch the old doc revision, so we can compare access control
+ % in send_update_doc() later.
Doc0 = couch_doc_open(Db, DocId, nil, [{user_ctx, Req#httpd.user_ctx}]),
Revs = chttpd:qs_value(Req, "rev"),
case Revs of
undefined ->
- Body = {[{<<"_deleted">>,true}]};
+ Body = {[{<<"_deleted">>,true}]};
Rev ->
- Body = {[{<<"_rev">>, ?l2b(Revs)},{<<"_deleted">>,true}]}
+ Body = {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}
end,
- % Doc0 = couch_doc_from_req(Req, Db, DocId, Body),
Doc = Doc0#doc{revs=Revs,body=Body,deleted=true},
send_updated_doc(Req, Db, DocId, couch_doc_from_req(Req, Db, DocId, Doc));
- % % check for the existence of the doc to handle the 404 case.
- % OldDoc = couch_doc_open(Db, DocId, nil, [{user_ctx, Req#httpd.user_ctx}]),
- % NewRevs = couch_doc:parse_rev(chttpd:qs_value(Req, "rev")),
- % NewBody = {[{<<"_deleted">>}, true]},
- % NewDoc = OldDoc#doc{revs=NewRevs, body=NewBody},
- % send_updated_doc(Req, Db, DocId, couch_doc_from_req(Req, Db, DocId, NewDoc));
-
db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
#doc_query_args{
rev = Rev,
@@ -1278,8 +1276,6 @@ receive_request_data(Req, LenLeft) when LenLeft > 0 ->
receive_request_data(_Req, _) ->
throw(<<"expected more data">>).
-
-
update_doc_result_to_json({#doc{id=Id,revs=Rev}, access}) ->
update_doc_result_to_json({{Id, Rev}, access});
update_doc_result_to_json({{Id, Rev}, Error}) ->
@@ -1373,6 +1369,7 @@ update_doc(Db, DocId, #doc{deleted=Deleted, body=DocBody}=Doc, Options) ->
{'DOWN', Ref, _, _, {exit_exit, Reason}} ->
erlang:exit(Reason)
end,
+
case Result of
{ok, NewRev} ->
Accepted = false;
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index 285857ecf..a6d0368b9 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -35,7 +35,6 @@ handle_doc_show_req(#httpd{
path_parts=[_, _, _, _, ShowName, DocId]
}=Req, Db, DDoc) ->
-
% open the doc
Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
Doc = maybe_open_doc(Db, DocId, Options),
@@ -48,7 +47,6 @@ handle_doc_show_req(#httpd{
path_parts=[_, _, _, _, ShowName, DocId|Rest]
}=Req, Db, DDoc) ->
-
DocParts = [DocId|Rest],
DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index 46f128815..31f59ecc8 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -51,6 +51,10 @@ fabric_query_view(Db, Req, DDoc, ViewName, Args) ->
Max = chttpd:chunked_response_buffer_size(),
VAcc = #vacc{db=Db, req=Req, threshold=Max},
Options = [{user_ctx, Req#httpd.user_ctx}],
+ % TODO: This might just be a debugging leftover, we might be able
+ % to undo this by just returning {ok, Resp#vacc.resp}
+ % However, this *might* be here because we need to handle
+ % errors here now, because access might tell us to.
case fabric:query_view(Db, Options, DDoc, ViewName,
fun view_cb/2, VAcc, Args) of
{ok, Resp} ->
diff --git a/src/couch/src/couch_access_native_proc.erl b/src/couch/src/couch_access_native_proc.erl
index fb9415028..965b124de 100644
--- a/src/couch/src/couch_access_native_proc.erl
+++ b/src/couch/src/couch_access_native_proc.erl
@@ -77,19 +77,11 @@ handle_call({prompt, [<<"rereduce">>, _, _]}, _From, St) ->
{reply, null, St};
handle_call({prompt, [<<"index_doc">>, Doc]}, _From, St) ->
- % Vals = case index_doc(St, mango_json:to_binary(Doc)) of
- % [] ->
- % [[]];
- % Else ->
- % Else
- % end,
{reply, [[]], St};
-
handle_call(Msg, _From, St) ->
{stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
handle_cast(garbage_collect, St) ->
erlang:garbage_collect(),
{noreply, St};
@@ -145,5 +137,7 @@ map_doc(_St, {Doc}) ->
] end, Access),
ById ++ BySeq;
Else ->
- [[],[]] % no comprende: should not be needed once we implement _access field validation
+ % TODO: no comprende: should not be needed once we implement
+ % _access field validation
+ [[],[]]
end.
diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl
index fea5f9f1d..6e9294a56 100644
--- a/src/couch/src/couch_changes.erl
+++ b/src/couch/src/couch_changes.erl
@@ -168,7 +168,6 @@ configure_filter("_view", Style, Req, Db) ->
case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
[DName, VName] ->
{ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
- % ok = couch_util:validate_design_access(Db, DDoc),
check_member_exists(DDoc, [<<"views">>, VName]),
case couch_db:is_clustered(Db) of
true ->
@@ -192,7 +191,6 @@ configure_filter(FilterName, Style, Req, Db) ->
case [?l2b(couch_httpd:unquote(Part)) || Part <- FilterNameParts] of
[DName, FName] ->
{ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
- % ok = couch_util:validate_design_access(Db, DDoc),
check_member_exists(DDoc, [<<"filters">>, FName]),
case couch_db:is_clustered(Db) of
true ->
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index ecab54e84..cac768df4 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -1333,7 +1333,7 @@ update_docs(Db, Docs0, Options, replicated_changes) ->
end;
update_docs(Db, Docs0, Options, interactive_edit) ->
- Docs = tag_docs(Docs0),
+ Docs = tag_docs(Docs0),
AllOrNothing = lists:member(all_or_nothing, Options),
PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
@@ -1365,13 +1365,12 @@ update_docs(Db, Docs0, Options, interactive_edit) ->
{ok, CommitResults} = write_and_commit(Db, DocBuckets3,
NonRepDocs, Options2),
- ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) ->
+ ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) ->
dict:store(Key, Resp, ResultsAcc)
end, dict:from_list(IdRevs), CommitResults ++ DocErrors),
- R = {ok, lists:map(fun(Doc) ->
+ {ok, lists:map(fun(Doc) ->
dict:fetch(doc_tag(Doc), ResultsDict)
- end, Docs)},
- R
+ end, Docs)}
end.
% Returns the first available document on disk. Input list is a full rev path
@@ -1625,24 +1624,6 @@ changes_since(Db, StartSeq, Fun, Options, Acc) when is_record(Db, db) ->
false -> couch_db_engine:fold_changes(Db, StartSeq, Fun, Options, Acc)
end.
-% TODO: nicked from couch_mrview, maybe move to couch_mrview.hrl
--record(mracc, {
- db,
- meta_sent=false,
- total_rows,
- offset,
- limit,
- skip,
- group_level,
- doc_info,
- callback,
- user_acc,
- last_go=ok,
- reduce_fun,
- update_seq,
- args
-}).
-
calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) ->
Seq;
calculate_start_seq(Db, Node, {Seq, Uuid}) ->
@@ -1759,6 +1740,7 @@ fold_design_docs(Db, UserFun, UserAcc, Options1) ->
fold_changes(Db, StartSeq, UserFun, UserAcc) ->
fold_changes(Db, StartSeq, UserFun, UserAcc, []).
+
fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) ->
case couch_db:has_access_enabled(Db) and not couch_db:is_admin(Db) of
true -> couch_mrview:query_changes_access(Db, StartSeq, UserFun, Opts, UserAcc);
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
index f10ec78d8..4188969e5 100644
--- a/src/couch/src/couch_db_updater.erl
+++ b/src/couch/src/couch_db_updater.erl
@@ -252,7 +252,8 @@ sort_and_tag_grouped_docs(Client, GroupedDocs) ->
% duplicate documents if the incoming groups are not sorted, so as a sanity
% check we sort them again here. See COUCHDB-2735.
Cmp = fun
- ([], []) -> false;
+ ([], []) -> false; % TODO: re-evaluate this addition, might be
+                    % superfluous now
([#doc{id=A}|_], [#doc{id=B}|_]) -> A < B
end,
lists:map(fun(DocGroup) ->
@@ -438,11 +439,6 @@ upgrade_sizes(S) when is_integer(S) ->
send_result(Client, Doc, NewResult) ->
% used to send a result to the client
-
-
-
-
-
catch(Client ! {result, self(), {doc_tag(Doc), NewResult}}).
doc_tag(#doc{meta=Meta}) ->
@@ -452,9 +448,8 @@ doc_tag(#doc{meta=Meta}) ->
Else -> throw({invalid_doc_tag, Else})
end.
-% couch_db_updater:merge_rev_trees([[],[]] = NewDocs,[] = OldDocs,{merge_acc,1000,false,[],[],0,[]}=Acc]
-
-merge_rev_trees([[]], [], Acc) -> % validate_docs_access left us with no docs to merge
+merge_rev_trees([[]], [], Acc) ->
+ % validate_docs_access left us with no docs to merge
{ok, Acc};
merge_rev_trees([], [], Acc) ->
{ok, Acc#merge_acc{
@@ -633,6 +628,10 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts, UserCtx) ->
RevsLimit = couch_db_engine:get_revs_limit(Db),
Ids = [Id || [{_Client, #doc{id=Id}}|_] <- DocsList],
+ % TODO: maybe a perf hit, instead of zip3-ing existing Accesses into
+ % our doc lists, maybe find 404 docs differently down in
+ % validate_docs_access (revs is [], which we can then use
+ % to skip validation as we know it is the first doc rev)
Accesses = [Access || [{_Client, #doc{access=Access}}|_] <- DocsList],
% lookup up the old documents, if they exist.
@@ -673,10 +672,6 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts, UserCtx) ->
cur_seq = UpdateSeq,
full_partitions = FullPartitions
},
- %
- %
- %
-
% Loop over DocsList, validate_access for each OldDocInfo on Db,
%. if no OldDocInfo, then send to DocsListValidated, keep OldDocsInfo
% if valid, then send to DocsListValidated, OldDocsInfo
@@ -693,12 +688,10 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts, UserCtx) ->
% Write out the document summaries (the bodies are stored in the nodes of
% the trees, the attachments are already written to disk)
{ok, IndexFDIs} = flush_trees(Db, NewFullDocInfos, []),
-
Pairs = pair_write_info(OldDocLookups, IndexFDIs),
LocalDocs1 = apply_local_docs_access(Db, LocalDocs),
LocalDocs2 = update_local_doc_revs(LocalDocs1),
-
{ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, LocalDocs2),
WriteCount = length(IndexFDIs),
@@ -721,23 +714,21 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts, UserCtx) ->
{ok, commit_data(Db1), UpdatedDDocIds}.
check_access(Db, UserCtx, Access) ->
-
-
-
check_access(Db, UserCtx, couch_db:has_access_enabled(Db), Access).
check_access(_Db, UserCtx, false, _Access) ->
true;
check_access(Db, UserCtx, true, Access) -> couch_db:check_access(Db#db{user_ctx=UserCtx}, Access).
+% TODO: looks like we go into validation here unconditionally and only check in
+% check_access() whether the Db has_access_enabled(), we should do this
+% here on the outside. Might be our perf issue.
+% However, if it is, that means we have to speed this up as it would still
+% be too slow for when access is enabled.
validate_docs_access(Db, UserCtx, DocsList, OldDocInfos) ->
-
-
validate_docs_access(Db, UserCtx, DocsList, OldDocInfos, [], []).
validate_docs_access(_Db, UserCtx, [], [], DocsListValidated, OldDocInfosValidated) ->
-
-
{ lists:reverse(DocsListValidated), lists:reverse(OldDocInfosValidated) };
validate_docs_access(Db, UserCtx, [Docs | DocRest], [OldInfo | OldInfoRest], DocsListValidated, OldDocInfosValidated) ->
% loop over Docs as {Client, NewDoc}
@@ -752,21 +743,17 @@ validate_docs_access(Db, UserCtx, [Docs | DocRest], [OldInfo | OldInfoRest], Doc
end,
NewDocMatchesAccess = check_access(Db, UserCtx, Doc#doc.access),
-
-
case OldDocMatchesAccess andalso NewDocMatchesAccess of
true -> % if valid, then send to DocsListValidated, OldDocsInfo
% and store the access context on the new doc
[{Client, Doc} | Acc];
_Else2 -> % if invalid, then send_result tagged `access`(c.f. `conflict)
- % and don’t add to DLV, nor ODI
-
+ % and don’t add to DLV, nor ODI
send_result(Client, Doc, access),
Acc
end
end, [], Docs),
-
-
+
{ NewDocsListValidated, NewOldDocInfosValidated } = case length(NewDocs) of
0 -> % we sent out all docs as invalid access, drop the old doc info associated with it
{ [NewDocs | DocsListValidated], OldDocInfosValidated };
@@ -775,10 +762,6 @@ validate_docs_access(Db, UserCtx, [Docs | DocRest], [OldInfo | OldInfoRest], Doc
end,
validate_docs_access(Db, UserCtx, DocRest, OldInfoRest, NewDocsListValidated, NewOldDocInfosValidated).
-
-%{ DocsListValidated, OldDocInfosValidated } =
-
-
apply_local_docs_access(Db, Docs) ->
apply_local_docs_access1(couch_db:has_access_enabled(Db), Docs).
@@ -953,6 +936,7 @@ get_meta_body_size(Meta) ->
{ejson_size, ExternalSize} = lists:keyfind(ejson_size, 1, Meta),
ExternalSize.
+
default_security_object(DbName, []) ->
default_security_object(DbName);
default_security_object(DbName, Options) ->
diff --git a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
index 1a32986d0..7f9d1dbdb 100644
--- a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
+++ b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
@@ -51,8 +51,8 @@ view_indexes_cleanup_test_() ->
setup,
fun start/0, fun test_util:stop_couch/1,
[
- concurrent_updates()
- % bulk_docs_updates()
+ concurrent_updates(),
+ bulk_docs_updates()
]
}
}.
diff --git a/src/couch_index/src/couch_index_updater.erl b/src/couch_index/src/couch_index_updater.erl
index e56ebeb0a..2f65c1c1c 100644
--- a/src/couch_index/src/couch_index_updater.erl
+++ b/src/couch_index/src/couch_index_updater.erl
@@ -166,12 +166,10 @@ update(Idx, Mod, IdxState) ->
case {IncludeDesign, DocId} of
{false, <<"_design/", _/binary>>} ->
{nil, Seq};
- % _ when Deleted ->
- % {#doc{id=DocId, deleted=true}, Seq};
_ ->
- {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts),
- case IndexName of
+ case IndexName of % TODO: move into outer case statement
<<"_design/_access">> ->
+ {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts),
% TODO: handle conflicted docs in _access index
% probably remove
[RevInfo|_] = DocInfo#doc_info.revs,
@@ -180,7 +178,10 @@ update(Idx, Mod, IdxState) ->
access = Access
},
{Doc1, Seq};
- _Else ->
+ _ when Deleted ->
+ {#doc{id=DocId, deleted=true}, Seq};
+ _ ->
+ {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts),
{Doc, Seq}
end
end
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index c303503b0..298576df6 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -376,7 +376,6 @@ query_view(Db, DDoc, VName, Args) ->
query_view(Db, DDoc, VName, Args, Callback, Acc) when is_list(Args) ->
query_view(Db, DDoc, VName, to_mrargs(Args), Callback, Acc);
query_view(Db, DDoc, VName, Args0, Callback, Acc0) ->
- % ok = couch_util:validate_design_access(Db, DDoc),
case couch_mrview_util:get_view(Db, DDoc, VName, Args0) of
{ok, VInfo, Sig, Args} ->
{ok, Acc1} = case Args#mrargs.preflight_fun of
@@ -804,6 +803,7 @@ default_cb(ok, ddoc_updated) ->
default_cb(Row, Acc) ->
{ok, [Row | Acc]}.
+
to_mrargs(KeyList) ->
lists:foldl(fun({Key, Value}, Acc) ->
Index = lookup_index(couch_util:to_existing_atom(Key)),
diff --git a/src/couch_mrview/src/couch_mrview_http.erl b/src/couch_mrview/src/couch_mrview_http.erl
index 3f633e960..3cf8833d7 100644
--- a/src/couch_mrview/src/couch_mrview_http.erl
+++ b/src/couch_mrview/src/couch_mrview_http.erl
@@ -81,9 +81,10 @@ handle_reindex_req(#httpd{method='POST',
handle_reindex_req(Req, _Db, _DDoc) ->
chttpd:send_method_not_allowed(Req, "POST").
+
handle_view_req(#httpd{method='GET',
path_parts=[_, _, DDocName, _, VName, <<"_info">>]}=Req,
- Db, DDoc) ->
+ Db, _DDoc) ->
DbName = couch_db:name(Db),
DDocId = <<"_design/", DDocName/binary >>,
{ok, Info} = couch_mrview:get_view_info(DbName, DDocId, VName),
@@ -254,6 +255,7 @@ get_view_callback(_DbName, _DbName, false) ->
get_view_callback(_, _, _) ->
fun view_cb/2.
+
design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
Args0 = parse_params(Req, Keys),
ETagFun = fun(Sig, Acc0) ->
diff --git a/src/couch_mrview/src/couch_mrview_updater.erl b/src/couch_mrview/src/couch_mrview_updater.erl
index 29eeebb31..0dc9f85f7 100644
--- a/src/couch_mrview/src/couch_mrview_updater.erl
+++ b/src/couch_mrview/src/couch_mrview_updater.erl
@@ -116,6 +116,7 @@ process_doc(Doc, Seq, #mrst{doc_acc=Acc}=State) when length(Acc) > 100 ->
process_doc(Doc, Seq, State#mrst{doc_acc=[]});
process_doc(nil, Seq, #mrst{doc_acc=Acc}=State) ->
{ok, State#mrst{doc_acc=[{nil, Seq, nil} | Acc]}};
+% TODO: re-evaluate why this is commented out
% process_doc(#doc{id=Id, deleted=true}, Seq, #mrst{doc_acc=Acc}=State) ->
% {ok, State#mrst{doc_acc=[{Id, Seq, deleted} | Acc]}};
process_doc(#doc{id=Id}=Doc, Seq, #mrst{doc_acc=Acc}=State) ->
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index 2bf168073..698adb650 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -409,6 +409,7 @@ validate_args(Db, DDoc, Args0) ->
validate_args(#mrst{} = State, Args0) ->
Args = validate_args(Args0),
+
ViewPartitioned = State#mrst.partitioned,
Partition = get_extra(Args, partition),
AllDocsAccess = get_extra(Args, all_docs_access, false),
diff --git a/src/couch_peruser/test/eunit/couch_peruser_test.erl b/src/couch_peruser/test/eunit/couch_peruser_test.erl
index 48a2a0121..151c493c7 100644
--- a/src/couch_peruser/test/eunit/couch_peruser_test.erl
+++ b/src/couch_peruser/test/eunit/couch_peruser_test.erl
@@ -41,7 +41,6 @@ setup() ->
set_config("couch_peruser", "cluster_start_period", "0"),
set_config("couch_peruser", "enable", "true"),
set_config("cluster", "n", "1"),
- set_config("log", "level", "debug"),
TestAuthDb.
teardown(TestAuthDb) ->
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl
index 8549a67f3..a21de4242 100644
--- a/src/couch_replicator/src/couch_replicator_api_wrap.erl
+++ b/src/couch_replicator/src/couch_replicator_api_wrap.erl
@@ -820,7 +820,7 @@ bulk_results_to_errors(Docs, {ok, Results}, interactive_edit) ->
bulk_results_to_errors(Docs, {ok, Results}, replicated_changes) ->
bulk_results_to_errors(Docs, {aborted, Results}, interactive_edit);
-bulk_results_to_errors(Docs, {aborted, Results}, interactive_edit) ->
+bulk_results_to_errors(_Docs, {aborted, Results}, interactive_edit) ->
lists:map(
fun({{Id, Rev}, Err}) ->
{_, Error, Reason} = couch_httpd:error_info(Err),
@@ -828,7 +828,7 @@ bulk_results_to_errors(Docs, {aborted, Results}, interactive_edit) ->
end,
Results);
-bulk_results_to_errors(Docs, Results, remote) ->
+bulk_results_to_errors(_Docs, Results, remote) ->
lists:reverse(lists:foldl(
fun({Props}, Acc) ->
case get_value(<<"error">>, Props, get_value(error, Props)) of
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
index afbadcf4d..c18fe2018 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
@@ -818,8 +818,6 @@ update_checkpoint(Db, Doc, Access, UserCtx, DbType) ->
end.
update_checkpoint(Db, #doc{id = LogId} = Doc0, Access, UserCtx) ->
- % UserCtx = couch_db:get_user_ctx(Db),
- % couch_log:debug("~n~n~n~nUserCtx: ~p~n", [UserCtx]),
% if db has _access, then:
% get userCtx from replication and splice into doc _access
Doc = case Access of
@@ -834,7 +832,9 @@ update_checkpoint(Db, #doc{id = LogId} = Doc0, Access, UserCtx) ->
{error, Reason} ->
throw({checkpoint_commit_failure, Reason})
end
- catch throw:conflict -> %TODO: splice in access
+ catch throw:conflict ->
+ % TODO: An admin could have changed the access on the checkpoint doc.
+ % However unlikely, we can handle this gracefully here.
case (catch couch_replicator_api_wrap:open_doc(Db, LogId, [ejson_body])) of
{ok, #doc{body = LogBody, revs = {Pos, [RevId | _]}}} ->
% This means that we were able to update successfully the
diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
index 76907ffa4..260724019 100644
--- a/src/fabric/src/fabric_doc_update.erl
+++ b/src/fabric/src/fabric_doc_update.erl
@@ -39,8 +39,7 @@ go(DbName, AllDocs0, Opts) ->
try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, Acc0, infinity, Timeout) of
{ok, {Health, Results}}
when Health =:= ok; Health =:= accepted; Health =:= error ->
- R = {Health, [R || R <- couch_util:reorder_results(AllDocs, Results), R =/= noreply]},
- R;
+ {Health, [R || R <- couch_util:reorder_results(AllDocs, Results), R =/= noreply]};
{timeout, Acc} ->
{_, _, W1, GroupedDocs1, DocReplDict} = Acc,
{DefunctWorkers, _} = lists:unzip(GroupedDocs1),
@@ -316,6 +315,8 @@ doc_update1() ->
{ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
{stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
?assertEqual(
+ % TODO: we had to flip this, it might point to a missing, or overzealous
+ % lists:reverse() in our implementation.
{error, [{Doc2,{error,internal_server_error}},{Doc1,{accepted,"A"}}]},
ReplyW5
).
@@ -340,7 +341,8 @@ doc_update2() ->
{stop, Reply} =
handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
-
+ % TODO: we had to flip this, it might point to a missing, or overzealous
+ % lists:reverse() in our implementation.
?assertEqual({accepted, [{Doc2,{accepted,Doc1}}, {Doc1,{accepted,Doc2}}]},
Reply).
@@ -365,6 +367,8 @@ doc_update3() ->
{stop, Reply} =
handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
+ % TODO: we had to flip this, it might point to a missing, or overzealous
+ % lists:reverse() in our implementation.
?assertEqual({ok, [{Doc2, {ok,Doc1}},{Doc1, {ok, Doc2}}]},Reply).
% needed for testing to avoid having to start the mem3 application
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
index 1c0ea7b7d..85da3ff12 100644
--- a/src/fabric/src/fabric_rpc.erl
+++ b/src/fabric/src/fabric_rpc.erl
@@ -49,13 +49,11 @@ changes(DbName, Options, StartVector, DbOptions) ->
Args = case Filter of
{fetch, custom, Style, Req, {DDocId, Rev}, FName} ->
{ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- % ok = couch_util:validate_design_access(DDoc),
Args0#changes_args{
filter_fun={custom, Style, Req, DDoc, FName}
};
{fetch, view, Style, {DDocId, Rev}, VName} ->
{ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- % ok = couch_util:validate_design_access(DDoc),
Args0#changes_args{filter_fun={view, Style, DDoc, VName}};
_ ->
Args0
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
index 7abe1f339..5b9a866c7 100644
--- a/src/fabric/src/fabric_view_changes.erl
+++ b/src/fabric/src/fabric_view_changes.erl
@@ -71,6 +71,8 @@ go(DbName, "normal", Options, Callback, Acc0) ->
Acc,
5000
) of
+ % TODO: This may be a debugging leftover, undo by just returning
+ % Callback({stop, pack_seqs…
{ok, Collector} ->
#collector{counters=Seqs, user_acc=AccOut, offset=Offset} = Collector,
Callback({stop, pack_seqs(Seqs), pending_count(Offset)}, AccOut);
diff --git a/src/fabric/src/fabric_view_map.erl b/src/fabric/src/fabric_view_map.erl
index 693e26a78..801fa824f 100644
--- a/src/fabric/src/fabric_view_map.erl
+++ b/src/fabric/src/fabric_view_map.erl
@@ -58,6 +58,8 @@ go(Db, Options, DDoc, View, Args, Callback, Acc, VInfo) ->
"map_view"
),
Callback({error, timeout}, Acc);
+ % TODO: this might be a debugging leftover, revert by deleting the
+ % next two lines
{error, {forbidden, Error, _Stacktrace}} ->
{error, {forbidden, Error}};
{error, Error} ->
diff --git a/src/fabric/src/fabric_view_reduce.erl b/src/fabric/src/fabric_view_reduce.erl
index 3e68b98d9..831d2dd33 100644
--- a/src/fabric/src/fabric_view_reduce.erl
+++ b/src/fabric/src/fabric_view_reduce.erl
@@ -57,6 +57,8 @@ go(Db, DDoc, VName, Args, Callback, Acc, VInfo) ->
"reduce_view"
),
Callback({error, timeout}, Acc);
+ % TODO: this might be a debugging leftover, revert by deleting the
+ % next two lines
{error, {forbidden, Error, _Stacktrace}} ->
{error, {forbidden, Error}};
{error, Error} ->
diff --git a/src/mem3/src/mem3_nodes.erl b/src/mem3/src/mem3_nodes.erl
index 2167d9988..dd5be1a72 100644
--- a/src/mem3/src/mem3_nodes.erl
+++ b/src/mem3/src/mem3_nodes.erl
@@ -124,7 +124,6 @@ changes_callback(start, _) ->
changes_callback({stop, EndSeq}, _) ->
exit({seq, EndSeq});
changes_callback({change, {Change}, _}, _) ->
- % Change: ~p~n", [Change]),
Node = couch_util:get_value(<<"id">>, Change),
case Node of <<"_design/", _/binary>> -> ok; _ ->
case mem3_util:is_deleted(Change) of
diff --git a/test/elixir/test/bulk_docs_test.exs b/test/elixir/test/bulk_docs_test.exs
index a825bf15b..a689154fc 100644
--- a/test/elixir/test/bulk_docs_test.exs
+++ b/test/elixir/test/bulk_docs_test.exs
@@ -124,6 +124,9 @@ defmodule BulkDocsTest do
test "bulk docs emits conflict error for duplicate doc `_id`s", ctx do
docs = [%{_id: "0", a: 0}, %{_id: "1", a: 1}, %{_id: "1", a: 2}, %{_id: "3", a: 3}]
rows = bulk_post(docs, ctx[:db_name]).body
+
+ # TODO: we had to change the order here, this might point to the same
+ # missing, or overzealous application of lists:reverse() as elsewhere.
assert Enum.at(rows, 2)["id"] == "1"
assert Enum.at(rows, 2)["ok"]
assert Enum.at(rows, 1)["error"] == "conflict"