summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNick Vatamaniuc <nickva@users.noreply.github.com>2020-01-07 19:41:03 -0500
committerJoan Touzet <wohali@users.noreply.github.com>2020-01-07 19:41:03 -0500
commitdbdf1953e98b662e8d1184d8e6c5758af4f5a7c7 (patch)
tree62c511372ef10d43e80faf68297bfc6987862a96
parent102bcf4e07de54616fc44529792db58f482d736b (diff)
downloadcouchdb-dbdf1953e98b662e8d1184d8e6c5758af4f5a7c7.tar.gz
Remove unused batching code from replicator (#2419)
The `batch_doc(Doc)` code was previously used for local endpoints when flushing docs with attachments. After that code was removed, `remote_doc_handler/2` filters out all docs with attachments before they even get to the doc flusher, so `batch_doc(Doc)` effectively always returns `true`.
-rw-r--r--src/couch_replicator/src/couch_replicator_worker.erl | 42
1 file changed, 9 insertions, 33 deletions
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
index 986c32c0a..23a4ea107 100644
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_worker.erl
@@ -28,8 +28,6 @@
% TODO: maybe make both buffer max sizes configurable
-define(DOC_BUFFER_BYTE_SIZE, 512 * 1024). % for remote targets
--define(MAX_BULK_ATT_SIZE, 64 * 1024).
--define(MAX_BULK_ATTS_PER_DOC, 8).
-define(STATS_DELAY, 10000000). % 10 seconds (in microseconds)
-define(MISSING_DOC_RETRY_MSEC, 2000).
@@ -334,40 +332,18 @@ maybe_flush_docs(Doc,State) ->
maybe_flush_docs(#httpdb{} = Target, Batch, Doc) ->
#batch{docs = DocAcc, size = SizeAcc} = Batch,
- case batch_doc(Doc) of
- false ->
- couch_log:debug("Worker flushing doc with attachments", []),
- case flush_doc(Target, Doc) of
- ok ->
- {Batch, couch_replicator_stats:new([{docs_written, 1}])};
- _ ->
- {Batch, couch_replicator_stats:new([{doc_write_failures, 1}])}
- end;
- true ->
- JsonDoc = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
- case SizeAcc + iolist_size(JsonDoc) of
- SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
- couch_log:debug("Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
- Stats = flush_docs(Target, [JsonDoc | DocAcc]),
- {#batch{}, Stats};
- SizeAcc2 ->
- Stats = couch_replicator_stats:new(),
- {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats}
- end
+ JsonDoc = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
+ case SizeAcc + iolist_size(JsonDoc) of
+ SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
+ couch_log:debug("Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
+ Stats = flush_docs(Target, [JsonDoc | DocAcc]),
+ {#batch{}, Stats};
+ SizeAcc2 ->
+ Stats = couch_replicator_stats:new(),
+ {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats}
end.
-batch_doc(#doc{atts = []}) ->
- true;
-batch_doc(#doc{atts = Atts}) ->
- (length(Atts) =< ?MAX_BULK_ATTS_PER_DOC) andalso
- lists:all(
- fun(Att) ->
- [L, Data] = couch_att:fetch([disk_len, data], Att),
- (L =< ?MAX_BULK_ATT_SIZE) andalso (Data =/= stub)
- end, Atts).
-
-
flush_docs(_Target, []) ->
couch_replicator_stats:new();
flush_docs(Target, DocList) ->