diff options
author | Nick Vatamaniuc <vatamane@apache.org> | 2020-01-13 18:39:31 -0500 |
---|---|---|
committer | Nick Vatamaniuc <vatamane@apache.org> | 2020-01-14 00:59:00 -0500 |
commit | 66afc45015827cd55d8a5663f19da09e18c2780e (patch) | |
tree | aeb4f9fdd80bc9327959eab2c78989efcc435395 | |
parent | 660889db4be737e736706995e32e1cc224055ca9 (diff) | |
download | couchdb-66afc45015827cd55d8a5663f19da09e18c2780e.tar.gz |
Properly account for replication stats when splitting bulk docs batches
Previously, if a batch of bulk docs had to be bisected in order to fit a lower max
request size limit on the target, we only counted stats for the second batch.
So it was possible that we missed some `doc_write_failures` updates, which
could be perceived as data loss by the customer.
So we use the handy-dandy `sum_stats/2` function to sum the return stats from
both batches and return that.
Issue: https://github.com/apache/couchdb/issues/2414
-rw-r--r-- | src/couch_replicator/src/couch_replicator_worker.erl | 5 |
1 files changed, 3 insertions, 2 deletions
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl index 885e171a0..eb8beaaa9 100644 --- a/src/couch_replicator/src/couch_replicator_worker.erl +++ b/src/couch_replicator/src/couch_replicator_worker.erl @@ -381,8 +381,9 @@ handle_flush_docs_result({error, request_body_too_large}, Target, DocList) -> " request body is too large. Splitting batch into 2 separate batches of" " sizes ~p and ~p", [Len, couch_replicator_api_wrap:db_uri(Target), length(DocList1), length(DocList2)]), - flush_docs(Target, DocList1), - flush_docs(Target, DocList2); + Stats1 = flush_docs(Target, DocList1), + Stats2 = flush_docs(Target, DocList2), + couch_replicator_stats:sum_stats(Stats1, Stats2); handle_flush_docs_result({ok, Errors}, Target, DocList) -> DbUri = couch_replicator_api_wrap:db_uri(Target), lists:foreach( |