summary | refs | log | tree | commit | diff
path: root/src/mongo/s
diff options
context:
space:
mode:
author: Mihai Andrei <mihai.andrei@10gen.com> 2022-07-27 20:35:40 +0000
committer: Evergreen Agent <no-reply@evergreen.mongodb.com> 2022-07-27 23:38:57 +0000
commit: 7b7fe658db948e6f5a4a6c30d4590d7866c59371 (patch)
tree: 7b0be78f6441fc8ea2f3fb9a14b254378a1c1dc6 /src/mongo/s
parent: 586135f8159f72b2f42b1cbd96c93fbfbc84c13b (diff)
download: mongo-7b7fe658db948e6f5a4a6c30d4590d7866c59371.tar.gz
SERVER-66289 Update write size estimation logic in DocumentSourceWriter
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/write_ops/batch_write_op.cpp | 53
1 file changed, 9 insertions(+), 44 deletions(-)
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index 4c2061dd07f..26f9a4d3488 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -59,7 +59,6 @@ struct WriteErrorComp {
// batches before serializing.
//
// TODO: Revisit when we revisit command limits in general
-const int kEstUpdateOverheadBytes = (BSONObjMaxInternalSize - BSONObjMaxUserSize) / 100;
const int kEstDeleteOverheadBytes = (BSONObjMaxInternalSize - BSONObjMaxUserSize) / 100;
/**
@@ -159,51 +158,17 @@ int getWriteSizeBytes(const WriteOp& writeOp) {
return item.getDocument().objsize();
} else if (batchType == BatchedCommandRequest::BatchType_Update) {
// Note: Be conservative here - it's okay if we send slightly too many batches.
- auto estSize = static_cast<int>(BSONObj::kMinBSONLength);
- static const auto boolSize = 1;
-
- // Add the size of the 'collation' field, if present.
- estSize += !item.getUpdate().getCollation() ? 0
- : (UpdateOpEntry::kCollationFieldName.size() +
- item.getUpdate().getCollation()->objsize());
-
- // Add the size of the 'arrayFilters' field, if present.
- estSize += !item.getUpdate().getArrayFilters() ? 0 : ([&item]() {
- auto size = BSONObj::kMinBSONLength + UpdateOpEntry::kArrayFiltersFieldName.size();
- for (auto&& filter : *item.getUpdate().getArrayFilters()) {
- size += filter.objsize();
- }
- return size;
- })();
-
- // Add the sizes of the 'multi' and 'upsert' fields.
- estSize += UpdateOpEntry::kUpsertFieldName.size() + boolSize;
- estSize += UpdateOpEntry::kMultiFieldName.size() + boolSize;
-
- // Add the size of 'upsertSupplied' field if present.
- if (auto upsertSupplied = item.getUpdate().getUpsertSupplied()) {
- estSize += UpdateOpEntry::kUpsertSuppliedFieldName.size() + boolSize;
- }
-
- // Add the sizes of the 'q' and 'u' fields.
- estSize += (UpdateOpEntry::kQFieldName.size() + item.getUpdate().getQ().objsize() +
- UpdateOpEntry::kUFieldName.size() + item.getUpdate().getU().objsize());
-
- // Add the size of the 'c' field if present.
- if (auto constants = item.getUpdate().getC()) {
- estSize += UpdateOpEntry::kCFieldName.size() + item.getUpdate().getC()->objsize();
- }
-
- // Add the size of 'hint' field if present.
- if (auto hint = item.getUpdate().getHint(); !hint.isEmpty()) {
- estSize += UpdateOpEntry::kHintFieldName.size() + hint.objsize();
- }
-
- // Finally, add the constant updateOp overhead size.
- estSize += kEstUpdateOverheadBytes;
+ const auto& update = item.getUpdate();
+ auto estSize = write_ops::getUpdateSizeEstimate(update.getQ(),
+ update.getU(),
+ update.getC(),
+ update.getUpsertSupplied().has_value(),
+ update.getCollation(),
+ update.getArrayFilters(),
+ update.getHint());
// When running a debug build, verify that estSize is at least the BSON serialization size.
- dassert(estSize >= item.getUpdate().toBSON().objsize());
+ dassert(estSize >= update.toBSON().objsize());
return estSize;
} else if (batchType == BatchedCommandRequest::BatchType_Delete) {
// Note: Be conservative here - it's okay if we send slightly too many batches.