summaryrefslogtreecommitdiff
path: root/jstests/noPassthrough/batched_multi_deletes_params.js
diff options
context:
space:
mode:
authorJosef Ahmad <josef.ahmad@mongodb.com>2022-04-13 10:28:30 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-04-13 10:54:26 +0000
commit847ed004af65831cf0592a96d8bf5b022a46cafd (patch)
tree74b4474d5ba1a714f6188f4b89a68a44f0180b26 /jstests/noPassthrough/batched_multi_deletes_params.js
parent19f6bf91662cc08edb0bd8d3b12390cae96036b3 (diff)
downloadmongo-847ed004af65831cf0592a96d8bf5b022a46cafd.tar.gz
SERVER-63039 Add staged documents size target to BatchedDeleteStage
Diffstat (limited to 'jstests/noPassthrough/batched_multi_deletes_params.js')
-rw-r--r--jstests/noPassthrough/batched_multi_deletes_params.js44
1 files changed, 43 insertions, 1 deletions
diff --git a/jstests/noPassthrough/batched_multi_deletes_params.js b/jstests/noPassthrough/batched_multi_deletes_params.js
index cc0365bf728..b819f4de9a6 100644
--- a/jstests/noPassthrough/batched_multi_deletes_params.js
+++ b/jstests/noPassthrough/batched_multi_deletes_params.js
@@ -106,9 +106,51 @@ function validateTargetBatchTimeMS() {
}
}
+function validateTargetStagedDocsBytes() {
+ const collCount = 10000;
+ const docPaddingBytes = 1024;
+ const cumulativePaddingBytes = collCount *
+ (bsonsize({_id: ObjectId(), a: 'a'}) +
+ 100 /* allow for getMemUsage() own metadata and overestimation */ + docPaddingBytes);
+
+ assert.commandWorked(db.adminCommand({setParameter: 1, batchedDeletesTargetBatchTimeMS: 0}));
+ assert.commandWorked(db.adminCommand({setParameter: 1, batchedDeletesTargetBatchDocs: 0}));
+
+ for (let stagedDocsBytes of [0, 1024 * 1024, 5 * 1024 * 1024]) {
+ jsTestLog("Validating stagedDocsBytes=" + stagedDocsBytes);
+
+ assert.commandWorked(db.adminCommand(
+ {setParameter: 1, batchedDeletesTargetStagedDocBytes: stagedDocsBytes}));
+
+ coll.drop();
+ assert.commandWorked(coll.insertMany(
+ [...Array(collCount).keys()].map(x => ({a: "a".repeat(docPaddingBytes)}))));
+
+        // batchedDeletesTargetStagedDocBytes := 0 means no limit.
+ const expectedBatches =
+ stagedDocsBytes ? Math.ceil(cumulativePaddingBytes / stagedDocsBytes) : 1;
+ const serverStatusBatchesBefore = db.serverStatus()['batchedDeletes']['batches'];
+ const serverStatusDocsBefore = db.serverStatus()['batchedDeletes']['docs'];
+
+ assert.eq(collCount, coll.find().itcount());
+ assert.commandWorked(coll.deleteMany({}));
+ assert.eq(0, coll.find().itcount());
+
+ const serverStatusBatchesAfter = db.serverStatus()['batchedDeletes']['batches'];
+ const serverStatusDocsAfter = db.serverStatus()['batchedDeletes']['docs'];
+ const serverStatusDocsExpected = serverStatusDocsBefore + collCount;
+ const serverStatusBatchesExpected = serverStatusBatchesBefore + expectedBatches;
+ assert.eq(serverStatusBatchesAfter, serverStatusBatchesExpected);
+ assert.eq(serverStatusDocsAfter, serverStatusDocsExpected);
+
+ rst.awaitReplication();
+ rst.checkReplicatedDataHashes();
+ }
+}
+
validateTargetDocsPerBatch();
validateTargetBatchTimeMS();
-// TODO (SERVER-63039): validate targetStagedDocBytes.
+validateTargetStagedDocsBytes();
rst.stopSet();
})();