Author:    Josef Ahmad <josef.ahmad@mongodb.com>             2022-05-02 15:24:23 +0000
Committer: Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-05-02 16:10:52 +0000
Commit:    76e631755203c727392a822dd50a743813967b9a (patch)
Tree:      9ac63b42189f170b5f0c36e285374070afbd6afc /jstests
Parent:    671486e7ba73cfac7e769f9839de9c7be023be21 (diff)
SERVER-63044 Batch non-sharded user multi-doc deletes by default
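
For orientation, a minimal sketch of the behavior this commit enables (names illustrative, assuming a mongod with featureFlagBatchMultiDeletes enabled and an unsharded collection): a plain user multi-doc delete now executes under the BATCHED_DELETE stage, visible in explain output:

// Sketch only: a multi-doc delete should now plan as BATCHED_DELETE.
const coll = db.getSiblingDB("test").getCollection("c");
assert.commandWorked(coll.insertMany([...Array(1000).keys()].map(x => ({_id: x}))));
const expl = coll.explain("executionStats").remove({});
assert.commandWorked(expl);
assert.eq("BATCHED_DELETE", expl.executionStats.executionStages.stage, tojson(expl));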
Diffstat (limited to 'jstests')
 jstests/core/batched_multi_deletes.js                         | 12
 jstests/core/explain_delete.js                                | 14
 jstests/noPassthrough/batched_multi_deletes.js                | 11
 jstests/noPassthrough/batched_multi_deletes_failover.js       | 13
 jstests/noPassthrough/batched_multi_deletes_oplog.js          | 16
 jstests/noPassthrough/batched_multi_deletes_params.js         | 10
 jstests/noPassthrough/batched_multi_deletes_write_conflict.js | 11
 jstests/noPassthrough/change_stream_unwind_batched_writes.js  | 12
 jstests/noPassthrough/crud_timestamps.js                      | 37
 jstests/noPassthrough/ttl_batch_deletes.js                    |  2
 jstests/sharding/oplog_document_key.js                        | 16
 jstests/sharding/query/explain_cmd.js                         |  6
 12 files changed, 82 insertions(+), 78 deletions(-)
diff --git a/jstests/core/batched_multi_deletes.js b/jstests/core/batched_multi_deletes.js
index 4b6bb678978..48260a57a7f 100644
--- a/jstests/core/batched_multi_deletes.js
+++ b/jstests/core/batched_multi_deletes.js
@@ -2,16 +2,17 @@
* Tests batch-deleting a large range of data.
*
* @tags: [
+ * # TODO (SERVER-66071): support sharded collections
+ * assumes_unsharded_collection,
* does_not_support_retryable_writes,
* # TODO (SERVER-55909): make WUOW 'groupOplogEntries' the only mode of operation.
* does_not_support_transactions,
+ * featureFlagBatchMultiDeletes,
* multiversion_incompatible,
* no_selinux,
* requires_fcv_60,
* requires_getmore,
* requires_non_retryable_writes,
- * # TODO (SERVER-63044): namespace for this test is hardcoded, tenant migrations rename it.
- * tenant_migration_incompatible,
* ]
*/
@@ -19,11 +20,8 @@
"use strict";
function populateAndMassDelete(queryPredicate) {
- // '__internalBatchedDeletesTesting.Collection0' is a special, hardcoded namespace that batches
- // multi-doc deletes if the 'internalBatchUserMultiDeletesForTest' server parameter is set.
- // TODO (SERVER-63044): remove this special handling.
- const testDB = db.getSiblingDB('__internalBatchedDeletesTesting');
- const coll = testDB['Collection0'];
+ const testDB = db.getSiblingDB('test');
+ const coll = testDB['c'];
const collCount =
54321; // Intentionally not a multiple of BatchedDeleteStageBatchParams::targetBatchDocs.
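
The remainder of populateAndMassDelete is elided by this hunk; a hedged sketch of the populate-then-delete pattern it follows, reusing the names above:

// Hedged sketch; the real helper body is not shown in this hunk.
coll.drop();
assert.commandWorked(coll.insertMany(
    [...Array(collCount).keys()].map(x => ({_id: x, a: "a".repeat(1024)}))));
assert.eq(collCount, coll.countDocuments({}));
assert.commandWorked(coll.remove(queryPredicate));
assert.eq(0, coll.countDocuments({}));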
diff --git a/jstests/core/explain_delete.js b/jstests/core/explain_delete.js
index 44a67ea21ca..6e2899a500a 100644
--- a/jstests/core/explain_delete.js
+++ b/jstests/core/explain_delete.js
@@ -10,9 +10,10 @@ t.drop();
var explain;
+// TODO (SERVER-66071): make BATCHED_DELETE the only expected delete stage.
/**
- * Verify that the explain command output 'explain' shows a DELETE stage with an nWouldDelete
- * value equal to 'nWouldDelete'.
+ * Verify that the explain command output 'explain' shows a [BATCHED_]DELETE stage with an
+ * nWouldDelete value equal to 'nWouldDelete'.
*/
function checkNWouldDelete(explain, nWouldDelete) {
assert.commandWorked(explain);
@@ -20,19 +21,20 @@ function checkNWouldDelete(explain, nWouldDelete) {
var executionStats = explain.executionStats;
assert("executionStages" in executionStats);
- // If passed through mongos, then DELETE stage(s) should be below the SHARD_WRITE mongos
- // stage. Otherwise the DELETE stage is the root stage.
+ // If passed through mongos, then [BATCHED_]DELETE stage(s) should be below the SHARD_WRITE
+ // mongos stage. Otherwise the [BATCHED_]DELETE stage is the root stage.
var execStages = executionStats.executionStages;
if ("SHARD_WRITE" === execStages.stage) {
let totalToBeDeletedAcrossAllShards = 0;
execStages.shards.forEach(function(shardExplain) {
const rootStageName = shardExplain.executionStages.stage;
- assert.eq(rootStageName, "DELETE", tojson(execStages));
+ assert(rootStageName === "DELETE" || rootStageName === "BATCHED_DELETE",
+ tojson(execStages));
totalToBeDeletedAcrossAllShards += shardExplain.executionStages.nWouldDelete;
});
assert.eq(totalToBeDeletedAcrossAllShards, nWouldDelete, explain);
} else {
- assert.eq(execStages.stage, "DELETE", explain);
+ assert(execStages.stage === "DELETE" || execStages.stage === "BATCHED_DELETE", explain);
assert.eq(execStages.nWouldDelete, nWouldDelete, explain);
}
}
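
A usage sketch for the helper (predicate and field name illustrative):

// Sketch: explain a multi-delete and check the would-be-deleted count.
explain = t.explain("executionStats").remove({a: {$lt: 10}});
checkNWouldDelete(explain, t.count({a: {$lt: 10}}));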
diff --git a/jstests/noPassthrough/batched_multi_deletes.js b/jstests/noPassthrough/batched_multi_deletes.js
index 5daffa31778..e83270a26d8 100644
--- a/jstests/noPassthrough/batched_multi_deletes.js
+++ b/jstests/noPassthrough/batched_multi_deletes.js
@@ -2,6 +2,7 @@
* Validate basic batched multi-deletion functionality.
*
* @tags: [
+ * featureFlagBatchMultiDeletes,
* # Running as a replica set requires journaling.
* requires_journaling,
* ]
@@ -12,11 +13,8 @@
load("jstests/libs/analyze_plan.js");
function validateBatchedDeletes(conn) {
- // '__internalBatchedDeletesTesting.Collection0' is a special, hardcoded namespace that batches
- // multi-doc deletes if the 'internalBatchUserMultiDeletesForTest' server parameter is set.
- // TODO (SERVER-63044): remove this special handling.
- const db = conn.getDB("__internalBatchedDeletesTesting");
- const coll = db.getCollection('Collection0');
+ const db = conn.getDB("test");
+ const coll = db.getCollection("c");
const collName = coll.getName();
const docsPerBatchDefault = 100; // BatchedDeleteStageBatchParams::targetBatchDocs
@@ -48,9 +46,6 @@ function validateBatchedDeletes(conn) {
assert.commandWorked(
coll.insertMany([...Array(collCount).keys()].map(x => ({_id: x, a: "a".repeat(1024)}))));
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalBatchUserMultiDeletesForTest: 1}));
-
// For consistent results, don't enforce the targetBatchTimeMS and targetStagedDocBytes.
assert.commandWorked(db.adminCommand({setParameter: 1, batchedDeletesTargetBatchTimeMS: 0}));
assert.commandWorked(db.adminCommand({setParameter: 1, batchedDeletesTargetStagedDocBytes: 0}));
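
One plausible check the test can make with analyze_plan.js loaded above (a sketch, not necessarily the test's exact assertion):

// Sketch: the winning plan should contain the BATCHED_DELETE stage.
const expl = coll.explain("executionStats").remove({_id: {$gte: 0}});
assert(planHasStage(db, expl.queryPlanner.winningPlan, "BATCHED_DELETE"), tojson(expl));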
diff --git a/jstests/noPassthrough/batched_multi_deletes_failover.js b/jstests/noPassthrough/batched_multi_deletes_failover.js
index 72afcd6c1c9..2aecb33e1ee 100644
--- a/jstests/noPassthrough/batched_multi_deletes_failover.js
+++ b/jstests/noPassthrough/batched_multi_deletes_failover.js
@@ -2,6 +2,7 @@
* Tests that data is consistent after a failover, clean, or unclean shutdown occurs in the middle
* of a batched delete.
* @tags: [
+ * featureFlagBatchMultiDeletes,
* # TODO (SERVER-55909): make WUOW 'groupOplogEntries' the only mode of operation.
* does_not_support_transactions,
* exclude_from_large_txns,
@@ -92,13 +93,10 @@ const rst = new ReplSetTest({
});
const nodes = rst.startSet();
rst.initiate();
+rst.awaitNodesAgreeOnPrimary();
-// '__internalBatchedDeletesTesting.Collection0' is a special, hardcoded namespace that batches
-// multi-doc deletes if the 'internalBatchUserMultiDeletesForTest' server parameter is set.
-// TODO (SERVER-63044): remove this special handling - but preserve a test-specific namespace so the
-// failpoint does not interfere with other background operations.
-const dbName = "__internalBatchedDeletesTesting";
-const collName = "Collection0";
+const dbName = "test";
+const collName = "collHangBatchedDelete";
function runTest(failoverFn, clustered, expectNetworkErrorOnDelete) {
let primary = rst.getPrimary();
@@ -129,9 +127,6 @@ function runTest(failoverFn, clustered, expectNetworkErrorOnDelete) {
jsTestLog(`About to hang batched delete after evaluating approximately ${
hangAfterApproxNDocs} documents`);
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalBatchUserMultiDeletesForTest: 1}));
-
// When the delete fails, the failpoint will automatically unpause. If the connection is killed,
// it is unsafe to try and disable the failpoint tied to testDB's original connection.
const fp = configureFailPoint(
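
The configureFailPoint() call above is truncated by the hunk; the general hang-then-failover shape it sets up looks roughly like this (the failpoint name below is hypothetical):

// Hypothetical failpoint name; the real one is truncated above.
const fp = configureFailPoint(primary, "hangBatchedDeleteMidBatch");
const awaitDelete = startParallelShell(() => {
    // May throw a network error if the failover kills this connection.
    db.getSiblingDB("test")["collHangBatchedDelete"].remove({});
}, primary.port);
fp.wait();  // The batched delete is now paused mid-batch.
// failoverFn() runs here: a stepdown, clean shutdown, or unclean shutdown.
// Per the comment above, the failpoint unpauses itself if the delete fails.
awaitDelete();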
diff --git a/jstests/noPassthrough/batched_multi_deletes_oplog.js b/jstests/noPassthrough/batched_multi_deletes_oplog.js
index 313654f557b..a2c1c60a195 100644
--- a/jstests/noPassthrough/batched_multi_deletes_oplog.js
+++ b/jstests/noPassthrough/batched_multi_deletes_oplog.js
@@ -2,6 +2,7 @@
* Validate oplog behaviour of batched multi-deletes.
*
* @tags: [
+ * featureFlagBatchMultiDeletes,
* # Running as a replica set requires journaling.
* requires_journaling,
* ]
@@ -12,17 +13,12 @@
// Verifies that batches replicate as applyOps entries.
function validateBatchedDeletesOplogDocsPerBatch(conn) {
- // '__internalBatchedDeletesTesting.Collection0' is a special, hardcoded namespace that batches
- // multi-doc deletes if the 'internalBatchUserMultiDeletesForTest' server parameter is set.
- // TODO (SERVER-63044): remove this special handling.
- const db = conn.getDB("__internalBatchedDeletesTesting");
- const coll = db.getCollection('Collection0');
+ const db = conn.getDB("test");
+ const coll = db.getCollection("c");
const docsPerBatch = 100;
const collCount = 5017; // Intentionally not a multiple of docsPerBatch.
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalBatchUserMultiDeletesForTest: 1}));
// Disable size-based batching
assert.commandWorked(db.adminCommand({setParameter: 1, batchedDeletesTargetStagedDocBytes: 0}));
// Disable time-based batching
@@ -53,8 +49,8 @@ function validateBatchedDeletesOplogDocsPerBatch(conn) {
// Verifies the batched deleter cuts batches that stay within the 16MB BSON limit of one applyOps
// entry.
function validateBatchedDeletesOplogBatchAbove16MB(conn) {
- const db = conn.getDB("__internalBatchedDeletesTesting");
- const coll = db.getCollection('Collection0');
+ const db = conn.getDB("test");
+ const coll = db.getCollection("c");
// With non-clustered collections (_id of ObjectId type) the batched deleter's conservative
// applyOps estimation would cut a batch at ~63k documents. Create a collection >> 63k
@@ -64,8 +60,6 @@ function validateBatchedDeletesOplogBatchAbove16MB(conn) {
const collCount = 130000;
const expectedApplyOpsEntries =
Math.ceil(collCount / 63600 /* max docs per batch, see comment above. */);
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalBatchUserMultiDeletesForTest: 1}));
// Disable size-based batching
assert.commandWorked(db.adminCommand({setParameter: 1, batchedDeletesTargetStagedDocBytes: 0}));
// Disable time-based batching
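
A hedged sketch of the verification these parameters set up (the assertion itself falls outside the hunks shown): with size- and time-based targeting disabled, each batch of up to docsPerBatch documents replicates as one applyOps entry.

// Sketch, reusing this test's names: count delete-carrying applyOps entries.
assert.commandWorked(coll.remove({_id: {$gte: 0}}));
const oplog = db.getSiblingDB("local").getCollection("oplog.rs");
const nApplyOps = oplog.find({op: 'c', ns: 'admin.$cmd', 'o.applyOps.op': 'd'}).itcount();
assert.eq(Math.ceil(collCount / docsPerBatch), nApplyOps);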
diff --git a/jstests/noPassthrough/batched_multi_deletes_params.js b/jstests/noPassthrough/batched_multi_deletes_params.js
index b819f4de9a6..c57ab9155ee 100644
--- a/jstests/noPassthrough/batched_multi_deletes_params.js
+++ b/jstests/noPassthrough/batched_multi_deletes_params.js
@@ -2,6 +2,7 @@
* Validate batched multi-deleter's parameters.
*
* @tags: [
+ * featureFlagBatchMultiDeletes,
* # Running as a replica set requires journaling.
* requires_journaling,
* ]
@@ -19,13 +20,8 @@ rst.initiate();
rst.awaitNodesAgreeOnPrimary();
const conn = rst.getPrimary();
-// '__internalBatchedDeletesTesting.Collection0' is a special, hardcoded namespace that batches
-// multi-doc deletes if the 'internalBatchUserMultiDeletesForTest' server parameter is set.
-// TODO (SERVER-63044): remove this special handling.
-const db = conn.getDB("__internalBatchedDeletesTesting");
-const coll = db.getCollection('Collection0');
-
-assert.commandWorked(db.adminCommand({setParameter: 1, internalBatchUserMultiDeletesForTest: 1}));
+const db = conn.getDB("test");
+const coll = db.getCollection("c");
function validateTargetDocsPerBatch() {
const collCount = 1234;
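
The parameters under test are runtime-settable (the crud_timestamps diff below sets them the same way); a sketch of the round-trip, with an illustrative value:

// Sketch: set and read back the per-batch document target.
assert.commandWorked(db.adminCommand({setParameter: 1, batchedDeletesTargetBatchDocs: 10}));
const res = assert.commandWorked(
    db.adminCommand({getParameter: 1, batchedDeletesTargetBatchDocs: 1}));
assert.eq(10, res.batchedDeletesTargetBatchDocs);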
diff --git a/jstests/noPassthrough/batched_multi_deletes_write_conflict.js b/jstests/noPassthrough/batched_multi_deletes_write_conflict.js
index a59d0549101..b67e40ca430 100644
--- a/jstests/noPassthrough/batched_multi_deletes_write_conflict.js
+++ b/jstests/noPassthrough/batched_multi_deletes_write_conflict.js
@@ -2,6 +2,7 @@
* Validate basic batched multi-deletion handling with write conflicts.
*
* @tags: [
+ * featureFlagBatchMultiDeletes,
* requires_fcv_53,
* ]
*/
@@ -15,11 +16,8 @@ load("jstests/libs/fail_point_util.js"); // For 'configureFailPoint()'
const conn = MongoRunner.runMongod();
-// '__internalBatchedDeletesTesting.Collection0' is a special, hardcoded namespace that batches
-// multi-doc deletes if the 'internalBatchUserMultiDeletesForTest' server parameter is set.
-// TODO (SERVER-63044): remove this special handling.
-const testDB = conn.getDB("__internalBatchedDeletesTesting");
-const coll = testDB.getCollection("Collection0");
+const testDB = conn.getDB("test");
+const coll = testDB.getCollection("c");
const collName = coll.getName();
const ns = coll.getFullName();
@@ -32,9 +30,6 @@ assert.commandWorked(
assert.commandWorked(testDB.setProfilingLevel(2));
-assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalBatchUserMultiDeletesForTest: 1}));
-
// While test is not deterministic, there will most likely be several write conflicts during the
// execution. ~250 write conflicts on average.
const batchedDeleteWriteConflictFP = configureFailPoint(
diff --git a/jstests/noPassthrough/change_stream_unwind_batched_writes.js b/jstests/noPassthrough/change_stream_unwind_batched_writes.js
index f84406c4a78..4d009e036af 100644
--- a/jstests/noPassthrough/change_stream_unwind_batched_writes.js
+++ b/jstests/noPassthrough/change_stream_unwind_batched_writes.js
@@ -2,6 +2,7 @@
* Verifies change streams operation for batched writes.
*
* @tags: [
+ * featureFlagBatchMultiDeletes,
* # Running as a replica set requires journaling.
* requires_journaling,
* requires_majority_read_concern,
@@ -11,11 +12,8 @@
(function() {
"use strict";
-// '__internalBatchedDeletesTesting.Collection0' is a special, hardcoded namespace that batches
-// multi-doc deletes if the 'internalBatchUserMultiDeletesForTest' server parameter is set.
-// TODO (SERVER-63044): remove this special handling.
-const dbName = "__internalBatchedDeletesTesting";
-const collName = "Collection0";
+const dbName = "test";
+const collName = "c";
/**
* Asserts that the expected operation type and documentKey are found on the change stream
@@ -58,10 +56,8 @@ function runTest(conn) {
const totalNumDocs = 8;
let changeList = [];
- // Enable batched deletes. For consistent results, disable any batch targeting except for
+ // For consistent results, disable any batch targeting except for
// 'batchedDeletesTargetBatchDocs'.
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalBatchUserMultiDeletesForTest: 1}));
assert.commandWorked(db.adminCommand({setParameter: 1, batchedDeletesTargetBatchTimeMS: 0}));
assert.commandWorked(db.adminCommand({setParameter: 1, batchedDeletesTargetStagedDocBytes: 0}));
assert.commandWorked(
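
A hedged sketch of the unwind check the test builds toward: every deleted document must surface as its own 'delete' change event, even when the server groups the writes into a single applyOps batch.

// Sketch, reusing this test's names (dbName, collName, totalNumDocs).
const csCursor = db.getSiblingDB(dbName)[collName].watch();
assert.commandWorked(db.getSiblingDB(dbName)[collName].remove({}));
for (let i = 0; i < totalNumDocs; ++i) {
    assert.soon(() => csCursor.hasNext());
    const event = csCursor.next();
    assert.eq("delete", event.operationType, tojson(event));
}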
diff --git a/jstests/noPassthrough/crud_timestamps.js b/jstests/noPassthrough/crud_timestamps.js
index 07718be5bbc..eb80ef47463 100644
--- a/jstests/noPassthrough/crud_timestamps.js
+++ b/jstests/noPassthrough/crud_timestamps.js
@@ -16,6 +16,19 @@ rst.initiate();
const testDB = rst.getPrimary().getDB(dbName);
const coll = testDB.getCollection(collName);
+// Determine whether deletes are batched.
+const ret = rst.getPrimary().adminCommand({getParameter: 1, featureFlagBatchMultiDeletes: 1});
+assert(ret.ok || (!ret.ok && ret.errmsg === "no option found to get"));
+const batchedDeletesEnabled = ret.ok ? ret.featureFlagBatchMultiDeletes.value : false;
+if (batchedDeletesEnabled) {
+ // For consistent results, generate a single delete (applyOps) batch.
+ assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, batchedDeletesTargetBatchTimeMS: 0}));
+ assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, batchedDeletesTargetStagedDocBytes: 0}));
+ assert.commandWorked(testDB.adminCommand({setParameter: 1, batchedDeletesTargetBatchDocs: 0}));
+}
+
if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
rst.stopSet();
return;
@@ -99,15 +112,21 @@ request = {
assert.commandWorked(coll.runCommand(request));
-ts1 = oplog.findOne({op: 'd', o: {_id: 1}}).ts;
-ts2 = oplog.findOne({op: 'd', o: {_id: 2}}).ts;
-let ts3 = oplog.findOne({op: 'd', o: {_id: 3}}).ts;
-let ts4 = oplog.findOne({op: 'd', o: {_id: 4}}).ts;
-
-check(ts1, [{_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
-check(ts2, [{_id: 3, a: 4}, {_id: 4, a: 5}]);
-check(ts3, [{_id: 4, a: 5}]);
-check(ts4, []);
+if (batchedDeletesEnabled) {
+ const applyOps = oplog.findOne({op: 'c', ns: 'admin.$cmd', 'o.applyOps.op': 'd'});
+ const ts = applyOps['ts'];
+ check(ts, []);
+} else {
+ ts1 = oplog.findOne({op: 'd', o: {_id: 1}}).ts;
+ ts2 = oplog.findOne({op: 'd', o: {_id: 2}}).ts;
+ const ts3 = oplog.findOne({op: 'd', o: {_id: 3}}).ts;
+ const ts4 = oplog.findOne({op: 'd', o: {_id: 4}}).ts;
+
+ check(ts1, [{_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
+ check(ts2, [{_id: 3, a: 4}, {_id: 4, a: 5}]);
+ check(ts3, [{_id: 4, a: 5}]);
+ check(ts4, []);
+}
session.endSession();
rst.stopSet();
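
For context, the batched form consumed above carries one timestamp for the whole batch, which is why the per-document ts lookups only apply on the legacy path. Abridged shape (hedged illustration; namespace illustrative):

// {op: 'c', ns: 'admin.$cmd', ts: Timestamp(...),   // single ts for the batch
//  o: {applyOps: [{op: 'd', ns: 'test.coll', o: {_id: 1}},
//                 {op: 'd', ns: 'test.coll', o: {_id: 2}},
//                 ...]}}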
diff --git a/jstests/noPassthrough/ttl_batch_deletes.js b/jstests/noPassthrough/ttl_batch_deletes.js
index 6fe3d309c57..b9558689cfc 100644
--- a/jstests/noPassthrough/ttl_batch_deletes.js
+++ b/jstests/noPassthrough/ttl_batch_deletes.js
@@ -172,6 +172,6 @@ runTestCase(testTTLDeleteWithIndexScanDocByDoc);
runTestCase(testTTLDeleteWithCollectionScanBatched);
runTestCase(testTTLDeleteWithCollectionScanDocByDoc);
// Excluding sharded collections from batched deletes is a temporary measure.
-// TODO: Remove this test after SERVER-64107 and SERVER-65644 are resolved.
+// TODO (SERVER-66071): Remove this test.
runTestCase(testTTLDeleteWithIndexScanBatchedExcludesShardedCollection, true);
})();
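
A minimal sketch of the TTL scenario these cases exercise (names illustrative; ttlMonitorSleepSecs is assumed settable at startup):

// Sketch: an already-expired TTL document the (batched) TTL pass should remove.
const conn = MongoRunner.runMongod({setParameter: {ttlMonitorSleepSecs: 1}});
const ttlColl = conn.getDB("test").getCollection("ttl_c");
assert.commandWorked(ttlColl.createIndex({expireAt: 1}, {expireAfterSeconds: 0}));
assert.commandWorked(ttlColl.insert({expireAt: new Date(0)}));
assert.soon(() => ttlColl.countDocuments({}) === 0);
MongoRunner.stopMongod(conn);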
diff --git a/jstests/sharding/oplog_document_key.js b/jstests/sharding/oplog_document_key.js
index 3830f41686c..34946b864e2 100644
--- a/jstests/sharding/oplog_document_key.js
+++ b/jstests/sharding/oplog_document_key.js
@@ -113,9 +113,21 @@ assert.commandWorked(db.un.remove({_id: 10}));
assert.commandWorked(db.un.remove({_id: 30}));
a = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 10});
-assert.eq(a.o, {_id: 10});
+if (a) {
+ assert.eq(a.o, {_id: 10});
+} else {
+ // Validate this is a batched delete, which includes the document key.
+ a = oplog.findOne({ns: 'admin.$cmd', op: 'c', 'o.applyOps.o._id': 10});
+ assert.eq(a.o.applyOps[0].o, {_id: 10});
+}
b = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 30});
-assert.eq(b.o, {_id: 30});
+if (b) {
+ assert.eq(b.o, {_id: 30});
+} else {
+ // Validate this is a batched delete, which includes the document key.
+ b = oplog.findOne({ns: 'admin.$cmd', op: 'c', 'o.applyOps.o._id': 30});
+ assert.eq(b.o.applyOps[0].o, {_id: 30});
+}
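
Hedged note on the sharded sections that follow: for a sharded collection the delete oplog entry's 'o' field carries the full document key (the shard key together with _id), and the batched applyOps form preserves the same per-op 'o' document.

// e.g. (illustrative; field order not guaranteed):
// {ns: 'test.byX', op: 'd', o: {x: 1, _id: 10}}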
////////////////////////////////////////////////////////////////////////
jsTest.log("Test remove command: 'byX'");
diff --git a/jstests/sharding/query/explain_cmd.js b/jstests/sharding/query/explain_cmd.js
index fb1d789e1db..00cce44d184 100644
--- a/jstests/sharding/query/explain_cmd.js
+++ b/jstests/sharding/query/explain_cmd.js
@@ -83,8 +83,10 @@ explain = db.runCommand({
assert.commandWorked(explain, tojson(explain));
assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
-assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE");
-assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE");
+const stageShard0 = explain.queryPlanner.winningPlan.shards[0].winningPlan.stage;
+const stageShard1 = explain.queryPlanner.winningPlan.shards[1].winningPlan.stage;
+assert(stageShard0 === "DELETE" || stageShard0 === "BATCHED_DELETE");
+assert(stageShard1 === "DELETE" || stageShard1 === "BATCHED_DELETE");
// Check that the deletes didn't actually happen.
assert.eq(3, collSharded.count({b: 1}));
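
The explain invocation truncated above plausibly takes the standard mongos explain-delete shape (a sketch, reusing this test's names):

// Sketch: explain a multi-delete ({limit: 0}) that targets both shards.
explain = db.runCommand({
    explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
    verbosity: "queryPlanner",
});
assert.commandWorked(explain, tojson(explain));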