summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--buildscripts/resmokeconfig/suites/change_streams_batched_deletes_passthrough.yml41
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_replication_batched_deletes_passthrough.yml57
-rw-r--r--buildscripts/resmokeconfig/suites/replica_sets_batched_deletes_passthrough.yml43
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_batched_deletes_passthrough.yml38
-rw-r--r--etc/evergreen.yml2
-rw-r--r--etc/evergreen_yml_components/definitions.yml32
-rw-r--r--jstests/core/batched_multi_deletes.js2
-rw-r--r--src/mongo/db/op_observer_impl.cpp4
-rw-r--r--src/mongo/db/op_observer_impl_test.cpp17
-rw-r--r--src/mongo/db/query/get_executor.cpp13
10 files changed, 245 insertions, 4 deletions
diff --git a/buildscripts/resmokeconfig/suites/change_streams_batched_deletes_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_batched_deletes_passthrough.yml
new file mode 100644
index 00000000000..61a06d66c39
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/change_streams_batched_deletes_passthrough.yml
@@ -0,0 +1,41 @@
+# This passthrough runs all change streams JS tests and automatically batches multi-deletes.
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/change_streams/**/*.js
+ exclude_files:
+ # TODO (SERVER-64972): add change stream support for batched deletes.
+ - jstests/change_streams/change_stream.js
+ - jstests/change_streams/lookup_pit_pre_and_post_image.js
+ - jstests/change_streams/show_raw_update_description.js
+
+ exclude_with_any_tags:
+ - assumes_standalone_mongod
+ # TODO (SERVER-64506): make groupOplogEntries WUOWs nestable (e.g. inside multi-doc txns).
+ - uses_prepare_transaction
+ - uses_transactions
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHashInBackground
+ - CheckReplDBHash
+ - ValidateCollections
+ hooks:
+ - class: CheckReplDBHashInBackground
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ - class: CleanEveryN
+ n: 20
+ config:
+ shell_options:
+ eval: >-
+ var testingReplication = true;
+ fixture:
+ class: ReplicaSetFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ failpoint.batchDeletesByDefault: "{mode: 'alwaysOn'}"
+ num_nodes: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_batched_deletes_passthrough.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_batched_deletes_passthrough.yml
new file mode 100644
index 00000000000..0935f13218f
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/concurrency_replication_batched_deletes_passthrough.yml
@@ -0,0 +1,57 @@
+# This passthrough runs all concurrency_replication and automatically batches multi-deletes.
+test_kind: fsm_workload_test
+
+selector:
+ roots:
+ - jstests/concurrency/fsm_workloads/**/*.js
+ exclude_files:
+ ##
+ # Disabled due to MongoDB restrictions and/or workload restrictions
+ ##
+ # These workloads use >100MB of data, which can overwhelm test hosts.
+ - jstests/concurrency/fsm_workloads/agg_group_external.js
+ - jstests/concurrency/fsm_workloads/agg_sort_external.js
+
+ # The findAndModify_update_grow.js workload can cause OOM kills on test hosts.
+ - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+
+ # These workloads run the reIndex command, which is only allowed on a standalone node.
+ - jstests/concurrency/fsm_workloads/reindex.js
+ - jstests/concurrency/fsm_workloads/reindex_background.js
+ - jstests/concurrency/fsm_workloads/reindex_writeconflict.js
+
+ # TODO (SERVER-64506): these tests use transactions under the hood to modify user roles.
+ - jstests/concurrency/fsm_workloads/auth_*.js
+
+ exclude_with_any_tags:
+ - assumes_standalone_mongod
+ - requires_sharding
+ # TODO (SERVER-64506): make groupOplogEntries WUOWs nestable (e.g. inside multi-doc txns).
+ - uses_prepare_transaction
+ - uses_transactions
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHash
+ - ValidateCollections
+ tests: true
+ config: {}
+ hooks:
+ # The CheckReplDBHash hook waits until all operations have replicated to and have been applied
+ # on the secondaries, so we run the ValidateCollections hook after it to ensure we're
+ # validating the entire contents of the collection.
+ #
+ # TODO SERVER-26466: Add CheckReplOplogs hook to the concurrency suite.
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ - class: CleanupConcurrencyWorkloads
+ fixture:
+ class: ReplicaSetFixture
+ mongod_options:
+ oplogSize: 1024
+ set_parameters:
+ enableTestCommands: 1
+ roleGraphInvalidationIsFatal: 1
+ failpoint.batchDeletesByDefault: "{mode: 'alwaysOn'}"
+ num_nodes: 3
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_batched_deletes_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_batched_deletes_passthrough.yml
new file mode 100644
index 00000000000..d51f9684a96
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/replica_sets_batched_deletes_passthrough.yml
@@ -0,0 +1,43 @@
+# This passthrough runs all core JS tests on a replica set fixture and automatically batches multi-deletes.
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/core/**/*.js
+ exclude_files:
+ # Expects explain to show a DELETE stage.
+ - jstests/core/explain_delete.js
+ # TODO (SERVER-64506): these tests use transactions under the hood to modify user roles.
+ - jstests/core/roles_info.js
+ - jstests/core/role_management_helpers.js
+ - jstests/core/views/views_all_commands.js
+
+ exclude_with_any_tags:
+ - assumes_standalone_mongod
+ # TODO (SERVER-64506): make groupOplogEntries WUOWs nestable (e.g. inside multi-doc txns).
+ - uses_prepare_transaction
+ - uses_transactions
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHashInBackground
+ - CheckReplDBHash
+ - ValidateCollections
+ hooks:
+ - class: CheckReplDBHashInBackground
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ - class: CleanEveryN
+ n: 20
+ config:
+ shell_options:
+ eval: >-
+ var testingReplication = true;
+ fixture:
+ class: ReplicaSetFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ failpoint.batchDeletesByDefault: "{mode: 'alwaysOn'}"
+ num_nodes: 2
diff --git a/buildscripts/resmokeconfig/suites/sharding_batched_deletes_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_batched_deletes_passthrough.yml
new file mode 100644
index 00000000000..9b49600225b
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/sharding_batched_deletes_passthrough.yml
@@ -0,0 +1,38 @@
+# This passthrough runs all sharding JS tests and automatically batches multi-deletes.
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/sharding/**/*.js
+ exclude_files:
+ - jstests/sharding/libs/*.js
+ # TODO (SERVER-64506): these tests use transactions under the hood to modify user roles.
+ - jstests/sharding/api_params_nontransaction_sharded.js
+ - jstests/sharding/api_params_nontransaction_unsharded.js
+ # Expects DELETE stage
+ - jstests/sharding/query/explain_cmd.js
+ # TODO (SERVER-64972): add change stream support for batched deletes.
+ - jstests/sharding/change_stream_no_orphans.js
+
+ exclude_with_any_tags:
+ - assumes_standalone_mongod
+ # TODO (SERVER-64506): make groupOplogEntries WUOWs nestable (e.g. inside multi-doc txns).
+ - featureFlagInternalTransactions
+ - uses_multi_shard_transaction
+ - uses_prepare_transaction
+ - uses_transactions
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHashInBackground
+ - CheckReplDBHash
+ - ValidateCollections
+ config:
+ shell_options:
+ nodb: ''
+ global_vars:
+ TestData:
+ setParameters:
+ enableTestCommands: 1
+ failpoint.batchDeletesByDefault: "{mode: 'alwaysOn'}"
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index f9d280ae3c8..1adaafb8d53 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -241,6 +241,7 @@ buildvariants:
- name: build_variant_gen
- name: .aggregation !.encrypt
- name: .auth !.audit !.multiversion
+ - name: .batched_deletes
- name: .causally_consistent !.wo_snapshot
- name: .change_streams !.secondary_reads
- name: .clustered_collections
@@ -1511,6 +1512,7 @@ buildvariants:
- name: audit
- name: .auth
- name: unittest_shell_hang_analyzer_gen
+ - name: .batched_deletes
- name: .causally_consistent !.sharding
- name: .change_streams
- name: .change_stream_fuzzer
diff --git a/etc/evergreen_yml_components/definitions.yml b/etc/evergreen_yml_components/definitions.yml
index 928cbcd18ca..dbd8c5e5761 100644
--- a/etc/evergreen_yml_components/definitions.yml
+++ b/etc/evergreen_yml_components/definitions.yml
@@ -5116,6 +5116,38 @@ tasks:
use_large_distro: "true"
- <<: *gen_task_template
+ name: replica_sets_batched_deletes_passthrough_gen
+ tags: ["large", "batched_deletes"]
+ commands:
+ - func: "generate resmoke tasks"
+ vars:
+ use_large_distro: "true"
+
+- <<: *gen_task_template
+ name: sharding_batched_deletes_passthrough_gen
+ tags: ["sharding", "large", "batched_deletes"]
+ commands:
+ - func: "generate resmoke tasks"
+ vars:
+ use_large_distro: "true"
+
+- <<: *gen_task_template
+ name: change_streams_batched_deletes_passthrough_gen
+ tags: ["change_streams", "large", "batched_deletes"]
+ commands:
+ - func: "generate resmoke tasks"
+ vars:
+ use_large_distro: "true"
+
+- <<: *gen_task_template
+ name: concurrency_replication_batched_deletes_passthrough_gen
+ tags: ["large", "batched_deletes"]
+ commands:
+ - func: "generate resmoke tasks"
+ vars:
+ use_large_distro: "true"
+
+- <<: *gen_task_template
name: parallel_gen
tags: ["misc_js", "parallel"]
commands:
diff --git a/jstests/core/batched_multi_deletes.js b/jstests/core/batched_multi_deletes.js
index 07b4af9d639..456da903675 100644
--- a/jstests/core/batched_multi_deletes.js
+++ b/jstests/core/batched_multi_deletes.js
@@ -37,7 +37,7 @@ function populateAndMassDelete(queryPredicate) {
// Verify the delete will involve the BATCHED_DELETE stage.
const expl = testDB.runCommand({
- explain: {delete: coll.getName(), deletes: [{q: {_id: {$gte: 0}}, limit: 0}]},
+ explain: {delete: coll.getName(), deletes: [{q: queryPredicate, limit: 0}]},
verbosity: "executionStats"
});
assert.commandWorked(expl);
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index 098a7d55d16..37fc927add5 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -1981,6 +1981,10 @@ void OpObserverImpl::onBatchedWriteCommit(OperationContext* opCtx) {
auto& batchedWriteContext = BatchedWriteContext::get(opCtx);
auto& batchedOps = batchedWriteContext.getBatchedOperations(opCtx);
+ if (!batchedOps.size()) {
+ return;
+ }
+
// Reserve all the optimes in advance, so we only need to get the optime mutex once. We
// reserve enough entries for all statements in the transaction.
auto oplogSlots = repl::getNextOpTimes(opCtx, batchedOps.size());
diff --git a/src/mongo/db/op_observer_impl_test.cpp b/src/mongo/db/op_observer_impl_test.cpp
index 27f497aedea..a4bff71f00b 100644
--- a/src/mongo/db/op_observer_impl_test.cpp
+++ b/src/mongo/db/op_observer_impl_test.cpp
@@ -2494,6 +2494,23 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsGrouping) {
}
}
+// Verifies an empty WUOW doesn't generate an oplog entry.
+TEST_F(BatchedWriteOutputsTest, testEmptyWUOW) {
+ // Setup.
+ auto opCtxRaii = cc().makeOperationContext();
+ OperationContext* opCtx = opCtxRaii.get();
+ reset(opCtx, NamespaceString::kRsOplogNamespace);
+ auto opObserverRegistry = std::make_unique<OpObserverRegistry>();
+ opObserverRegistry->addObserver(std::make_unique<OpObserverImpl>());
+ opCtx->getServiceContext()->setOpObserver(std::move(opObserverRegistry));
+
+ // Start and commit an empty WUOW.
+ WriteUnitOfWork wuow(opCtx, true /* groupOplogEntries */);
+ wuow.commit();
+
+ // The getNOplogEntries call below asserts that the oplog is empty.
+ getNOplogEntries(opCtx, 0);
+}
class OnDeleteOutputsTest : public OpObserverTest {
protected:
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 26a9728ae3a..f53e9f1debb 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -109,6 +109,7 @@
namespace mongo {
MONGO_FAIL_POINT_DEFINE(includeFakeColumnarIndex);
+MONGO_FAIL_POINT_DEFINE(batchDeletesByDefault);
boost::intrusive_ptr<ExpressionContext> makeExpressionContextForGetExecutor(
OperationContext* opCtx, const BSONObj& requestCollation, const NamespaceString& nss) {
@@ -1660,9 +1661,15 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
deleteStageParams->canonicalQuery = cq.get();
- if (MONGO_unlikely(gInternalBatchUserMultiDeletesForTest.load() &&
- nss.ns() == "__internalBatchedDeletesTesting.Collection0" &&
- deleteStageParams->isMulti)) {
+ const bool batchDelete =
+ (deleteStageParams->isMulti && !deleteStageParams->fromMigrate &&
+ !deleteStageParams->returnDeleted && deleteStageParams->sort.isEmpty() &&
+ !deleteStageParams->numStatsForDoc) &&
+ ((gInternalBatchUserMultiDeletesForTest.load() &&
+ nss.ns() == "__internalBatchedDeletesTesting.Collection0") ||
+ (batchDeletesByDefault.shouldFail()));
+
+ if (batchDelete) {
root =
std::make_unique<BatchedDeleteStage>(cq->getExpCtxRaw(),
std::move(deleteStageParams),