From f9a29465fca8df7a0db888389a3c3038a4d3fc6d Mon Sep 17 00:00:00 2001
From: Benety Goh
Date: Thu, 11 May 2023 14:59:14 +0000
Subject: SERVER-76408 create feature flag for large batched writes

---
 .../noPassthrough/batched_multi_deletes_large_transaction.js |  2 +-
 jstests/replsets/rollback_large_batched_multi_deletes.js     |  3 +--
 src/mongo/db/op_observer/op_observer_impl.cpp                |  3 +--
 src/mongo/db/storage/storage_parameters.idl                  | 10 ++++++++++
 4 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/jstests/noPassthrough/batched_multi_deletes_large_transaction.js b/jstests/noPassthrough/batched_multi_deletes_large_transaction.js
index 09728147bec..ea8e76fb599 100644
--- a/jstests/noPassthrough/batched_multi_deletes_large_transaction.js
+++ b/jstests/noPassthrough/batched_multi_deletes_large_transaction.js
@@ -47,7 +47,7 @@ assert.commandWorked(coll.insert(docIds.map((x) => {
 
 // Set up server to split deletes over multiple oplog entries
 // such that each oplog entry contains two delete operations.
-if (!FeatureFlagUtil.isEnabled(db, "InternalWritesAreReplicatedTransactionally")) {
+if (!FeatureFlagUtil.isEnabled(db, "LargeBatchedOperations")) {
     // Confirm legacy server behavior where mutiple oplog entries are not allowed
     // for batched writes.
     const result =
diff --git a/jstests/replsets/rollback_large_batched_multi_deletes.js b/jstests/replsets/rollback_large_batched_multi_deletes.js
index d5f0f840a3c..fb0110b27cb 100644
--- a/jstests/replsets/rollback_large_batched_multi_deletes.js
+++ b/jstests/replsets/rollback_large_batched_multi_deletes.js
@@ -65,8 +65,7 @@ const nodeOptions = {
 };
 
 const rollbackTest = new RollbackTest(jsTestName(), /*replSet=*/ undefined, nodeOptions);
-if (!FeatureFlagUtil.isEnabled(rollbackTest.getPrimary(),
-                               "InternalWritesAreReplicatedTransactionally")) {
+if (!FeatureFlagUtil.isEnabled(rollbackTest.getPrimary(), "LargeBatchedOperations")) {
     jsTestLog('Skipping test because required feature flag is not enabled.');
     rollbackTest.stop();
     return;
diff --git a/src/mongo/db/op_observer/op_observer_impl.cpp b/src/mongo/db/op_observer/op_observer_impl.cpp
index 5bf2f567ef5..6f4bf660c26 100644
--- a/src/mongo/db/op_observer/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer/op_observer_impl.cpp
@@ -1916,8 +1916,7 @@ void OpObserverImpl::onBatchedWriteCommit(OperationContext* opCtx) {
         getMaxSizeOfBatchedOperationsInSingleOplogEntryBytes(),
         /*prepare=*/false);
 
-    if (!gFeatureFlagInternalWritesAreReplicatedTransactionally.isEnabled(
-            serverGlobalParams.featureCompatibility)) {
+    if (!gFeatureFlagLargeBatchedOperations.isEnabled(serverGlobalParams.featureCompatibility)) {
         // Before SERVER-70765, we relied on packTransactionStatementsForApplyOps() to check if the
         // batch of operations could fit in a single applyOps entry. Now, we pass the size limit to
         // TransactionOperations::getApplyOpsInfo() and are now able to return an error earlier.
diff --git a/src/mongo/db/storage/storage_parameters.idl b/src/mongo/db/storage/storage_parameters.idl
index 34e0116fd84..b5e6eecdef3 100644
--- a/src/mongo/db/storage/storage_parameters.idl
+++ b/src/mongo/db/storage/storage_parameters.idl
@@ -202,6 +202,16 @@ feature_flags:
         default: true
         version: 6.1
         shouldBeFCVGated: true
+    featureFlagLargeBatchedOperations:
+        description: >-
+            Enable support for replicating batched operations over multiple applyOps oplog
+            entries. Otherwise, batched operations that do not fit within a single applyOps
+            oplog entry will fail with a TransactionTooLarge error.
+            See featureFlagBatchMultiDeletes, maxNumberOfBatchedOperationsInSingleOplogEntry,
+            and maxSizeOfBatchedOperationsInSingleOplogEntryBytes.
+        cpp_varname: gFeatureFlagLargeBatchedOperations
+        default: false
+        shouldBeFCVGated: true
     featureFlagDocumentSourceListCatalog:
         description: "When enabled, allow the use of the $listCatalog aggregation stage"
         cpp_varname: feature_flags::gDocumentSourceListCatalog
--
cgit v1.2.1
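
The hunks above gate test behavior on the new flag via FeatureFlagUtil. The snippet below is a minimal sketch, not part of this commit, of how a test might start a replica set with the flag enabled and branch on it the same way the updated tests do; it assumes the usual convention that an IDL feature flag is toggled by a setParameter of the same name, and that the standard jstest helpers ReplSetTest and FeatureFlagUtil are available.

    // Sketch only: enable featureFlagLargeBatchedOperations on a one-node replica set
    // and check it with FeatureFlagUtil, mirroring the pattern used in the patched tests.
    load("jstests/libs/feature_flag_util.js");

    const rst = new ReplSetTest({
        nodes: 1,
        nodeOptions: {setParameter: {featureFlagLargeBatchedOperations: true}},
    });
    rst.startSet();
    rst.initiate();

    const primary = rst.getPrimary();
    if (FeatureFlagUtil.isEnabled(primary, "LargeBatchedOperations")) {
        jsTestLog("Batched writes may be replicated across multiple applyOps oplog entries.");
    } else {
        jsTestLog("Legacy behavior: oversized batched writes fail with TransactionTooLarge.");
    }
    rst.stopSet();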