author     Matthew Russotto <matthew.russotto@10gen.com>    2017-03-09 12:34:17 -0500
committer  Matthew Russotto <matthew.russotto@10gen.com>    2017-03-13 09:02:03 -0400
commit     73f9e8b8a8422becf8694fe3d82c0e647dc71189 (patch)
tree       b938d6e3fd63fc00819b72231dfe952b8b212d79 /src
parent     ba3db7220399aedbb871aa8a18d325a877c30d53 (diff)
download   mongo-73f9e8b8a8422becf8694fe3d82c0e647dc71189.tar.gz
SERVER-26965 Use RAII type for turning off replicated writes
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/catalog/capped_utils.cpp                                | 30
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp                           | 20
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp                                 |  4
-rw-r--r--  src/mongo/db/commands/mr.cpp                                         | 12
-rw-r--r--  src/mongo/db/db.cpp                                                  |  4
-rw-r--r--  src/mongo/db/dbhelpers.cpp                                           |  4
-rw-r--r--  src/mongo/db/introspect.cpp                                          |  4
-rw-r--r--  src/mongo/db/operation_context.h                                     | 20
-rw-r--r--  src/mongo/db/repl/collection_bulk_loader_impl.cpp                    |  5
-rw-r--r--  src/mongo/db/repl/collection_cloner.cpp                              |  2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp                                   |  2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp   |  6
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp                                 |  2
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp                         |  2
-rw-r--r--  src/mongo/db/repl/storage_interface_impl_test.cpp                    |  4
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp                                      |  8
-rw-r--r--  src/mongo/dbtests/repltests.cpp                                      |  9
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp |  8
18 files changed, 67 insertions, 79 deletions
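
The diff below swaps the manual save/disable/restore dance around setReplicatedWrites() for a scoped RAII guard, repl::UnreplicatedWritesBlock. The guard's definition is not among the files shown here; the following is a minimal sketch of what such a type plausibly looks like, assuming it records the current writesAreReplicated() state on construction and restores it on destruction. Member names below are illustrative, not taken from the commit.

#include "mongo/db/operation_context.h"

namespace mongo {
namespace repl {

// Hypothetical sketch of the RAII guard; the real definition lives elsewhere
// in the replication code and is not part of the files listed above.
class UnreplicatedWritesBlock {
    UnreplicatedWritesBlock(const UnreplicatedWritesBlock&) = delete;
    UnreplicatedWritesBlock& operator=(const UnreplicatedWritesBlock&) = delete;

public:
    explicit UnreplicatedWritesBlock(OperationContext* opCtx)
        : _opCtx(opCtx), _shouldReplicateWrites(opCtx->writesAreReplicated()) {
        // setReplicatedWrites() becomes private in this commit, so the guard
        // relies on the friend declaration added to OperationContext below.
        _opCtx->setReplicatedWrites(false);
    }

    ~UnreplicatedWritesBlock() {
        // Restore the previous setting even on early return or exception.
        _opCtx->setReplicatedWrites(_shouldReplicateWrites);
    }

private:
    OperationContext* const _opCtx;
    const bool _shouldReplicateWrites;
};

}  // namespace repl
}  // namespace mongo

Call sites construct the guard on the stack for the scope that must not be replicated. The one exception in this diff is storage_interface_impl_test.cpp, which holds the guard in a std::unique_ptr so its lifetime spans the test fixture rather than a single scope.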
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 62b2eae8fbf..d05c1ef2f92 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -273,26 +273,28 @@ Status convertToCapped(OperationContext* opCtx,
     }
 
-    const bool shouldReplicateWrites = opCtx->writesAreReplicated();
-    opCtx->setReplicatedWrites(false);
-    ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
-    Status status =
-        cloneCollectionAsCapped(opCtx, db, shortSource.toString(), shortTmpName, size, true);
+    {
+        repl::UnreplicatedWritesBlock uwb(opCtx);
+        Status status =
+            cloneCollectionAsCapped(opCtx, db, shortSource.toString(), shortTmpName, size, true);
 
-    if (!status.isOK()) {
-        return status;
-    }
+        if (!status.isOK()) {
+            return status;
+        }
 
-    verify(db->getCollection(longTmpName));
+        verify(db->getCollection(longTmpName));
+    }
 
     {
         WriteUnitOfWork wunit(opCtx);
-        status = db->dropCollection(opCtx, collectionName.ns());
-        opCtx->setReplicatedWrites(shouldReplicateWrites);
-        if (!status.isOK())
-            return status;
+        {
+            repl::UnreplicatedWritesBlock uwb(opCtx);
+            Status status = db->dropCollection(opCtx, collectionName.ns());
+            if (!status.isOK())
+                return status;
+        }
 
-        status = db->renameCollection(opCtx, longTmpName, collectionName.ns(), false);
+        Status status = db->renameCollection(opCtx, longTmpName, collectionName.ns(), false);
 
         if (!status.isOK())
             return status;
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index f48454fae29..86676854c4e 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -179,13 +179,11 @@ Status renameCollection(OperationContext* opCtx,
         WriteUnitOfWork wunit(opCtx);
 
         // No logOp necessary because the entire renameCollection command is one logOp.
-        bool shouldReplicateWrites = opCtx->writesAreReplicated();
-        opCtx->setReplicatedWrites(false);
+        repl::UnreplicatedWritesBlock uwb(opCtx);
         targetColl = targetDB->createCollection(
            opCtx, target.ns(), options, false);  // _id index build with others later.
-        opCtx->setReplicatedWrites(shouldReplicateWrites);
         if (!targetColl) {
             return Status(ErrorCodes::OutOfDiskSpace, "Failed to create target collection.");
         }
@@ -232,10 +230,8 @@ Status renameCollection(OperationContext* opCtx,
             WriteUnitOfWork wunit(opCtx);
 
             // No logOp necessary because the entire renameCollection command is one logOp.
-            bool shouldReplicateWrites = opCtx->writesAreReplicated();
-            opCtx->setReplicatedWrites(false);
+            repl::UnreplicatedWritesBlock uwb(opCtx);
             Status status = targetColl->insertDocument(opCtx, obj, indexers, true);
-            opCtx->setReplicatedWrites(shouldReplicateWrites);
             if (!status.isOK())
                 return status;
             wunit.commit();
@@ -251,12 +247,12 @@ Status renameCollection(OperationContext* opCtx,
     // source collection and finalize the rename.
     WriteUnitOfWork wunit(opCtx);
 
-    bool shouldReplicateWrites = opCtx->writesAreReplicated();
-    opCtx->setReplicatedWrites(false);
-    Status status = sourceDB->dropCollection(opCtx, source.ns());
-    opCtx->setReplicatedWrites(shouldReplicateWrites);
-    if (!status.isOK())
-        return status;
+    {
+        repl::UnreplicatedWritesBlock uwb(opCtx);
+        Status status = sourceDB->dropCollection(opCtx, source.ns());
+        if (!status.isOK())
+            return status;
+    }
 
     indexer.commit();
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 233c3e408e4..f89b9fe723b 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -326,9 +326,7 @@ public:
         bool backupOriginalFiles = e.isBoolean() && e.boolean();
 
         StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
-        bool shouldReplicateWrites = opCtx->writesAreReplicated();
-        opCtx->setReplicatedWrites(false);
-        ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
+        repl::UnreplicatedWritesBlock uwb(opCtx);
         Status status = repairDatabase(
             opCtx, engine, dbname, preserveClonedFilesOnFailure, backupOriginalFiles);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index af0c740fc70..9d36a06731b 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -389,9 +389,7 @@ void State::dropTempCollections() {
     if (_useIncremental && !_config.incLong.isEmpty()) {
         // We don't want to log the deletion of incLong as it isn't replicated. While
         // harmless, this would lead to a scary looking warning on the secondaries.
-        bool shouldReplicateWrites = _opCtx->writesAreReplicated();
-        _opCtx->setReplicatedWrites(false);
-        ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _opCtx, shouldReplicateWrites);
+        repl::UnreplicatedWritesBlock uwb(_opCtx);
 
         MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
             ScopedTransaction scopedXact(_opCtx, MODE_IX);
@@ -419,9 +417,7 @@ void State::prepTempCollection() {
     if (_useIncremental) {
         // Create the inc collection and make sure we have index on "0" key.
         // Intentionally not replicating the inc collection to secondaries.
-        bool shouldReplicateWrites = _opCtx->writesAreReplicated();
-        _opCtx->setReplicatedWrites(false);
-        ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _opCtx, shouldReplicateWrites);
+        repl::UnreplicatedWritesBlock uwb(_opCtx);
 
         MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
             OldClientWriteContext incCtx(_opCtx, _config.incLong.ns());
@@ -786,9 +782,7 @@ void State::_insertToInc(BSONObj& o) {
     OldClientWriteContext ctx(_opCtx, _config.incLong.ns());
     WriteUnitOfWork wuow(_opCtx);
     Collection* coll = getCollectionOrUassert(ctx.db(), _config.incLong);
-    bool shouldReplicateWrites = _opCtx->writesAreReplicated();
-    _opCtx->setReplicatedWrites(false);
-    ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _opCtx, shouldReplicateWrites);
+    repl::UnreplicatedWritesBlock uwb(_opCtx);
 
     // The documents inserted into the incremental collection are of the form
     // {"0": <key>, "1": <value>}, so we cannot call fixDocumentForInsert(o) here because the
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 987a4c38679..02294c9c71a 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -205,9 +205,7 @@ void logStartup(OperationContext* opCtx) {
         WriteUnitOfWork wunit(opCtx);
         if (!collection) {
             BSONObj options = BSON("capped" << true << "size" << 10 * 1024 * 1024);
-            bool shouldReplicateWrites = opCtx->writesAreReplicated();
-            opCtx->setReplicatedWrites(false);
-            ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
+            repl::UnreplicatedWritesBlock uwb(opCtx);
             uassertStatusOK(userCreateNS(opCtx, db, startupLogCollectionName.ns(), options));
             collection = db->getCollection(startupLogCollectionName);
         }
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 83f4c15e52d..265687697ff 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -495,9 +495,7 @@ long long Helpers::removeRange(OperationContext* opCtx,
 
 void Helpers::emptyCollection(OperationContext* opCtx, const char* ns) {
     OldClientContext context(opCtx, ns);
-    bool shouldReplicateWrites = opCtx->writesAreReplicated();
-    opCtx->setReplicatedWrites(false);
-    ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
+    repl::UnreplicatedWritesBlock uwb(opCtx);
     Collection* collection = context.db() ? context.db()->getCollection(ns) : nullptr;
     deleteObjects(opCtx, collection, ns, BSONObj(), PlanExecutor::YIELD_MANUAL, false);
 }
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 243c23087e1..9fe9dd117cd 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -189,9 +189,7 @@ Status createProfileCollection(OperationContext* opCtx, Database* db) {
     collectionOptions.cappedSize = 1024 * 1024;
 
     WriteUnitOfWork wunit(opCtx);
-    bool shouldReplicateWrites = opCtx->writesAreReplicated();
-    opCtx->setReplicatedWrites(false);
-    ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
+    repl::UnreplicatedWritesBlock uwb(opCtx);
     invariant(db->createCollection(opCtx, dbProfilingNS, collectionOptions));
     wunit.commit();
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index fb867589651..ac558d668ab 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -54,6 +54,10 @@ class ServiceContext;
 class StringData;
 class WriteUnitOfWork;
 
+namespace repl {
+class UnreplicatedWritesBlock;
+}  // namespace repl
+
 /**
  * This class encompasses the state required by an operation and lives from the time a network
  * operation is dispatched until its execution is finished. Note that each "getmore" on a cursor
@@ -275,14 +279,6 @@ public:
     }
 
     /**
-     * Set whether or not operations should generate oplog entries.
-     * TODO SERVER-26965: Make this private.
-     */
-    void setReplicatedWrites(bool writesAreReplicated = true) {
-        _writesAreReplicated = writesAreReplicated;
-    }
-
-    /**
      * Returns true if operations should generate oplog entries.
      */
     bool writesAreReplicated() const {
@@ -404,7 +400,15 @@ private:
      */
     Date_t getExpirationDateForWaitForValue(Milliseconds waitFor);
 
+    /**
+     * Set whether or not operations should generate oplog entries.
+     */
+    void setReplicatedWrites(bool writesAreReplicated = true) {
+        _writesAreReplicated = writesAreReplicated;
+    }
+
     friend class WriteUnitOfWork;
+    friend class repl::UnreplicatedWritesBlock;
 
     Client* const _client;
     const unsigned int _opId;
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index 9b1c96ec95b..2732376875a 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -93,6 +93,9 @@ Status CollectionBulkLoaderImpl::init(Collection* coll,
             invariant(opCtx);
             invariant(coll);
             invariant(opCtx->getClient() == &cc());
+            // All writes in CollectionBulkLoaderImpl should be unreplicated.
+            // The opCtx is accessed indirectly through _secondaryIndexesBlock.
+            UnreplicatedWritesBlock uwb(opCtx);
             std::vector<BSONObj> specs(secondaryIndexSpecs);
             // This enforces the buildIndexes setting in the replica set configuration.
             _secondaryIndexesBlock->removeExistingIndexes(&specs);
@@ -124,6 +127,7 @@ Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::con
     return _runTaskReleaseResourcesOnFailure(
         [begin, end, &count, this](OperationContext* opCtx) -> Status {
             invariant(opCtx);
+            UnreplicatedWritesBlock uwb(opCtx);
 
             for (auto iter = begin; iter != end; ++iter) {
                 std::vector<MultiIndexBlock*> indexers;
@@ -157,6 +161,7 @@ Status CollectionBulkLoaderImpl::commit() {
             LOG(2) << "Creating indexes for ns: " << _nss.ns();
             invariant(opCtx->getClient() == &cc());
             invariant(opCtx == _opCtx);
+            UnreplicatedWritesBlock uwb(opCtx);
 
             // Commit before deleting dups, so the dups will be removed from secondary indexes when
             // deleted.
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 85c9f39169d..4a47981da2d 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -338,7 +338,7 @@ void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus&
         return;
     }
     auto opCtx = cbd.opCtx;
-    opCtx->setReplicatedWrites(false);
+    UnreplicatedWritesBlock uwb(opCtx);
     auto&& createStatus = _storageInterface->createCollection(opCtx, _destNss, _options);
     _finishCallback(createStatus);
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 21194aeff8d..f76856d2802 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -731,7 +731,7 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* opCtx,
     // Push the CurOp stack for "opCtx" so each individual oplog entry application is separately
    // reported.
     CurOp individualOp(opCtx);
-    opCtx->setReplicatedWrites(false);
+    UnreplicatedWritesBlock uwb(opCtx);
     const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
     if (replSettings.getPretouch() &&
         !alreadyLocked /*doesn't make sense if in write lock already*/) {
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 04a2fcf88f8..c2619816034 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -373,7 +373,7 @@ Status ReplicationCoordinatorExternalStateImpl::runRepairOnLocalDB(OperationCont
         return Status::OK();
     }
 
-    opCtx->setReplicatedWrites(false);
+    UnreplicatedWritesBlock uwb(opCtx);
     Status status = repairDatabase(opCtx, engine, localDbName, false, false);
 
     // Open database before returning
@@ -660,9 +660,7 @@ void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationCon
     }
 
     // Apply remaining ops one at at time, but don't log them because they are already logged.
-    const bool wereWritesReplicated = opCtx->writesAreReplicated();
-    ON_BLOCK_EXIT([&] { opCtx->setReplicatedWrites(wereWritesReplicated); });
-    opCtx->setReplicatedWrites(false);
+    UnreplicatedWritesBlock uwb(opCtx);
 
     while (cursor->more()) {
         auto entry = cursor->nextSafe();
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index a6a5cf78baf..24463e23091 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -265,7 +265,7 @@ bool _initialSyncApplyOplog(OperationContext* opCtx,
 Status _initialSync(OperationContext* opCtx, BackgroundSync* bgsync) {
     log() << "initial sync pending";
 
-    opCtx->setReplicatedWrites(false);
+    UnreplicatedWritesBlock uwb(opCtx);
     DisableDocumentValidation validationDisabler(opCtx);
 
     ReplicationCoordinator* replCoord(getGlobalReplicationCoordinator());
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index ff5aa7d4260..bece9cedeb6 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -257,7 +257,7 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
     auto status = runner->runSynchronousTask([&](OperationContext* opCtx) -> Status {
         // We are not replicating nor validating writes under this OperationContext*.
         // The OperationContext* is used for all writes to the (newly) cloned collection.
-        opCtx->setReplicatedWrites(false);
+        UnreplicatedWritesBlock uwb(opCtx);
         documentValidationDisabled(opCtx) = true;
 
         // Retry if WCE.
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index cd8bdd98686..e94761e5d35 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -195,6 +195,7 @@ protected:
         setGlobalReplicationCoordinator(_coordinator);
     }
     void tearDown() override {
+        _uwb.reset(nullptr);
         _opCtx.reset(nullptr);
         ServiceContextMongoDTest::tearDown();
     }
@@ -203,7 +204,7 @@ protected:
     void createOptCtx() {
         _opCtx = cc().makeOperationContext();
         // We are not replicating nor validating these writes.
-        _opCtx->setReplicatedWrites(false);
+        _uwb = stdx::make_unique<UnreplicatedWritesBlock>(_opCtx.get());
         DisableDocumentValidation validationDisabler(_opCtx.get());
     }
 
@@ -213,6 +214,7 @@ protected:
 
 private:
     ServiceContext::UniqueOperationContext _opCtx;
+    std::unique_ptr<UnreplicatedWritesBlock> _uwb;
 
     // Owned by service context
     ReplicationCoordinator* _coordinator;
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 8738b47d027..06c86f5823f 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -325,7 +325,7 @@ Status SyncTail::syncApply(OperationContext* opCtx,
         auto applyOp = [&](Database* db) {
             // For non-initial-sync, we convert updates to upserts
             // to suppress errors when replaying oplog entries.
-            opCtx->setReplicatedWrites(false);
+            UnreplicatedWritesBlock uwb(opCtx);
             DisableDocumentValidation validationDisabler(opCtx);
 
             Status status =
@@ -481,7 +481,7 @@ void scheduleWritesToOplog(OperationContext* opCtx,
         const auto txnHolder = cc().makeOperationContext();
         const auto opCtx = txnHolder.get();
         opCtx->lockState()->setShouldConflictWithSecondaryBatchApplication(false);
-        opCtx->setReplicatedWrites(false);
+        UnreplicatedWritesBlock uwb(opCtx);
 
         std::vector<BSONObj> docs;
         docs.reserve(end - begin);
@@ -1062,7 +1062,7 @@ void multiSyncApply(MultiApplier::OperationPtrs* ops, SyncTail*) {
 Status multiSyncApply_noAbort(OperationContext* opCtx,
                               MultiApplier::OperationPtrs* oplogEntryPointers,
                               SyncApplyFn syncApply) {
-    opCtx->setReplicatedWrites(false);
+    UnreplicatedWritesBlock uwb(opCtx);
     DisableDocumentValidation validationDisabler(opCtx);
 
     // allow us to get through the magic barrier
@@ -1182,7 +1182,7 @@ Status multiInitialSyncApply_noAbort(OperationContext* opCtx,
                                      MultiApplier::OperationPtrs* ops,
                                      SyncTail* st,
                                      AtomicUInt32* fetchCount) {
-    opCtx->setReplicatedWrites(false);
+    UnreplicatedWritesBlock uwb(opCtx);
     DisableDocumentValidation validationDisabler(opCtx);
 
     // allow us to get through the magic barrier
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 59eedcd86b7..cb2032f8ccc 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -187,9 +187,8 @@ protected:
                 if (0) {
                     mongo::unittest::log() << "op: " << *i << endl;
                 }
-                _opCtx.setReplicatedWrites(false);
+                repl::UnreplicatedWritesBlock uwb(&_opCtx);
                 a.applyOperation(&_opCtx, ctx.db(), *i);
-                _opCtx.setReplicatedWrites(true);
             }
         }
     }
@@ -240,9 +239,8 @@ protected:
 
         OpDebug* const nullOpDebug = nullptr;
         if (o.hasField("_id")) {
-            _opCtx.setReplicatedWrites(false);
+            repl::UnreplicatedWritesBlock uwb(&_opCtx);
             coll->insertDocument(&_opCtx, o, nullOpDebug, true);
-            _opCtx.setReplicatedWrites(true);
             wunit.commit();
             return;
         }
@@ -252,9 +250,8 @@ protected:
         id.init();
         b.appendOID("_id", &id);
         b.appendElements(o);
-        _opCtx.setReplicatedWrites(false);
+        repl::UnreplicatedWritesBlock uwb(&_opCtx);
         coll->insertDocument(&_opCtx, b.obj(), nullOpDebug, true);
-        _opCtx.setReplicatedWrites(true);
         wunit.commit();
     }
 
     static BSONObj wid(const char* json) {
diff --git a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
index 0ce97c5375e..cb35f46c2df 100644
--- a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
@@ -214,13 +214,11 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) {
     // Now remove the version document and re-run initializeConfigDatabaseIfNeeded().
     {
         // Mirror what happens if the config.version document is rolled back.
-        ON_BLOCK_EXIT([&] {
-            operationContext()->setReplicatedWrites(true);
-            replicationCoordinator()->setFollowerMode(repl::MemberState::RS_PRIMARY);
-        });
-        operationContext()->setReplicatedWrites(false);
+        ON_BLOCK_EXIT(
+            [&] { replicationCoordinator()->setFollowerMode(repl::MemberState::RS_PRIMARY); });
         replicationCoordinator()->setFollowerMode(repl::MemberState::RS_ROLLBACK);
         auto opCtx = operationContext();
+        repl::UnreplicatedWritesBlock uwb(opCtx);
         auto nss = NamespaceString(VersionType::ConfigNS);
         MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
             ScopedTransaction transaction(opCtx, MODE_IX);