diff options
author | Maria van Keulen <maria@mongodb.com> | 2017-03-07 12:00:08 -0500 |
---|---|---|
committer | Maria van Keulen <maria@mongodb.com> | 2017-03-07 12:00:08 -0500 |
commit | 589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79 (patch) | |
tree | c7a090ffdd56a91ae677e2492c61b820af44f964 /src/mongo/db/repl | |
parent | 3cba97198638df3750e3b455e2ad57af7ee536ae (diff) | |
download | mongo-589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79.tar.gz |
SERVER-27938 Rename all OperationContext variables to opCtx
This commit is an automated rename of all whole-word instances of txn,
_txn, and txnPtr to opCtx, _opCtx, and opCtxPtr, respectively, in all
.cpp and .h files in src/mongo.
Diffstat (limited to 'src/mongo/db/repl')
92 files changed, 2914 insertions, 2859 deletions
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp index a0f646f6078..186903d0924 100644 --- a/src/mongo/db/repl/bgsync.cpp +++ b/src/mongo/db/repl/bgsync.cpp @@ -140,20 +140,20 @@ BackgroundSync::BackgroundSync( bufferMaxSizeGauge.increment(_oplogBuffer->getMaxSize() - bufferMaxSizeGauge.get()); } -void BackgroundSync::startup(OperationContext* txn) { - _oplogBuffer->startup(txn); +void BackgroundSync::startup(OperationContext* opCtx) { + _oplogBuffer->startup(opCtx); invariant(!_producerThread); _producerThread.reset(new stdx::thread(stdx::bind(&BackgroundSync::_run, this))); } -void BackgroundSync::shutdown(OperationContext* txn) { +void BackgroundSync::shutdown(OperationContext* opCtx) { stdx::lock_guard<stdx::mutex> lock(_mutex); // Clear the buffer. This unblocks the OplogFetcher if it is blocked with a full queue, but // ensures that it won't add anything. It will also unblock the OpApplier pipeline if it is // waiting for an operation to be past the slaveDelay point. 
- clearBuffer(txn); + clearBuffer(opCtx); _state = ProducerState::Stopped; if (_syncSourceResolver) { @@ -167,9 +167,9 @@ void BackgroundSync::shutdown(OperationContext* txn) { _inShutdown = true; } -void BackgroundSync::join(OperationContext* txn) { +void BackgroundSync::join(OperationContext* opCtx) { _producerThread->join(); - _oplogBuffer->shutdown(txn); + _oplogBuffer->shutdown(opCtx); } bool BackgroundSync::inShutdown() const { @@ -225,15 +225,15 @@ void BackgroundSync::_runProducer() { } // we want to start when we're no longer primary // start() also loads _lastOpTimeFetched, which we know is set from the "if" - auto txn = cc().makeOperationContext(); + auto opCtx = cc().makeOperationContext(); if (getState() == ProducerState::Starting) { - start(txn.get()); + start(opCtx.get()); } - _produce(txn.get()); + _produce(opCtx.get()); } -void BackgroundSync::_produce(OperationContext* txn) { +void BackgroundSync::_produce(OperationContext* opCtx) { if (MONGO_FAIL_POINT(stopReplProducer)) { // This log output is used in js tests so please leave it. log() << "bgsync - stopReplProducer fail point " @@ -271,7 +271,7 @@ void BackgroundSync::_produce(OperationContext* txn) { HostAndPort source; SyncSourceResolverResponse syncSourceResp; { - const OpTime minValidSaved = StorageInterface::get(txn)->getMinValid(txn); + const OpTime minValidSaved = StorageInterface::get(opCtx)->getMinValid(opCtx); stdx::lock_guard<stdx::mutex> lock(_mutex); const auto requiredOpTime = (minValidSaved > _lastOpTimeFetched) ? minValidSaved : OpTime(); @@ -358,8 +358,9 @@ void BackgroundSync::_produce(OperationContext* txn) { // Set the applied point if unset. This is most likely the first time we've established a sync // source since stepping down or otherwise clearing the applied point. We need to set this here, // before the OplogWriter gets a chance to append to the oplog. 
- if (StorageInterface::get(txn)->getAppliedThrough(txn).isNull()) { - StorageInterface::get(txn)->setAppliedThrough(txn, _replCoord->getMyLastAppliedOpTime()); + if (StorageInterface::get(opCtx)->getAppliedThrough(opCtx).isNull()) { + StorageInterface::get(opCtx)->setAppliedThrough(opCtx, + _replCoord->getMyLastAppliedOpTime()); } // "lastFetched" not used. Already set in _enqueueDocuments. @@ -472,7 +473,7 @@ void BackgroundSync::_produce(OperationContext* txn) { } } - _rollback(txn, source, syncSourceResp.rbid, getConnection); + _rollback(opCtx, source, syncSourceResp.rbid, getConnection); // Reset the producer to clear the sync source and the last optime fetched. stop(true); startProducerIfStopped(); @@ -540,10 +541,10 @@ Status BackgroundSync::_enqueueDocuments(Fetcher::Documents::const_iterator begi return Status::OK(); // Nothing to do. } - auto txn = cc().makeOperationContext(); + auto opCtx = cc().makeOperationContext(); // Wait for enough space. - _oplogBuffer->waitForSpace(txn.get(), info.toApplyDocumentBytes); + _oplogBuffer->waitForSpace(opCtx.get(), info.toApplyDocumentBytes); { // Don't add more to the buffer if we are in shutdown. Continue holding the lock until we @@ -560,7 +561,7 @@ Status BackgroundSync::_enqueueDocuments(Fetcher::Documents::const_iterator begi } // Buffer docs for later application. - _oplogBuffer->pushAllNonBlocking(txn.get(), begin, end); + _oplogBuffer->pushAllNonBlocking(opCtx.get(), begin, end); // Update last fetched info. 
_lastFetchedHash = info.lastDocument.value; @@ -585,8 +586,8 @@ Status BackgroundSync::_enqueueDocuments(Fetcher::Documents::const_iterator begi return Status::OK(); } -bool BackgroundSync::peek(OperationContext* txn, BSONObj* op) { - return _oplogBuffer->peek(txn, op); +bool BackgroundSync::peek(OperationContext* opCtx, BSONObj* op) { + return _oplogBuffer->peek(opCtx, op); } void BackgroundSync::waitForMore() { @@ -594,11 +595,11 @@ void BackgroundSync::waitForMore() { _oplogBuffer->waitForData(Seconds(1)); } -void BackgroundSync::consume(OperationContext* txn) { +void BackgroundSync::consume(OperationContext* opCtx) { // this is just to get the op off the queue, it's been peeked at // and queued for application already BSONObj op; - if (_oplogBuffer->tryPop(txn, &op)) { + if (_oplogBuffer->tryPop(opCtx, &op)) { bufferCountGauge.decrement(1); bufferSizeGauge.decrement(getSize(op)); } else { @@ -609,7 +610,7 @@ void BackgroundSync::consume(OperationContext* txn) { } } -void BackgroundSync::_rollback(OperationContext* txn, +void BackgroundSync::_rollback(OperationContext* opCtx, const HostAndPort& source, boost::optional<int> requiredRBID, stdx::function<DBClientBase*()> getConnection) { @@ -635,7 +636,7 @@ void BackgroundSync::_rollback(OperationContext* txn, // then. 
{ log() << "rollback 0"; - Lock::GlobalWrite globalWrite(txn->lockState()); + Lock::GlobalWrite globalWrite(opCtx->lockState()); if (!_replCoord->setFollowerMode(MemberState::RS_ROLLBACK)) { log() << "Cannot transition from " << _replCoord->getMemberState().toString() << " to " << MemberState(MemberState::RS_ROLLBACK).toString(); @@ -644,8 +645,8 @@ void BackgroundSync::_rollback(OperationContext* txn, } try { - auto status = syncRollback(txn, - OplogInterfaceLocal(txn, rsOplogName), + auto status = syncRollback(opCtx, + OplogInterfaceLocal(opCtx, rsOplogName), RollbackSourceImpl(getConnection, source, rsOplogName), requiredRBID, _replCoord); @@ -668,7 +669,7 @@ void BackgroundSync::_rollback(OperationContext* txn, warning() << "rollback cannot complete at this time (retrying later): " << redact(ex) << " appliedThrough=" << _replCoord->getMyLastAppliedOpTime() - << " minvalid=" << StorageInterface::get(txn)->getMinValid(txn); + << " minvalid=" << StorageInterface::get(opCtx)->getMinValid(opCtx); // Sleep a bit to allow upstream node to coalesce, if that was the cause of the failure. If // we failed in a way that will keep failing, but wasn't flagged as a fatal failure, this @@ -684,12 +685,12 @@ void BackgroundSync::_rollback(OperationContext* txn, // so that if we wind up shutting down uncleanly in response to something we rolled back // we know that we won't wind up right back in the same situation when we start back up // because the rollback wasn't durable. - txn->recoveryUnit()->waitUntilDurable(); + opCtx->recoveryUnit()->waitUntilDurable(); // If we detected that we rolled back the shardIdentity document as part of this rollback // then we must shut down to clear the in-memory ShardingState associated with the // shardIdentity document. - if (ShardIdentityRollbackNotifier::get(txn)->didRollbackHappen()) { + if (ShardIdentityRollbackNotifier::get(opCtx)->didRollbackHappen()) { severe() << "shardIdentity document rollback detected. 
Shutting down to clear " "in-memory sharding state. Restarting this process should safely return it " "to a healthy state"; @@ -734,10 +735,10 @@ void BackgroundSync::stop(bool resetLastFetchedOptime) { } } -void BackgroundSync::start(OperationContext* txn) { +void BackgroundSync::start(OperationContext* opCtx) { OpTimeWithHash lastAppliedOpTimeWithHash; do { - lastAppliedOpTimeWithHash = _readLastAppliedOpTimeWithHash(txn); + lastAppliedOpTimeWithHash = _readLastAppliedOpTimeWithHash(opCtx); stdx::lock_guard<stdx::mutex> lk(_mutex); // Double check the state after acquiring the mutex. if (_state != ProducerState::Starting) { @@ -762,28 +763,28 @@ void BackgroundSync::start(OperationContext* txn) { LOG(1) << "bgsync fetch queue set to: " << _lastOpTimeFetched << " " << _lastFetchedHash; } -void BackgroundSync::clearBuffer(OperationContext* txn) { - _oplogBuffer->clear(txn); +void BackgroundSync::clearBuffer(OperationContext* opCtx) { + _oplogBuffer->clear(opCtx); const auto count = bufferCountGauge.get(); bufferCountGauge.decrement(count); const auto size = bufferSizeGauge.get(); bufferSizeGauge.decrement(size); } -OpTimeWithHash BackgroundSync::_readLastAppliedOpTimeWithHash(OperationContext* txn) { +OpTimeWithHash BackgroundSync::_readLastAppliedOpTimeWithHash(OperationContext* opCtx) { BSONObj oplogEntry; try { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock lk(txn->lockState(), "local", MODE_X); - bool success = Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock lk(opCtx->lockState(), "local", MODE_X); + bool success = Helpers::getLast(opCtx, rsOplogName.c_str(), oplogEntry); if (!success) { // This can happen when we are to do an initial sync. lastHash will be set // after the initial sync is complete. 
return OpTimeWithHash(0); } } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "readLastAppliedHash", rsOplogName); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "readLastAppliedHash", rsOplogName); } catch (const DBException& ex) { severe() << "Problem reading " << rsOplogName << ": " << redact(ex); fassertFailed(18904); @@ -817,8 +818,8 @@ bool BackgroundSync::shouldStopFetching() const { return false; } -void BackgroundSync::pushTestOpToBuffer(OperationContext* txn, const BSONObj& op) { - _oplogBuffer->push(txn, op); +void BackgroundSync::pushTestOpToBuffer(OperationContext* opCtx, const BSONObj& op) { + _oplogBuffer->push(opCtx, op); bufferCountGauge.increment(); bufferSizeGauge.increment(op.objsize()); } diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h index 479caabaee8..85069066cdd 100644 --- a/src/mongo/db/repl/bgsync.h +++ b/src/mongo/db/repl/bgsync.h @@ -84,17 +84,17 @@ public: /** * Starts oplog buffer, task executor and producer thread, in that order. */ - void startup(OperationContext* txn); + void startup(OperationContext* opCtx); /** * Signals producer thread to stop. */ - void shutdown(OperationContext* txn); + void shutdown(OperationContext* opCtx); /** * Waits for producer thread to stop before shutting down the task executor and oplog buffer. */ - void join(OperationContext* txn); + void join(OperationContext* opCtx); /** * Returns true if shutdown() has been called. @@ -109,8 +109,8 @@ public: // Interface implementation - bool peek(OperationContext* txn, BSONObj* op); - void consume(OperationContext* txn); + bool peek(OperationContext* opCtx, BSONObj* op); + void consume(OperationContext* opCtx); void clearSyncTarget(); void waitForMore(); @@ -118,7 +118,7 @@ public: BSONObj getCounters(); // Clears any fetched and buffered oplog entries. 
- void clearBuffer(OperationContext* txn); + void clearBuffer(OperationContext* opCtx); /** * Returns true if any of the following is true: @@ -134,7 +134,7 @@ public: void startProducerIfStopped(); // Adds a fake oplog entry to buffer. Used for testing only. - void pushTestOpToBuffer(OperationContext* txn, const BSONObj& op); + void pushTestOpToBuffer(OperationContext* opCtx, const BSONObj& op); private: bool _inShutdown_inlock() const; @@ -148,7 +148,7 @@ private: void _run(); // Production thread inner loop. void _runProducer(); - void _produce(OperationContext* txn); + void _produce(OperationContext* opCtx); /** * Checks current background sync state before pushing operations into blocking queue and @@ -165,15 +165,15 @@ private: * Executes a rollback. * 'getConnection' returns a connection to the sync source. */ - void _rollback(OperationContext* txn, + void _rollback(OperationContext* opCtx, const HostAndPort& source, boost::optional<int> requiredRBID, stdx::function<DBClientBase*()> getConnection); // restart syncing - void start(OperationContext* txn); + void start(OperationContext* opCtx); - OpTimeWithHash _readLastAppliedOpTimeWithHash(OperationContext* txn); + OpTimeWithHash _readLastAppliedOpTimeWithHash(OperationContext* opCtx); // Production thread std::unique_ptr<OplogBuffer> _oplogBuffer; diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp index ffc6b176032..9b1c96ec95b 100644 --- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp +++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp @@ -51,7 +51,7 @@ namespace mongo { namespace repl { -CollectionBulkLoaderImpl::CollectionBulkLoaderImpl(OperationContext* txn, +CollectionBulkLoaderImpl::CollectionBulkLoaderImpl(OperationContext* opCtx, Collection* coll, const BSONObj idIndexSpec, std::unique_ptr<OldThreadPool> threadPool, @@ -62,13 +62,13 @@ CollectionBulkLoaderImpl::CollectionBulkLoaderImpl(OperationContext* txn, 
_runner(std::move(runner)), _autoColl(std::move(autoColl)), _autoDB(std::move(autoDb)), - _txn(txn), + _opCtx(opCtx), _coll(coll), _nss{coll->ns()}, - _idIndexBlock(stdx::make_unique<MultiIndexBlock>(txn, coll)), - _secondaryIndexesBlock(stdx::make_unique<MultiIndexBlock>(txn, coll)), + _idIndexBlock(stdx::make_unique<MultiIndexBlock>(opCtx, coll)), + _secondaryIndexesBlock(stdx::make_unique<MultiIndexBlock>(opCtx, coll)), _idIndexSpec(idIndexSpec) { - invariant(txn); + invariant(opCtx); invariant(coll); invariant(_runner); invariant(_autoDB); @@ -89,10 +89,10 @@ CollectionBulkLoaderImpl::~CollectionBulkLoaderImpl() { Status CollectionBulkLoaderImpl::init(Collection* coll, const std::vector<BSONObj>& secondaryIndexSpecs) { return _runTaskReleaseResourcesOnFailure( - [coll, &secondaryIndexSpecs, this](OperationContext* txn) -> Status { - invariant(txn); + [coll, &secondaryIndexSpecs, this](OperationContext* opCtx) -> Status { + invariant(opCtx); invariant(coll); - invariant(txn->getClient() == &cc()); + invariant(opCtx->getClient() == &cc()); std::vector<BSONObj> specs(secondaryIndexSpecs); // This enforces the buildIndexes setting in the replica set configuration. 
_secondaryIndexesBlock->removeExistingIndexes(&specs); @@ -122,8 +122,8 @@ Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::con const std::vector<BSONObj>::const_iterator end) { int count = 0; return _runTaskReleaseResourcesOnFailure( - [begin, end, &count, this](OperationContext* txn) -> Status { - invariant(txn); + [begin, end, &count, this](OperationContext* opCtx) -> Status { + invariant(opCtx); for (auto iter = begin; iter != end; ++iter) { std::vector<MultiIndexBlock*> indexers; @@ -134,15 +134,15 @@ Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::con indexers.push_back(_secondaryIndexesBlock.get()); } MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - WriteUnitOfWork wunit(txn); - const auto status = _coll->insertDocument(txn, *iter, indexers, false); + WriteUnitOfWork wunit(opCtx); + const auto status = _coll->insertDocument(opCtx, *iter, indexers, false); if (!status.isOK()) { return status; } wunit.commit(); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( - _txn, "CollectionBulkLoaderImpl::insertDocuments", _nss.ns()); + _opCtx, "CollectionBulkLoaderImpl::insertDocuments", _nss.ns()); ++count; } @@ -152,11 +152,11 @@ Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::con Status CollectionBulkLoaderImpl::commit() { return _runTaskReleaseResourcesOnFailure( - [this](OperationContext* txn) -> Status { + [this](OperationContext* opCtx) -> Status { _stats.startBuildingIndexes = Date_t::now(); LOG(2) << "Creating indexes for ns: " << _nss.ns(); - invariant(txn->getClient() == &cc()); - invariant(txn == _txn); + invariant(opCtx->getClient() == &cc()); + invariant(opCtx == _opCtx); // Commit before deleting dups, so the dups will be removed from secondary indexes when // deleted. 
@@ -173,12 +173,12 @@ Status CollectionBulkLoaderImpl::commit() { "MultiIndexBlock::ignoreUniqueConstraint set."}; } MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - WriteUnitOfWork wunit(txn); + WriteUnitOfWork wunit(opCtx); _secondaryIndexesBlock->commit(); wunit.commit(); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( - _txn, "CollectionBulkLoaderImpl::commit", _nss.ns()); + _opCtx, "CollectionBulkLoaderImpl::commit", _nss.ns()); } if (_idIndexBlock) { @@ -192,8 +192,8 @@ Status CollectionBulkLoaderImpl::commit() { for (auto&& it : dups) { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - WriteUnitOfWork wunit(_txn); - _coll->deleteDocument(_txn, + WriteUnitOfWork wunit(_opCtx); + _coll->deleteDocument(_opCtx, it, nullptr /** OpDebug **/, false /* fromMigrate */, @@ -201,17 +201,17 @@ Status CollectionBulkLoaderImpl::commit() { wunit.commit(); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( - _txn, "CollectionBulkLoaderImpl::commit", _nss.ns()); + _opCtx, "CollectionBulkLoaderImpl::commit", _nss.ns()); } // Commit _id index, without dups. 
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - WriteUnitOfWork wunit(txn); + WriteUnitOfWork wunit(opCtx); _idIndexBlock->commit(); wunit.commit(); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( - _txn, "CollectionBulkLoaderImpl::commit", _nss.ns()); + _opCtx, "CollectionBulkLoaderImpl::commit", _nss.ns()); } _stats.endBuildingIndexes = Date_t::now(); LOG(2) << "Done creating indexes for ns: " << _nss.ns() @@ -244,9 +244,9 @@ void CollectionBulkLoaderImpl::_releaseResources() { Status CollectionBulkLoaderImpl::_runTaskReleaseResourcesOnFailure( TaskRunner::SynchronousTask task, TaskRunner::NextAction nextAction) { - auto newTask = [this, &task](OperationContext* txn) -> Status { + auto newTask = [this, &task](OperationContext* opCtx) -> Status { ScopeGuard guard = MakeGuard(&CollectionBulkLoaderImpl::_releaseResources, this); - const auto status = task(txn); + const auto status = task(opCtx); if (status.isOK()) { guard.Dismiss(); } diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.h b/src/mongo/db/repl/collection_bulk_loader_impl.h index 61928e4c385..17b4741ec8f 100644 --- a/src/mongo/db/repl/collection_bulk_loader_impl.h +++ b/src/mongo/db/repl/collection_bulk_loader_impl.h @@ -61,7 +61,7 @@ public: BSONObj toBSON() const; }; - CollectionBulkLoaderImpl(OperationContext* txn, + CollectionBulkLoaderImpl(OperationContext* opCtx, Collection* coll, const BSONObj idIndexSpec, std::unique_ptr<OldThreadPool> threadPool, @@ -91,7 +91,7 @@ private: std::unique_ptr<TaskRunner> _runner; std::unique_ptr<AutoGetCollection> _autoColl; std::unique_ptr<AutoGetOrCreateDb> _autoDB; - OperationContext* _txn = nullptr; + OperationContext* _opCtx = nullptr; Collection* _coll = nullptr; NamespaceString _nss; std::unique_ptr<MultiIndexBlock> _idIndexBlock; diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp index ac1b34c1528..85c9f39169d 100644 --- a/src/mongo/db/repl/collection_cloner.cpp +++ b/src/mongo/db/repl/collection_cloner.cpp @@ -119,9 
+119,9 @@ CollectionCloner::CollectionCloner(executor::TaskExecutor* executor, _documents(), _dbWorkTaskRunner(_dbWorkThreadPool), _scheduleDbWorkFn([this](const executor::TaskExecutor::CallbackFn& work) { - auto task = [work](OperationContext* txn, + auto task = [work](OperationContext* opCtx, const Status& status) -> TaskRunner::NextAction { - work(executor::TaskExecutor::CallbackArgs(nullptr, {}, status, txn)); + work(executor::TaskExecutor::CallbackArgs(nullptr, {}, status, opCtx)); return TaskRunner::NextAction::kDisposeOperationContext; }; _dbWorkTaskRunner.schedule(task); @@ -337,9 +337,10 @@ void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus& _finishCallback(cbd.status); return; } - auto txn = cbd.txn; - txn->setReplicatedWrites(false); - auto&& createStatus = _storageInterface->createCollection(txn, _destNss, _options); + auto opCtx = cbd.opCtx; + opCtx->setReplicatedWrites(false); + auto&& createStatus = + _storageInterface->createCollection(opCtx, _destNss, _options); _finishCallback(createStatus); }); if (!scheduleResult.isOK()) { diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp index 8c0ac71ba70..440b8dee232 100644 --- a/src/mongo/db/repl/collection_cloner_test.cpp +++ b/src/mongo/db/repl/collection_cloner_test.cpp @@ -405,8 +405,8 @@ TEST_F(CollectionClonerTest, ListIndexesReturnedNamespaceNotFound) { bool writesAreReplicatedOnOpCtx = false; NamespaceString collNss; storageInterface->createCollFn = [&collNss, &collectionCreated, &writesAreReplicatedOnOpCtx]( - OperationContext* txn, const NamespaceString& nss, const CollectionOptions& options) { - writesAreReplicatedOnOpCtx = txn->writesAreReplicated(); + OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) { + writesAreReplicatedOnOpCtx = opCtx->writesAreReplicated(); collectionCreated = true; collNss = nss; return Status::OK(); @@ -458,14 +458,14 @@ TEST_F(CollectionClonerTest, // 
Replace scheduleDbWork function to schedule the create collection task with an injected error // status. auto exec = &getExecutor(); - collectionCloner->setScheduleDbWorkFn_forTest( - [exec](const executor::TaskExecutor::CallbackFn& workFn) { - auto wrappedTask = [workFn](const executor::TaskExecutor::CallbackArgs& cbd) { - workFn(executor::TaskExecutor::CallbackArgs( - cbd.executor, cbd.myHandle, Status(ErrorCodes::CallbackCanceled, ""), cbd.txn)); - }; - return exec->scheduleWork(wrappedTask); - }); + collectionCloner->setScheduleDbWorkFn_forTest([exec]( + const executor::TaskExecutor::CallbackFn& workFn) { + auto wrappedTask = [workFn](const executor::TaskExecutor::CallbackArgs& cbd) { + workFn(executor::TaskExecutor::CallbackArgs( + cbd.executor, cbd.myHandle, Status(ErrorCodes::CallbackCanceled, ""), cbd.opCtx)); + }; + return exec->scheduleWork(wrappedTask); + }); bool collectionCreated = false; storageInterface->createCollFn = [&collectionCreated]( diff --git a/src/mongo/db/repl/data_replicator.cpp b/src/mongo/db/repl/data_replicator.cpp index ee990aab967..92e64cbb993 100644 --- a/src/mongo/db/repl/data_replicator.cpp +++ b/src/mongo/db/repl/data_replicator.cpp @@ -252,9 +252,9 @@ bool DataReplicator::_isActive_inlock() const { return State::kRunning == _state || State::kShuttingDown == _state; } -Status DataReplicator::startup(OperationContext* txn, +Status DataReplicator::startup(OperationContext* opCtx, std::uint32_t initialSyncMaxAttempts) noexcept { - invariant(txn); + invariant(opCtx); invariant(initialSyncMaxAttempts >= 1U); stdx::lock_guard<stdx::mutex> lock(_mutex); @@ -270,7 +270,7 @@ Status DataReplicator::startup(OperationContext* txn, return Status(ErrorCodes::ShutdownInProgress, "data replicator completed"); } - _setUp_inlock(txn, initialSyncMaxAttempts); + _setUp_inlock(opCtx, initialSyncMaxAttempts); // Start first initial sync attempt. 
std::uint32_t initialSyncAttempt = 0; @@ -397,32 +397,32 @@ void DataReplicator::setScheduleDbWorkFn_forTest(const CollectionCloner::Schedul _scheduleDbWorkFn = work; } -void DataReplicator::_setUp_inlock(OperationContext* txn, std::uint32_t initialSyncMaxAttempts) { +void DataReplicator::_setUp_inlock(OperationContext* opCtx, std::uint32_t initialSyncMaxAttempts) { // This will call through to the storageInterfaceImpl to ReplicationCoordinatorImpl. - // 'txn' is passed through from startup(). - _storage->setInitialSyncFlag(txn); + // 'opCtx' is passed through from startup(). + _storage->setInitialSyncFlag(opCtx); LOG(1) << "Creating oplogBuffer."; - _oplogBuffer = _dataReplicatorExternalState->makeInitialSyncOplogBuffer(txn); - _oplogBuffer->startup(txn); + _oplogBuffer = _dataReplicatorExternalState->makeInitialSyncOplogBuffer(opCtx); + _oplogBuffer->startup(opCtx); _stats.initialSyncStart = _exec->now(); _stats.maxFailedInitialSyncAttempts = initialSyncMaxAttempts; _stats.failedInitialSyncAttempts = 0; } -void DataReplicator::_tearDown_inlock(OperationContext* txn, +void DataReplicator::_tearDown_inlock(OperationContext* opCtx, const StatusWith<OpTimeWithHash>& lastApplied) { _stats.initialSyncEnd = _exec->now(); // This might not be necessary if we failed initial sync. 
invariant(_oplogBuffer); - _oplogBuffer->shutdown(txn); + _oplogBuffer->shutdown(opCtx); if (!lastApplied.isOK()) { return; } - _storage->clearInitialSyncFlag(txn); + _storage->clearInitialSyncFlag(opCtx); _opts.setMyLastOptime(lastApplied.getValue().opTime); log() << "initial sync done; took " << duration_cast<Seconds>(_stats.initialSyncEnd - _stats.initialSyncStart) << "."; @@ -570,28 +570,28 @@ Status DataReplicator::_recreateOplogAndDropReplicatedDatabases() { LOG(1) << "About to drop+create the oplog, if it exists, ns:" << _opts.localOplogNS << ", and drop all user databases (so that we can clone them)."; - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // We are not replicating nor validating these writes. - UnreplicatedWritesBlock unreplicatedWritesBlock(txn.get()); + UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx.get()); // 1.) Drop the oplog. LOG(2) << "Dropping the existing oplog: " << _opts.localOplogNS; - auto status = _storage->dropCollection(txn.get(), _opts.localOplogNS); + auto status = _storage->dropCollection(opCtx.get(), _opts.localOplogNS); if (!status.isOK()) { return status; } // 2.) Drop user databases. LOG(2) << "Dropping user databases"; - status = _storage->dropReplicatedDatabases(txn.get()); + status = _storage->dropReplicatedDatabases(opCtx.get()); if (!status.isOK()) { return status; } // 3.) Create the oplog. LOG(2) << "Creating the oplog: " << _opts.localOplogNS; - return _storage->createOplog(txn.get(), _opts.localOplogNS); + return _storage->createOplog(opCtx.get(), _opts.localOplogNS); } void DataReplicator::_rollbackCheckerResetCallback( @@ -833,12 +833,12 @@ void DataReplicator::_lastOplogEntryFetcherCallbackForStopTimestamp( const auto& oplogSeedDoc = documents.front(); LOG(1) << "inserting oplog seed document: " << oplogSeedDoc; - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // StorageInterface::insertDocument() has to be called outside the lock because we may // override its behavior in tests. 
See DataReplicatorReturnsCallbackCanceledAndDoesNot- // ScheduleRollbackCheckerIfShutdownAfterInsertingInsertOplogSeedDocument in // data_replicator_test.cpp - auto status = _storage->insertDocument(txn.get(), _opts.localOplogNS, oplogSeedDoc); + auto status = _storage->insertDocument(opCtx.get(), _opts.localOplogNS, oplogSeedDoc); if (!status.isOK()) { stdx::lock_guard<stdx::mutex> lock(_mutex); onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status); @@ -1048,7 +1048,7 @@ void DataReplicator::_finishInitialSyncAttempt(const StatusWith<OpTimeWithHash>& // For example, if CollectionCloner fails while inserting documents into the // CollectionBulkLoader, we will get here via one of CollectionCloner's TaskRunner callbacks // which has an active OperationContext bound to the current Client. This would lead to an - // invariant when we attempt to create a new OperationContext for _tearDown(txn). + // invariant when we attempt to create a new OperationContext for _tearDown(opCtx). // To avoid this, we schedule _finishCallback against the TaskExecutor rather than calling it // here synchronously. 
@@ -1139,8 +1139,8 @@ void DataReplicator::_finishCallback(StatusWith<OpTimeWithHash> lastApplied) { decltype(_onCompletion) onCompletion; { stdx::lock_guard<stdx::mutex> lock(_mutex); - auto txn = makeOpCtx(); - _tearDown_inlock(txn.get(), lastApplied); + auto opCtx = makeOpCtx(); + _tearDown_inlock(opCtx.get(), lastApplied); invariant(_onCompletion); std::swap(_onCompletion, onCompletion); @@ -1395,8 +1395,8 @@ StatusWith<Operations> DataReplicator::_getNextApplierBatch_inlock() { // * only OplogEntries from before the slaveDelay point // * a single command OplogEntry (including index builds, which appear to be inserts) // * consequently, commands bound the previous batch to be in a batch of their own - auto txn = makeOpCtx(); - while (_oplogBuffer->peek(txn.get(), &op)) { + auto opCtx = makeOpCtx(); + while (_oplogBuffer->peek(opCtx.get(), &op)) { auto entry = OplogEntry(std::move(op)); // Check for oplog version change. If it is absent, its value is one. @@ -1417,7 +1417,7 @@ StatusWith<Operations> DataReplicator::_getNextApplierBatch_inlock() { if (ops.empty()) { // Apply commands one-at-a-time. ops.push_back(std::move(entry)); - invariant(_oplogBuffer->tryPop(txn.get(), &op)); + invariant(_oplogBuffer->tryPop(opCtx.get(), &op)); dassert(SimpleBSONObjComparator::kInstance.evaluate(ops.back().raw == op)); } @@ -1451,7 +1451,7 @@ StatusWith<Operations> DataReplicator::_getNextApplierBatch_inlock() { // Add op to buffer. 
ops.push_back(std::move(entry)); totalBytes += ops.back().raw.objsize(); - invariant(_oplogBuffer->tryPop(txn.get(), &op)); + invariant(_oplogBuffer->tryPop(opCtx.get(), &op)); dassert(SimpleBSONObjComparator::kInstance.evaluate(ops.back().raw == op)); } return std::move(ops); diff --git a/src/mongo/db/repl/data_replicator.h b/src/mongo/db/repl/data_replicator.h index 62a8f134988..e2b96a92f6a 100644 --- a/src/mongo/db/repl/data_replicator.h +++ b/src/mongo/db/repl/data_replicator.h @@ -186,7 +186,7 @@ public: /** * Starts initial sync process, with the provided number of attempts */ - Status startup(OperationContext* txn, std::uint32_t maxAttempts) noexcept; + Status startup(OperationContext* opCtx, std::uint32_t maxAttempts) noexcept; /** * Shuts down replication if "start" has been called, and blocks until shutdown has completed. @@ -336,12 +336,12 @@ private: /** * Sets up internal state to begin initial sync. */ - void _setUp_inlock(OperationContext* txn, std::uint32_t initialSyncMaxAttempts); + void _setUp_inlock(OperationContext* opCtx, std::uint32_t initialSyncMaxAttempts); /** * Tears down internal state before reporting final status to caller. */ - void _tearDown_inlock(OperationContext* txn, const StatusWith<OpTimeWithHash>& lastApplied); + void _tearDown_inlock(OperationContext* opCtx, const StatusWith<OpTimeWithHash>& lastApplied); /** * Callback to start a single initial sync attempt. diff --git a/src/mongo/db/repl/data_replicator_external_state.h b/src/mongo/db/repl/data_replicator_external_state.h index 0102ddab533..a1b4bf61e87 100644 --- a/src/mongo/db/repl/data_replicator_external_state.h +++ b/src/mongo/db/repl/data_replicator_external_state.h @@ -110,13 +110,13 @@ public: * This function creates an oplog buffer of the type specified at server startup. 
*/ virtual std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer( - OperationContext* txn) const = 0; + OperationContext* opCtx) const = 0; /** * Creates an oplog buffer suitable for steady state replication. */ virtual std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer( - OperationContext* txn) const = 0; + OperationContext* opCtx) const = 0; /** * Returns the current replica set config if there is one, or an error why there isn't. @@ -130,7 +130,7 @@ private: * * Used exclusively by the DataReplicator to construct a MultiApplier. */ - virtual StatusWith<OpTime> _multiApply(OperationContext* txn, + virtual StatusWith<OpTime> _multiApply(OperationContext* opCtx, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation) = 0; diff --git a/src/mongo/db/repl/data_replicator_external_state_impl.cpp b/src/mongo/db/repl/data_replicator_external_state_impl.cpp index eed85a8216e..e486cfd5278 100644 --- a/src/mongo/db/repl/data_replicator_external_state_impl.cpp +++ b/src/mongo/db/repl/data_replicator_external_state_impl.cpp @@ -106,13 +106,13 @@ bool DataReplicatorExternalStateImpl::shouldStopFetching( } std::unique_ptr<OplogBuffer> DataReplicatorExternalStateImpl::makeInitialSyncOplogBuffer( - OperationContext* txn) const { - return _replicationCoordinatorExternalState->makeInitialSyncOplogBuffer(txn); + OperationContext* opCtx) const { + return _replicationCoordinatorExternalState->makeInitialSyncOplogBuffer(opCtx); } std::unique_ptr<OplogBuffer> DataReplicatorExternalStateImpl::makeSteadyStateOplogBuffer( - OperationContext* txn) const { - return _replicationCoordinatorExternalState->makeSteadyStateOplogBuffer(txn); + OperationContext* opCtx) const { + return _replicationCoordinatorExternalState->makeSteadyStateOplogBuffer(opCtx); } StatusWith<ReplSetConfig> DataReplicatorExternalStateImpl::getCurrentConfig() const { @@ -120,10 +120,10 @@ StatusWith<ReplSetConfig> DataReplicatorExternalStateImpl::getCurrentConfig() co } StatusWith<OpTime> 
DataReplicatorExternalStateImpl::_multiApply( - OperationContext* txn, + OperationContext* opCtx, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation) { - return _replicationCoordinatorExternalState->multiApply(txn, std::move(ops), applyOperation); + return _replicationCoordinatorExternalState->multiApply(opCtx, std::move(ops), applyOperation); } Status DataReplicatorExternalStateImpl::_multiSyncApply(MultiApplier::OperationPtrs* ops) { diff --git a/src/mongo/db/repl/data_replicator_external_state_impl.h b/src/mongo/db/repl/data_replicator_external_state_impl.h index 2c5518d7a1d..40a25bcaf67 100644 --- a/src/mongo/db/repl/data_replicator_external_state_impl.h +++ b/src/mongo/db/repl/data_replicator_external_state_impl.h @@ -59,14 +59,14 @@ public: const rpc::ReplSetMetadata& replMetadata, boost::optional<rpc::OplogQueryMetadata> oqMetadata) override; - std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(OperationContext* txn) const override; + std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(OperationContext* opCtx) const override; - std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(OperationContext* txn) const override; + std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(OperationContext* opCtx) const override; StatusWith<ReplSetConfig> getCurrentConfig() const override; private: - StatusWith<OpTime> _multiApply(OperationContext* txn, + StatusWith<OpTime> _multiApply(OperationContext* opCtx, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation) override; diff --git a/src/mongo/db/repl/data_replicator_external_state_mock.cpp b/src/mongo/db/repl/data_replicator_external_state_mock.cpp index a5eb417b403..1f315aee521 100644 --- a/src/mongo/db/repl/data_replicator_external_state_mock.cpp +++ b/src/mongo/db/repl/data_replicator_external_state_mock.cpp @@ -81,12 +81,12 @@ bool DataReplicatorExternalStateMock::shouldStopFetching( } std::unique_ptr<OplogBuffer> 
DataReplicatorExternalStateMock::makeInitialSyncOplogBuffer( - OperationContext* txn) const { + OperationContext* opCtx) const { return stdx::make_unique<OplogBufferBlockingQueue>(); } std::unique_ptr<OplogBuffer> DataReplicatorExternalStateMock::makeSteadyStateOplogBuffer( - OperationContext* txn) const { + OperationContext* opCtx) const { return stdx::make_unique<OplogBufferBlockingQueue>(); } @@ -95,10 +95,10 @@ StatusWith<ReplSetConfig> DataReplicatorExternalStateMock::getCurrentConfig() co } StatusWith<OpTime> DataReplicatorExternalStateMock::_multiApply( - OperationContext* txn, + OperationContext* opCtx, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation) { - return multiApplyFn(txn, std::move(ops), applyOperation); + return multiApplyFn(opCtx, std::move(ops), applyOperation); } Status DataReplicatorExternalStateMock::_multiSyncApply(MultiApplier::OperationPtrs* ops) { diff --git a/src/mongo/db/repl/data_replicator_external_state_mock.h b/src/mongo/db/repl/data_replicator_external_state_mock.h index ea2943a0749..88ff0df26c5 100644 --- a/src/mongo/db/repl/data_replicator_external_state_mock.h +++ b/src/mongo/db/repl/data_replicator_external_state_mock.h @@ -56,9 +56,9 @@ public: const rpc::ReplSetMetadata& replMetadata, boost::optional<rpc::OplogQueryMetadata> oqMetadata) override; - std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(OperationContext* txn) const override; + std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(OperationContext* opCtx) const override; - std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(OperationContext* txn) const override; + std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(OperationContext* opCtx) const override; StatusWith<ReplSetConfig> getCurrentConfig() const override; @@ -97,7 +97,7 @@ public: StatusWith<ReplSetConfig> replSetConfigResult = ReplSetConfig(); private: - StatusWith<OpTime> _multiApply(OperationContext* txn, + StatusWith<OpTime> _multiApply(OperationContext* 
opCtx, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation) override; diff --git a/src/mongo/db/repl/data_replicator_test.cpp b/src/mongo/db/repl/data_replicator_test.cpp index 9a6efec3f83..61a5dcb8f2d 100644 --- a/src/mongo/db/repl/data_replicator_test.cpp +++ b/src/mongo/db/repl/data_replicator_test.cpp @@ -265,31 +265,32 @@ protected: void setUp() override { executor::ThreadPoolExecutorTest::setUp(); _storageInterface = stdx::make_unique<StorageInterfaceMock>(); - _storageInterface->createOplogFn = [this](OperationContext* txn, + _storageInterface->createOplogFn = [this](OperationContext* opCtx, const NamespaceString& nss) { LockGuard lock(_storageInterfaceWorkDoneMutex); _storageInterfaceWorkDone.createOplogCalled = true; return Status::OK(); }; _storageInterface->insertDocumentFn = - [this](OperationContext* txn, const NamespaceString& nss, const BSONObj& doc) { + [this](OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) { LockGuard lock(_storageInterfaceWorkDoneMutex); ++_storageInterfaceWorkDone.documentsInsertedCount; return Status::OK(); }; _storageInterface->insertDocumentsFn = [this]( - OperationContext* txn, const NamespaceString& nss, const std::vector<BSONObj>& ops) { + OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& ops) { LockGuard lock(_storageInterfaceWorkDoneMutex); _storageInterfaceWorkDone.insertedOplogEntries = true; ++_storageInterfaceWorkDone.oplogEntriesInserted; return Status::OK(); }; - _storageInterface->dropCollFn = [this](OperationContext* txn, const NamespaceString& nss) { + _storageInterface->dropCollFn = [this](OperationContext* opCtx, + const NamespaceString& nss) { LockGuard lock(_storageInterfaceWorkDoneMutex); _storageInterfaceWorkDone.droppedCollections.push_back(nss.ns()); return Status::OK(); }; - _storageInterface->dropUserDBsFn = [this](OperationContext* txn) { + _storageInterface->dropUserDBsFn = [this](OperationContext* opCtx) { LockGuard 
lock(_storageInterfaceWorkDoneMutex); _storageInterfaceWorkDone.droppedUserDBs = true; return Status::OK(); @@ -577,66 +578,66 @@ const std::uint32_t maxAttempts = 1U; TEST_F(DataReplicatorTest, StartupReturnsIllegalOperationIfAlreadyActive) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); ASSERT_FALSE(dr->isActive()); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); ASSERT_TRUE(dr->isActive()); - ASSERT_EQUALS(ErrorCodes::IllegalOperation, dr->startup(txn.get(), maxAttempts)); + ASSERT_EQUALS(ErrorCodes::IllegalOperation, dr->startup(opCtx.get(), maxAttempts)); ASSERT_TRUE(dr->isActive()); } TEST_F(DataReplicatorTest, StartupReturnsShutdownInProgressIfDataReplicatorIsShuttingDown) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); ASSERT_FALSE(dr->isActive()); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); ASSERT_TRUE(dr->isActive()); // SyncSourceSelector returns an invalid sync source so DataReplicator is stuck waiting for // another sync source in 'Options::syncSourceRetryWait' ms. ASSERT_OK(dr->shutdown()); - ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(txn.get(), maxAttempts)); + ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(opCtx.get(), maxAttempts)); } TEST_F(DataReplicatorTest, StartupReturnsShutdownInProgressIfExecutorIsShutdown) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); getExecutor().shutdown(); - ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(txn.get(), maxAttempts)); + ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(opCtx.get(), maxAttempts)); ASSERT_FALSE(dr->isActive()); // Cannot startup data replicator again since it's in the Complete state. 
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(txn.get(), maxAttempts)); + ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(opCtx.get(), maxAttempts)); } TEST_F(DataReplicatorTest, ShutdownTransitionsStateToCompleteIfCalledBeforeStartup) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); ASSERT_OK(dr->shutdown()); - ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(txn.get(), maxAttempts)); + ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(opCtx.get(), maxAttempts)); // Data replicator is inactive when it's in the Complete state. ASSERT_FALSE(dr->isActive()); } TEST_F(DataReplicatorTest, StartupSetsInitialSyncFlagOnSuccess) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // Initial sync flag should not be set before starting. - ASSERT_FALSE(getStorage().getInitialSyncFlag(txn.get())); + ASSERT_FALSE(getStorage().getInitialSyncFlag(opCtx.get())); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); ASSERT_TRUE(dr->isActive()); // Initial sync flag should be set. - ASSERT_TRUE(getStorage().getInitialSyncFlag(txn.get())); + ASSERT_TRUE(getStorage().getInitialSyncFlag(opCtx.get())); } TEST_F(DataReplicatorTest, DataReplicatorReturnsCallbackCanceledIfShutdownImmediatelyAfterStartup) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); // This will cancel the _startInitialSyncAttemptCallback() task scheduled by startup(). 
ASSERT_OK(dr->shutdown()); @@ -655,13 +656,13 @@ TEST_F(DataReplicatorTest, DataReplicatorReturnsCallbackCanceledIfShutdownImmedi TEST_F(DataReplicatorTest, DataReplicatorRetriesSyncSourceSelectionIfChooseNewSyncSourceReturnsInvalidSyncSource) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // Override chooseNewSyncSource() result in SyncSourceSelectorMock before calling startup() // because DataReplicator will look for a valid sync source immediately after startup. _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort()); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); // Run first sync source selection attempt. executor::NetworkInterfaceMock::InNetworkGuard(getNet())->runReadyNetworkOperations(); @@ -697,13 +698,13 @@ TEST_F( DataReplicatorTest, DataReplicatorReturnsInitialSyncOplogSourceMissingIfNoValidSyncSourceCanBeFoundAfterTenFailedChooseSyncSourceAttempts) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // Override chooseNewSyncSource() result in SyncSourceSelectorMock before calling startup() // because DataReplicator will look for a valid sync source immediately after startup. 
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort()); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); _simulateChooseSyncSourceFailure(getNet(), _options.syncSourceRetryWait); @@ -718,12 +719,12 @@ TEST_F( TEST_F(DataReplicatorTest, DataReplicatorRetriesInitialSyncUpToMaxAttemptsAndReturnsLastAttemptError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort()); const std::uint32_t initialSyncMaxAttempts = 3U; - ASSERT_OK(dr->startup(txn.get(), initialSyncMaxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), initialSyncMaxAttempts)); auto net = getNet(); for (std::uint32_t i = 0; i < initialSyncMaxAttempts; ++i) { @@ -748,10 +749,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorReturnsCallbackCanceledIfShutdownWhileRetryingSyncSourceSelection) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort()); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -774,11 +775,11 @@ TEST_F( DataReplicatorTest, DataReplicatorReturnsScheduleErrorIfTaskExecutorFailsToScheduleNextChooseSyncSourceCallback) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort()); _executorProxy->shouldFailScheduleWorkAt = true; - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); dr->join(); @@ -788,13 +789,13 @@ TEST_F( TEST_F(DataReplicatorTest, DataReplicatorReturnsScheduleErrorIfTaskExecutorFailsToScheduleNextInitialSyncAttempt) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort()); 
ASSERT_EQUALS(DataReplicator::State::kPreStart, dr->getState_forTest()); - ASSERT_OK(dr->startup(txn.get(), 2U)); + ASSERT_OK(dr->startup(opCtx.get(), 2U)); ASSERT_EQUALS(DataReplicator::State::kRunning, dr->getState_forTest()); // Advance clock so that we run all but the last sync source callback. @@ -816,7 +817,7 @@ TEST_F(DataReplicatorTest, // the completion callback function throws an exception. TEST_F(DataReplicatorTest, DataReplicatorTransitionsToCompleteWhenFinishCallbackThrowsException) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _onCompletion = [this](const StatusWith<OpTimeWithHash>& lastApplied) { _lastApplied = lastApplied; @@ -824,7 +825,7 @@ TEST_F(DataReplicatorTest, DataReplicatorTransitionsToCompleteWhenFinishCallback }; _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort()); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); ASSERT_OK(dr->shutdown()); dr->join(); @@ -862,9 +863,9 @@ TEST_F(DataReplicatorTest, DataReplicatorResetsOnCompletionCallbackFunctionPoint }); ON_BLOCK_EXIT([this]() { getExecutor().shutdown(); }); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); sharedCallbackData.reset(); ASSERT_FALSE(sharedCallbackStateDestroyed); @@ -891,17 +892,17 @@ TEST_F(DataReplicatorTest, DataReplicatorRecreatesOplogAndDropsReplicatedDatabas // We are not interested in proceeding beyond the oplog creation stage so we inject a failure // after setting '_storageInterfaceWorkDone.createOplogCalled' to true. 
auto oldCreateOplogFn = _storageInterface->createOplogFn; - _storageInterface->createOplogFn = [oldCreateOplogFn](OperationContext* txn, + _storageInterface->createOplogFn = [oldCreateOplogFn](OperationContext* opCtx, const NamespaceString& nss) { - oldCreateOplogFn(txn, nss); + oldCreateOplogFn(opCtx, nss); return Status(ErrorCodes::OperationFailed, "oplog creation failed"); }; auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); dr->join(); ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied); @@ -913,7 +914,7 @@ TEST_F(DataReplicatorTest, DataReplicatorRecreatesOplogAndDropsReplicatedDatabas TEST_F(DataReplicatorTest, DataReplicatorPassesThroughGetRollbackIdScheduleError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // replSetGetRBID is the first remote command to be scheduled by the data replicator after // creating the oplog collection. @@ -925,7 +926,7 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughGetRollbackIdScheduleError HostAndPort syncSource("localhost", 12345); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); dr->join(); ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied); @@ -942,18 +943,18 @@ TEST_F( // down before returning from createOplog() to make the scheduleRemoteCommand() call for // replSetGetRBID fail. 
auto oldCreateOplogFn = _storageInterface->createOplogFn; - _storageInterface->createOplogFn = [oldCreateOplogFn, this](OperationContext* txn, + _storageInterface->createOplogFn = [oldCreateOplogFn, this](OperationContext* opCtx, const NamespaceString& nss) { - auto status = oldCreateOplogFn(txn, nss); + auto status = oldCreateOplogFn(opCtx, nss); getExecutor().shutdown(); return status; }; auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); dr->join(); ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, _lastApplied); @@ -964,14 +965,14 @@ TEST_F( TEST_F(DataReplicatorTest, DataReplicatorCancelsRollbackCheckerOnShutdown) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); HostAndPort syncSource("localhost", 12345); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource); ASSERT_EQUALS(DataReplicator::State::kPreStart, dr->getState_forTest()); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); ASSERT_EQUALS(DataReplicator::State::kRunning, dr->getState_forTest()); auto net = getNet(); @@ -1000,10 +1001,10 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsRollbackCheckerOnShutdown) { TEST_F(DataReplicatorTest, DataReplicatorPassesThroughRollbackCheckerCallbackError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1021,7 +1022,7 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughRollbackCheckerCallbackErr TEST_F(DataReplicatorTest, DataReplicatorPassesThroughLastOplogEntryFetcherScheduleError) { auto dr = &getDR(); - auto 
txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // The last oplog entry fetcher is the first component that sends a find command so we reject // any find commands and save the request for inspection at the end of this test case. @@ -1033,7 +1034,7 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughLastOplogEntryFetcherSched HostAndPort syncSource("localhost", 12345); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1056,10 +1057,10 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughLastOplogEntryFetcherSched TEST_F(DataReplicatorTest, DataReplicatorPassesThroughLastOplogEntryFetcherCallbackError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1082,10 +1083,10 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughLastOplogEntryFetcherCallb TEST_F(DataReplicatorTest, DataReplicatorCancelsLastOplogEntryFetcherOnShutdown) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1108,10 +1109,10 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsLastOplogEntryFetcherOnShutdown) TEST_F(DataReplicatorTest, DataReplicatorReturnsNoMatchingDocumentIfLastOplogEntryFetcherReturnsEmptyBatchOfDocuments) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + 
ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1132,10 +1133,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorResendsFindCommandIfLastOplogEntryFetcherReturnsRetriableError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); executor::NetworkInterfaceMock::InNetworkGuard guard(net); @@ -1159,10 +1160,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorReturnsNoSuchKeyIfLastOplogEntryFetcherReturnsEntryWithMissingHash) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1183,10 +1184,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorReturnsNoSuchKeyIfLastOplogEntryFetcherReturnsEntryWithMissingTimestamp) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1207,12 +1208,12 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorPassesThroughErrorFromDataReplicatorExternalStateGetCurrentConfig) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); getExternalState()->replSetConfigResult = Status(ErrorCodes::OperationFailed, ""); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), 
maxAttempts)); auto net = getNet(); { @@ -1232,7 +1233,7 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorPassesThroughOplogFetcherScheduleError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // Make the tailable oplog query fail. Allow all other requests to be scheduled. executor::RemoteCommandRequest request; @@ -1247,7 +1248,7 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughOplogFetcherScheduleError) HostAndPort syncSource("localhost", 12345); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1273,10 +1274,10 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughOplogFetcherScheduleError) TEST_F(DataReplicatorTest, DataReplicatorPassesThroughOplogFetcherCallbackError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1311,10 +1312,10 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughOplogFetcherCallbackError) TEST_F(DataReplicatorTest, DataReplicatorSucceedsOnEarlyOplogFetcherCompletionIfThereAreNoOperationsToApply) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1363,10 +1364,10 @@ TEST_F( DataReplicatorTest, DataReplicatorSucceedsOnEarlyOplogFetcherCompletionIfThereAreEnoughOperationsInTheOplogBufferToReachEndTimestamp) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); 
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1412,10 +1413,10 @@ TEST_F( DataReplicatorTest, DataReplicatorReturnsRemoteResultsUnavailableOnEarlyOplogFetcherCompletionIfThereAreNotEnoughOperationsInTheOplogBufferToReachEndTimestamp) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1456,7 +1457,7 @@ TEST_F( TEST_F(DataReplicatorTest, DataReplicatorPassesThroughDatabasesClonerScheduleErrorAndCancelsOplogFetcher) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // Make the listDatabases command fail. Allow all other requests to be scheduled. executor::RemoteCommandRequest request; @@ -1470,7 +1471,7 @@ TEST_F(DataReplicatorTest, HostAndPort syncSource("localhost", 12345); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1504,10 +1505,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorPassesThroughDatabasesClonerCallbackErrorAndCancelsOplogFetcher) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1544,10 +1545,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorIgnoresLocalDatabasesWhenCloningDatabases) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); 
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1604,10 +1605,10 @@ TEST_F(DataReplicatorTest, DataReplicatorIgnoresLocalDatabasesWhenCloningDatabas TEST_F(DataReplicatorTest, DataReplicatorIgnoresDatabaseInfoDocumentWithoutNameFieldWhenCloningDatabases) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1670,10 +1671,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorCancelsBothOplogFetcherAndDatabasesClonerOnShutdown) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1697,7 +1698,7 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsBothOplogFetcherAndDatabasesClon TEST_F(DataReplicatorTest, DataReplicatorPassesThroughSecondLastOplogEntryFetcherScheduleErrorAndCancelsOplogFetcher) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // Make the second last oplog entry fetcher command fail. Allow all other requests to be // scheduled. 
@@ -1718,7 +1719,7 @@ TEST_F(DataReplicatorTest, }; _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1758,10 +1759,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorPassesThroughSecondLastOplogEntryFetcherCallbackErrorAndCancelsOplogFetcher) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1810,10 +1811,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorCancelsBothSecondLastOplogEntryFetcherAndOplogFetcherOnShutdown) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1857,10 +1858,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorCancelsSecondLastOplogEntryFetcherOnOplogFetcherCallbackError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -1914,10 +1915,10 @@ TEST_F( DataReplicatorTest, DataReplicatorReturnsTypeMismatchErrorWhenSecondLastOplogEntryFetcherReturnsMalformedDocument) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + 
ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto oplogEntry = makeOplogEntry(1); auto net = getNet(); @@ -1963,10 +1964,10 @@ TEST_F( TEST_F(DataReplicatorTest, DataReplicatorReturnsOplogOutOfOrderIfStopTimestampPrecedesBeginTimestamp) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); { @@ -2010,7 +2011,7 @@ TEST_F( DataReplicatorTest, DataReplicatorPassesThroughInsertOplogSeedDocumentErrorAfterDataCloningFinishesWithNoOperationsToApply) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); NamespaceString insertDocumentNss; BSONObj insertDocumentDoc; @@ -2022,7 +2023,7 @@ TEST_F( }; _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto oplogEntry = makeOplogEntry(1); auto net = getNet(); @@ -2069,7 +2070,7 @@ TEST_F( DataReplicatorTest, DataReplicatorReturnsCallbackCanceledAndDoesNotScheduleRollbackCheckerIfShutdownAfterInsertingInsertOplogSeedDocument) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); NamespaceString insertDocumentNss; BSONObj insertDocumentDoc; @@ -2082,7 +2083,7 @@ TEST_F( }; _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto oplogEntry = makeOplogEntry(1); auto net = getNet(); @@ -2129,7 +2130,7 @@ TEST_F( DataReplicatorTest, DataReplicatorPassesThroughRollbackCheckerScheduleErrorAfterCloningFinishesWithNoOperationsToApply) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // Make the second replSetGetRBID command fail. 
Allow all other requests to be scheduled. executor::RemoteCommandRequest request; @@ -2147,7 +2148,7 @@ TEST_F( }; _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto oplogEntry = makeOplogEntry(1); auto net = getNet(); @@ -2192,10 +2193,10 @@ TEST_F( DataReplicatorTest, DataReplicatorPassesThroughRollbackCheckerCallbackErrorAfterCloningFinishesWithNoOperationsToApply) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto oplogEntry = makeOplogEntry(1); auto net = getNet(); @@ -2245,10 +2246,10 @@ TEST_F( TEST_F(DataReplicatorTest, DataReplicatorCancelsLastRollbackCheckerOnShutdown) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto oplogEntry = makeOplogEntry(1); auto net = getNet(); @@ -2299,10 +2300,10 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsLastRollbackCheckerOnShutdown) { TEST_F(DataReplicatorTest, DataReplicatorCancelsLastRollbackCheckerOnOplogFetcherCallbackError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto oplogEntry = makeOplogEntry(1); auto net = getNet(); @@ -2357,10 +2358,10 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsLastRollbackCheckerOnOplogFetche TEST_F(DataReplicatorTest, 
DataReplicatorReturnsUnrecoverableRollbackErrorIfSyncSourceRolledBackAfterCloningData) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto oplogEntry = makeOplogEntry(1); auto net = getNet(); @@ -2405,12 +2406,12 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfterCloning) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); - ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get())); + ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get())); auto oplogEntry = makeOplogEntry(1); auto net = getNet(); @@ -2488,17 +2489,17 @@ TEST_F(DataReplicatorTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfte dr->join(); ASSERT_EQUALS(OplogEntry(oplogEntry).getOpTime(), unittest::assertGet(_lastApplied).opTime); ASSERT_EQUALS(oplogEntry["h"].Long(), unittest::assertGet(_lastApplied).value); - ASSERT_FALSE(_storageInterface->getInitialSyncFlag(txn.get())); + ASSERT_FALSE(_storageInterface->getInitialSyncFlag(opCtx.get())); } TEST_F(DataReplicatorTest, DataReplicatorPassesThroughGetNextApplierBatchScheduleError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); - ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get())); + ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get())); auto net = getNet(); int baseRollbackId = 1; @@ -2547,12 +2548,12 
@@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughGetNextApplierBatchSchedul TEST_F(DataReplicatorTest, DataReplicatorPassesThroughSecondGetNextApplierBatchScheduleError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); - ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get())); + ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get())); auto net = getNet(); int baseRollbackId = 1; @@ -2601,12 +2602,12 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughSecondGetNextApplierBatchS TEST_F(DataReplicatorTest, DataReplicatorCancelsGetNextApplierBatchOnShutdown) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); - ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get())); + ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get())); auto net = getNet(); int baseRollbackId = 1; @@ -2651,12 +2652,12 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsGetNextApplierBatchOnShutdown) { TEST_F(DataReplicatorTest, DataReplicatorPassesThroughGetNextApplierBatchInLockError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); - ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get())); + ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get())); // _getNextApplierBatch_inlock() returns BadValue when it gets an oplog entry with an unexpected // version (not OplogEntry::kOplogVersion). 
@@ -2715,12 +2716,12 @@ TEST_F( DataReplicatorTest, DataReplicatorReturnsEmptyBatchFromGetNextApplierBatchInLockIfRsSyncApplyStopFailPointIsEnabled) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); - ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get())); + ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get())); // _getNextApplierBatch_inlock() returns BadValue when it gets an oplog entry with an unexpected // version (not OplogEntry::kOplogVersion). @@ -2790,12 +2791,12 @@ TEST_F( TEST_F(DataReplicatorTest, DataReplicatorReturnsNoSuchKeyIfApplierBatchContainsAnOplogEntryWithoutHash) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); - ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get())); + ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get())); // This oplog entry (without a required "h" field) will be read by OplogFetcher and inserted // into OplogBuffer to be retrieved by _getNextApplierBatch_inlock(). 
@@ -2848,12 +2849,12 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorPassesThroughMultiApplierScheduleError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); - ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get())); + ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get())); auto net = getNet(); int baseRollbackId = 1; @@ -2920,14 +2921,14 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughMultiApplierScheduleError) TEST_F(DataReplicatorTest, DataReplicatorPassesThroughMultiApplierCallbackError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); getExternalState()->multiApplyFn = [](OperationContext*, const MultiApplier::Operations&, MultiApplier::ApplyOperationFn) { return Status(ErrorCodes::OperationFailed, "multiApply failed"); }; _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); int baseRollbackId = 1; @@ -2975,10 +2976,10 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughMultiApplierCallbackError) TEST_F(DataReplicatorTest, DataReplicatorCancelsGetNextApplierBatchCallbackOnOplogFetcherError) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); int baseRollbackId = 1; @@ -3026,10 +3027,10 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsGetNextApplierBatchCallbackOnOpl TEST_F(DataReplicatorTest, DataReplicatorReturnsLastAppliedOnReachingStopTimestampAfterApplyingOneBatch) { 
auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto lastOp = makeOplogEntry(2); @@ -3088,10 +3089,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, DataReplicatorReturnsLastAppliedOnReachingStopTimestampAfterApplyingMultipleBatches) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); // To make DataReplicator apply multiple batches, we make the third and last operation a command // so that it will go into a separate batch from the second operation. First operation is the @@ -3186,7 +3187,7 @@ TEST_F( DataReplicatorTest, DataReplicatorSchedulesLastOplogEntryFetcherToGetNewStopTimestampIfMissingDocumentsHaveBeenFetchedDuringMultiInitialSyncApply) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // Override DataReplicatorExternalState::_multiInitialSyncApply() so that it will also fetch a // missing document. @@ -3210,7 +3211,7 @@ TEST_F( }; _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); // Use command for third and last operation to ensure we have two batches to apply. auto lastOp = makeOplogEntry(3, "c"); @@ -3281,7 +3282,7 @@ TEST_F( TEST_F(DataReplicatorTest, DataReplicatorReturnsInvalidSyncSourceWhenFailInitialSyncWithBadHostFailpointIsEnabled) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); // This fail point makes chooseSyncSourceCallback fail with an InvalidSyncSource error. 
auto failPoint = getGlobalFailPointRegistry()->getFailPoint("failInitialSyncWithBadHost"); @@ -3289,7 +3290,7 @@ TEST_F(DataReplicatorTest, ON_BLOCK_EXIT([failPoint]() { failPoint->setMode(FailPoint::off); }); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); dr->join(); ASSERT_EQUALS(ErrorCodes::InvalidSyncSource, _lastApplied); @@ -3297,10 +3298,10 @@ TEST_F(DataReplicatorTest, TEST_F(DataReplicatorTest, OplogOutOfOrderOnOplogFetchFinish) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345)); - ASSERT_OK(dr->startup(txn.get(), maxAttempts)); + ASSERT_OK(dr->startup(opCtx.get(), maxAttempts)); auto net = getNet(); int baseRollbackId = 1; @@ -3346,10 +3347,10 @@ TEST_F(DataReplicatorTest, OplogOutOfOrderOnOplogFetchFinish) { TEST_F(DataReplicatorTest, GetInitialSyncProgressReturnsCorrectProgress) { auto dr = &getDR(); - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 27017)); - ASSERT_OK(dr->startup(txn.get(), 2U)); + ASSERT_OK(dr->startup(opCtx.get(), 2U)); auto net = getNet(); int baseRollbackId = 1; diff --git a/src/mongo/db/repl/database_task.cpp b/src/mongo/db/repl/database_task.cpp index b19bf201b5d..5c4f9422ea9 100644 --- a/src/mongo/db/repl/database_task.cpp +++ b/src/mongo/db/repl/database_task.cpp @@ -41,16 +41,16 @@ namespace repl { // static DatabaseTask::Task DatabaseTask::makeGlobalExclusiveLockTask(const Task& task) { invariant(task); - DatabaseTask::Task newTask = [task](OperationContext* txn, const Status& status) { + DatabaseTask::Task newTask = [task](OperationContext* opCtx, const Status& status) { if (!status.isOK()) { - return task(txn, status); + return task(opCtx, status); } 
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite lock(txn->lockState()); - return task(txn, status); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite lock(opCtx->lockState()); + return task(opCtx, status); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "globalExclusiveLockTask", "global"); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "globalExclusiveLockTask", "global"); MONGO_UNREACHABLE; }; return newTask; @@ -61,17 +61,17 @@ DatabaseTask::Task DatabaseTask::makeDatabaseLockTask(const Task& task, const std::string& databaseName, LockMode mode) { invariant(task); - DatabaseTask::Task newTask = [=](OperationContext* txn, const Status& status) { + DatabaseTask::Task newTask = [=](OperationContext* opCtx, const Status& status) { if (!status.isOK()) { - return task(txn, status); + return task(opCtx, status); } MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { LockMode permissiveLockMode = isSharedLockMode(mode) ? MODE_IS : MODE_IX; - ScopedTransaction transaction(txn, permissiveLockMode); - Lock::DBLock lock(txn->lockState(), databaseName, mode); - return task(txn, status); + ScopedTransaction transaction(opCtx, permissiveLockMode); + Lock::DBLock lock(opCtx->lockState(), databaseName, mode); + return task(opCtx, status); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "databaseLockTask", databaseName); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "databaseLockTask", databaseName); MONGO_UNREACHABLE; }; return newTask; @@ -82,18 +82,18 @@ DatabaseTask::Task DatabaseTask::makeCollectionLockTask(const Task& task, const NamespaceString& nss, LockMode mode) { invariant(task); - DatabaseTask::Task newTask = [=](OperationContext* txn, const Status& status) { + DatabaseTask::Task newTask = [=](OperationContext* opCtx, const Status& status) { if (!status.isOK()) { - return task(txn, status); + return task(opCtx, status); } MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { LockMode permissiveLockMode = isSharedLockMode(mode) ? 
MODE_IS : MODE_IX; - ScopedTransaction transaction(txn, permissiveLockMode); - Lock::DBLock lock(txn->lockState(), nss.db(), permissiveLockMode); - Lock::CollectionLock collectionLock(txn->lockState(), nss.toString(), mode); - return task(txn, status); + ScopedTransaction transaction(opCtx, permissiveLockMode); + Lock::DBLock lock(opCtx->lockState(), nss.db(), permissiveLockMode); + Lock::CollectionLock collectionLock(opCtx->lockState(), nss.toString(), mode); + return task(opCtx, status); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "collectionLockTask", nss.toString()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "collectionLockTask", nss.toString()); MONGO_UNREACHABLE; }; return newTask; diff --git a/src/mongo/db/repl/database_task_test.cpp b/src/mongo/db/repl/database_task_test.cpp index 5c004c466d7..beaa896acf0 100644 --- a/src/mongo/db/repl/database_task_test.cpp +++ b/src/mongo/db/repl/database_task_test.cpp @@ -49,8 +49,8 @@ class DatabaseTaskTest : public TaskRunnerTest {}; TEST_F(DatabaseTaskTest, TaskRunnerErrorStatus) { // Should not attempt to acquire lock on error status from task runner. - auto task = [](OperationContext* txn, const Status& status) { - ASSERT_FALSE(txn); + auto task = [](OperationContext* opCtx, const Status& status) { + ASSERT_FALSE(opCtx); ASSERT_EQUALS(ErrorCodes::BadValue, status.code()); return TaskRunner::NextAction::kInvalid; }; @@ -66,15 +66,15 @@ TEST_F(DatabaseTaskTest, TaskRunnerErrorStatus) { TEST_F(DatabaseTaskTest, RunGlobalExclusiveLockTask) { stdx::mutex mutex; bool called = false; - OperationContext* txn = nullptr; + OperationContext* opCtx = nullptr; bool lockIsW = false; Status status = getDetectableErrorStatus(); // Task returning 'void' implies NextAction::NoAction. 
auto task = [&](OperationContext* theTxn, const Status& theStatus) { stdx::lock_guard<stdx::mutex> lk(mutex); called = true; - txn = theTxn; - lockIsW = txn->lockState()->isW(); + opCtx = theTxn; + lockIsW = opCtx->lockState()->isW(); status = theStatus; return TaskRunner::NextAction::kCancel; }; @@ -84,7 +84,7 @@ TEST_F(DatabaseTaskTest, RunGlobalExclusiveLockTask) { stdx::lock_guard<stdx::mutex> lk(mutex); ASSERT_TRUE(called); - ASSERT(txn); + ASSERT(opCtx); ASSERT_TRUE(lockIsW); ASSERT_OK(status); } @@ -92,15 +92,15 @@ TEST_F(DatabaseTaskTest, RunGlobalExclusiveLockTask) { void _testRunDatabaseLockTask(DatabaseTaskTest& test, LockMode mode) { stdx::mutex mutex; bool called = false; - OperationContext* txn = nullptr; + OperationContext* opCtx = nullptr; bool isDatabaseLockedForMode = false; Status status = test.getDetectableErrorStatus(); // Task returning 'void' implies NextAction::NoAction. auto task = [&](OperationContext* theTxn, const Status& theStatus) { stdx::lock_guard<stdx::mutex> lk(mutex); called = true; - txn = theTxn; - isDatabaseLockedForMode = txn->lockState()->isDbLockedForMode(databaseName, mode); + opCtx = theTxn; + isDatabaseLockedForMode = opCtx->lockState()->isDbLockedForMode(databaseName, mode); status = theStatus; return TaskRunner::NextAction::kCancel; }; @@ -110,7 +110,7 @@ void _testRunDatabaseLockTask(DatabaseTaskTest& test, LockMode mode) { stdx::lock_guard<stdx::mutex> lk(mutex); ASSERT_TRUE(called); - ASSERT(txn); + ASSERT(opCtx); ASSERT_TRUE(isDatabaseLockedForMode); ASSERT_OK(status); } @@ -134,16 +134,16 @@ TEST_F(DatabaseTaskTest, RunDatabaseLockTaskModeIS) { void _testRunCollectionLockTask(DatabaseTaskTest& test, LockMode mode) { stdx::mutex mutex; bool called = false; - OperationContext* txn = nullptr; + OperationContext* opCtx = nullptr; bool isCollectionLockedForMode = false; Status status = test.getDetectableErrorStatus(); // Task returning 'void' implies NextAction::NoAction. 
auto task = [&](OperationContext* theTxn, const Status& theStatus) { stdx::lock_guard<stdx::mutex> lk(mutex); called = true; - txn = theTxn; + opCtx = theTxn; isCollectionLockedForMode = - txn->lockState()->isCollectionLockedForMode(nss.toString(), mode); + opCtx->lockState()->isCollectionLockedForMode(nss.toString(), mode); status = theStatus; return TaskRunner::NextAction::kCancel; }; @@ -153,7 +153,7 @@ void _testRunCollectionLockTask(DatabaseTaskTest& test, LockMode mode) { stdx::lock_guard<stdx::mutex> lk(mutex); ASSERT_TRUE(called); - ASSERT(txn); + ASSERT(opCtx); ASSERT_TRUE(isCollectionLockedForMode); ASSERT_OK(status); } diff --git a/src/mongo/db/repl/databases_cloner.cpp b/src/mongo/db/repl/databases_cloner.cpp index 3f7c1ce11e4..ff0c9170e15 100644 --- a/src/mongo/db/repl/databases_cloner.cpp +++ b/src/mongo/db/repl/databases_cloner.cpp @@ -354,13 +354,13 @@ void DatabasesCloner::_onEachDBCloneFinish(const Status& status, const std::stri auto adminStatus = Status(ErrorCodes::NotYetInitialized, ""); { // TODO: Move isAdminDbValid() out of the collection/database cloner code paths. 
- OperationContext* txn = cc().getOperationContext(); - ServiceContext::UniqueOperationContext txnPtr; - if (!txn) { - txnPtr = cc().makeOperationContext(); - txn = txnPtr.get(); + OperationContext* opCtx = cc().getOperationContext(); + ServiceContext::UniqueOperationContext opCtxPtr; + if (!opCtx) { + opCtxPtr = cc().makeOperationContext(); + opCtx = opCtxPtr.get(); } - adminStatus = _storage->isAdminDbValid(txn); + adminStatus = _storage->isAdminDbValid(opCtx); } if (!adminStatus.isOK()) { LOG(1) << "Validation failed on 'admin' db due to " << adminStatus; diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp index 366e8c04a7c..777a4eedacd 100644 --- a/src/mongo/db/repl/databases_cloner_test.cpp +++ b/src/mongo/db/repl/databases_cloner_test.cpp @@ -142,27 +142,27 @@ protected: executor::ThreadPoolExecutorTest::setUp(); launchExecutorThread(); - _storageInterface.createOplogFn = [this](OperationContext* txn, + _storageInterface.createOplogFn = [this](OperationContext* opCtx, const NamespaceString& nss) { _storageInterfaceWorkDone.createOplogCalled = true; return Status::OK(); }; _storageInterface.insertDocumentFn = - [this](OperationContext* txn, const NamespaceString& nss, const BSONObj& doc) { + [this](OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) { ++_storageInterfaceWorkDone.documentsInsertedCount; return Status::OK(); }; _storageInterface.insertDocumentsFn = [this]( - OperationContext* txn, const NamespaceString& nss, const std::vector<BSONObj>& ops) { + OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& ops) { _storageInterfaceWorkDone.insertedOplogEntries = true; ++_storageInterfaceWorkDone.oplogEntriesInserted; return Status::OK(); }; - _storageInterface.dropCollFn = [this](OperationContext* txn, const NamespaceString& nss) { + _storageInterface.dropCollFn = [this](OperationContext* opCtx, const NamespaceString& nss) { 
_storageInterfaceWorkDone.droppedCollections.push_back(nss.ns()); return Status::OK(); }; - _storageInterface.dropUserDBsFn = [this](OperationContext* txn) { + _storageInterface.dropUserDBsFn = [this](OperationContext* opCtx) { _storageInterfaceWorkDone.droppedUserDBs = true; return Status::OK(); }; @@ -728,9 +728,9 @@ TEST_F(DBsClonerTest, DatabaseClonerChecksAdminDbUsingStorageInterfaceAfterCopyi bool isAdminDbValidFnCalled = false; OperationContext* isAdminDbValidFnOpCtx = nullptr; _storageInterface.isAdminDbValidFn = [&isAdminDbValidFnCalled, - &isAdminDbValidFnOpCtx](OperationContext* txn) { + &isAdminDbValidFnOpCtx](OperationContext* opCtx) { isAdminDbValidFnCalled = true; - isAdminDbValidFnOpCtx = txn; + isAdminDbValidFnOpCtx = opCtx; return Status::OK(); }; @@ -770,7 +770,7 @@ TEST_F(DBsClonerTest, AdminDbValidationErrorShouldAbortTheCloner) { Status result = getDetectableErrorStatus(); bool isAdminDbValidFnCalled = false; - _storageInterface.isAdminDbValidFn = [&isAdminDbValidFnCalled](OperationContext* txn) { + _storageInterface.isAdminDbValidFn = [&isAdminDbValidFnCalled](OperationContext* opCtx) { isAdminDbValidFnCalled = true; return Status(ErrorCodes::OperationFailed, "admin db invalid"); }; diff --git a/src/mongo/db/repl/initial_sync.cpp b/src/mongo/db/repl/initial_sync.cpp index 3c010185e64..ba5953f0035 100644 --- a/src/mongo/db/repl/initial_sync.cpp +++ b/src/mongo/db/repl/initial_sync.cpp @@ -52,26 +52,26 @@ InitialSync::~InitialSync() {} /* initial oplog application, during initial sync, after cloning. 
*/ -void InitialSync::oplogApplication(OperationContext* txn, const OpTime& endOpTime) { +void InitialSync::oplogApplication(OperationContext* opCtx, const OpTime& endOpTime) { if (replSetForceInitialSyncFailure > 0) { log() << "test code invoked, forced InitialSync failure: " << replSetForceInitialSyncFailure; replSetForceInitialSyncFailure--; throw DBException("forced error", 0); } - _applyOplogUntil(txn, endOpTime); + _applyOplogUntil(opCtx, endOpTime); } /* applies oplog from "now" until endOpTime using the applier threads for initial sync*/ -void InitialSync::_applyOplogUntil(OperationContext* txn, const OpTime& endOpTime) { +void InitialSync::_applyOplogUntil(OperationContext* opCtx, const OpTime& endOpTime) { unsigned long long bytesApplied = 0; unsigned long long entriesApplied = 0; while (true) { OpQueue ops; - auto replCoord = repl::ReplicationCoordinator::get(txn); - while (!tryPopAndWaitForMore(txn, &ops, BatchLimits{})) { + auto replCoord = repl::ReplicationCoordinator::get(opCtx); + while (!tryPopAndWaitForMore(opCtx, &ops, BatchLimits{})) { if (globalInShutdownDeprecated()) { return; } @@ -108,10 +108,10 @@ void InitialSync::_applyOplogUntil(OperationContext* txn, const OpTime& endOpTim // Tally operation information and apply batch. Don't use ops again after these lines. 
bytesApplied += ops.getBytes(); entriesApplied += ops.getCount(); - const OpTime lastOpTime = multiApply(txn, ops.releaseBatch()); + const OpTime lastOpTime = multiApply(opCtx, ops.releaseBatch()); replCoord->setMyLastAppliedOpTime(lastOpTime); - setNewTimestamp(txn->getServiceContext(), lastOpTime.getTimestamp()); + setNewTimestamp(opCtx->getServiceContext(), lastOpTime.getTimestamp()); if (globalInShutdownDeprecated()) { return; diff --git a/src/mongo/db/repl/initial_sync.h b/src/mongo/db/repl/initial_sync.h index 167038363a7..3afce50a11d 100644 --- a/src/mongo/db/repl/initial_sync.h +++ b/src/mongo/db/repl/initial_sync.h @@ -47,7 +47,7 @@ public: /** * applies up to endOpTime, fetching missing documents as needed. */ - void oplogApplication(OperationContext* txn, const OpTime& endOpTime); + void oplogApplication(OperationContext* opCtx, const OpTime& endOpTime); private: /** @@ -55,7 +55,7 @@ private: * * NOTE:Will not transition or check states */ - void _applyOplogUntil(OperationContext* txn, const OpTime& endOpTime); + void _applyOplogUntil(OperationContext* opCtx, const OpTime& endOpTime); }; // Used for ReplSetTest testing. 
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp index 7c48ec8671c..10540c0fd3c 100644 --- a/src/mongo/db/repl/master_slave.cpp +++ b/src/mongo/db/repl/master_slave.cpp @@ -95,7 +95,7 @@ const int restartSync = 0; const int restartSyncAfterSleep = 1; } // namespace -void pretouchOperation(OperationContext* txn, const BSONObj& op); +void pretouchOperation(OperationContext* opCtx, const BSONObj& op); void pretouchN(vector<BSONObj>&, unsigned a, unsigned b); /* if 1 sync() is running */ @@ -114,12 +114,12 @@ struct ReplInfo { }; -ReplSource::ReplSource(OperationContext* txn) { +ReplSource::ReplSource(OperationContext* opCtx) { nClonedThisPass = 0; - ensureMe(txn); + ensureMe(opCtx); } -ReplSource::ReplSource(OperationContext* txn, BSONObj o) : nClonedThisPass(0) { +ReplSource::ReplSource(OperationContext* opCtx, BSONObj o) : nClonedThisPass(0) { only = o.getStringField("only"); hostName = o.getStringField("host"); _sourceName = o.getStringField("source"); @@ -155,7 +155,7 @@ ReplSource::ReplSource(OperationContext* txn, BSONObj o) : nClonedThisPass(0) { incompleteCloneDbs.insert(e.fieldName()); } } - ensureMe(txn); + ensureMe(opCtx); } /* Turn our C++ Source object into a BSONObj */ @@ -189,31 +189,31 @@ BSONObj ReplSource::jsobj() { return b.obj(); } -void ReplSource::ensureMe(OperationContext* txn) { +void ReplSource::ensureMe(OperationContext* opCtx) { string myname = getHostName(); // local.me is an identifier for a server for getLastError w:2+ - bool exists = Helpers::getSingleton(txn, "local.me", _me); + bool exists = Helpers::getSingleton(opCtx, "local.me", _me); if (!exists || !_me.hasField("host") || _me["host"].String() != myname) { - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dblk(txn->lockState(), "local", MODE_X); - WriteUnitOfWork wunit(txn); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock dblk(opCtx->lockState(), "local", MODE_X); + WriteUnitOfWork wunit(opCtx); // clean out local.me - 
Helpers::emptyCollection(txn, "local.me"); + Helpers::emptyCollection(opCtx, "local.me"); // repopulate BSONObjBuilder b; b.appendOID("_id", 0, true); b.append("host", myname); _me = b.obj(); - Helpers::putSingleton(txn, "local.me", _me); + Helpers::putSingleton(opCtx, "local.me", _me); wunit.commit(); } _me = _me.getOwned(); } -void ReplSource::save(OperationContext* txn) { +void ReplSource::save(OperationContext* opCtx) { BSONObjBuilder b; verify(!hostName.empty()); b.append("host", hostName); @@ -226,7 +226,7 @@ void ReplSource::save(OperationContext* txn) { LOG(1) << "Saving repl source: " << o << endl; { - OldClientContext ctx(txn, "local.sources", false); + OldClientContext ctx(opCtx, "local.sources", false); const NamespaceString requestNs("local.sources"); UpdateRequest request(requestNs); @@ -235,14 +235,14 @@ void ReplSource::save(OperationContext* txn) { request.setUpdates(o); request.setUpsert(); - UpdateResult res = update(txn, ctx.db(), request); + UpdateResult res = update(opCtx, ctx.db(), request); verify(!res.modifiers); verify(res.numMatched == 1 || !res.upserted.isEmpty()); } } -static void addSourceToList(OperationContext* txn, +static void addSourceToList(OperationContext* opCtx, ReplSource::SourceVector& v, ReplSource& s, ReplSource::SourceVector& old) { @@ -263,9 +263,9 @@ static void addSourceToList(OperationContext* txn, /* we reuse our existing objects so that we can keep our existing connection and cursor in effect. 
*/ -void ReplSource::loadAll(OperationContext* txn, SourceVector& v) { +void ReplSource::loadAll(OperationContext* opCtx, SourceVector& v) { const char* localSources = "local.sources"; - OldClientContext ctx(txn, localSources, false); + OldClientContext ctx(opCtx, localSources, false); SourceVector old = v; v.clear(); @@ -275,13 +275,16 @@ void ReplSource::loadAll(OperationContext* txn, SourceVector& v) { // check that no items are in sources other than that // add if missing int n = 0; - unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan( - txn, localSources, ctx.db()->getCollection(localSources), PlanExecutor::YIELD_MANUAL)); + unique_ptr<PlanExecutor> exec( + InternalPlanner::collectionScan(opCtx, + localSources, + ctx.db()->getCollection(localSources), + PlanExecutor::YIELD_MANUAL)); BSONObj obj; PlanExecutor::ExecState state; while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { n++; - ReplSource tmp(txn, obj); + ReplSource tmp(opCtx, obj); if (tmp.hostName != replSettings.getSource()) { log() << "--source " << replSettings.getSource() << " != " << tmp.hostName << " from local.sources collection" << endl; @@ -303,10 +306,10 @@ void ReplSource::loadAll(OperationContext* txn, SourceVector& v) { uassert(10002, "local.sources collection corrupt?", n < 2); if (n == 0) { // source missing. add. 
- ReplSource s(txn); + ReplSource s(opCtx); s.hostName = replSettings.getSource(); s.only = replSettings.getOnly(); - s.save(txn); + s.save(opCtx); } } else { try { @@ -317,41 +320,41 @@ void ReplSource::loadAll(OperationContext* txn, SourceVector& v) { } unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan( - txn, localSources, ctx.db()->getCollection(localSources), PlanExecutor::YIELD_MANUAL)); + opCtx, localSources, ctx.db()->getCollection(localSources), PlanExecutor::YIELD_MANUAL)); BSONObj obj; PlanExecutor::ExecState state; while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { - ReplSource tmp(txn, obj); + ReplSource tmp(opCtx, obj); if (tmp.syncedTo.isNull()) { - DBDirectClient c(txn); + DBDirectClient c(opCtx); BSONObj op = c.findOne("local.oplog.$main", QUERY("op" << NE << "n").sort(BSON("$natural" << -1))); if (!op.isEmpty()) { tmp.syncedTo = op["ts"].timestamp(); } } - addSourceToList(txn, v, tmp, old); + addSourceToList(opCtx, v, tmp, old); } uassert(17066, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state); } -bool ReplSource::throttledForceResyncDead(OperationContext* txn, const char* requester) { +bool ReplSource::throttledForceResyncDead(OperationContext* opCtx, const char* requester) { if (time(0) - lastForcedResync > 600) { - forceResyncDead(txn, requester); + forceResyncDead(opCtx, requester); lastForcedResync = time(0); return true; } return false; } -void ReplSource::forceResyncDead(OperationContext* txn, const char* requester) { +void ReplSource::forceResyncDead(OperationContext* opCtx, const char* requester) { if (!replAllDead) return; SourceVector sources; - ReplSource::loadAll(txn, sources); + ReplSource::loadAll(opCtx, sources); for (SourceVector::iterator i = sources.begin(); i != sources.end(); ++i) { log() << requester << " forcing resync from " << (*i)->hostName << endl; - (*i)->forceResync(txn, requester); + (*i)->forceResync(opCtx, requester); } replAllDead = 0; } @@ -379,7 +382,7 
@@ public: out->push_back(Privilege(ResourcePattern::forClusterResource(), actions)); } - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string& ns, BSONObj& cmdObj, int options, @@ -391,9 +394,9 @@ public: return appendCommandStatus(result, status); } - ReplClientInfo::forClient(txn->getClient()).setRemoteID(handshake.getRid()); + ReplClientInfo::forClient(opCtx->getClient()).setRemoteID(handshake.getRid()); - status = getGlobalReplicationCoordinator()->processHandshake(txn, handshake); + status = getGlobalReplicationCoordinator()->processHandshake(opCtx, handshake); return appendCommandStatus(result, status); } @@ -429,12 +432,12 @@ bool ReplSource::_connect(OplogReader* reader, const HostAndPort& host, const OI } -void ReplSource::forceResync(OperationContext* txn, const char* requester) { +void ReplSource::forceResync(OperationContext* opCtx, const char* requester) { BSONObj info; { // This is always a GlobalWrite lock (so no ns/db used from the context) - invariant(txn->lockState()->isW()); - Lock::TempRelease tempRelease(txn->lockState()); + invariant(opCtx->lockState()->isW()); + Lock::TempRelease tempRelease(opCtx->lockState()); if (!_connect(&oplogReader, HostAndPort(hostName), @@ -456,14 +459,14 @@ void ReplSource::forceResync(OperationContext* txn, const char* requester) { if (!e.embeddedObject().getBoolField("empty")) { if (name != "local") { if (only.empty() || only == name) { - resyncDrop(txn, name); + resyncDrop(opCtx, name); } } } } syncedTo = Timestamp(); addDbNextPass.clear(); - save(txn); + save(opCtx); } Status ReplSource::_updateIfDoneWithInitialSync() { @@ -483,23 +486,23 @@ Status ReplSource::_updateIfDoneWithInitialSync() { return Status::OK(); } -void ReplSource::resyncDrop(OperationContext* txn, const string& dbName) { +void ReplSource::resyncDrop(OperationContext* opCtx, const string& dbName) { log() << "resync: dropping database " << dbName; - invariant(txn->lockState()->isW()); + 
invariant(opCtx->lockState()->isW()); - Database* const db = dbHolder().get(txn, dbName); + Database* const db = dbHolder().get(opCtx, dbName); if (!db) { log() << "resync: dropping database " << dbName << " - database does not exist. nothing to do."; return; } - Database::dropDatabase(txn, db); + Database::dropDatabase(opCtx, db); } /* grab initial copy of a database from the master */ -void ReplSource::resync(OperationContext* txn, const std::string& dbName) { +void ReplSource::resync(OperationContext* opCtx, const std::string& dbName) { const std::string db(dbName); // need local copy of the name, we're dropping the original - resyncDrop(txn, db); + resyncDrop(opCtx, db); { log() << "resync: cloning database " << db << " to get an initial copy" << endl; @@ -512,11 +515,11 @@ void ReplSource::resync(OperationContext* txn, const std::string& dbName) { cloneOptions.snapshot = true; Cloner cloner; - Status status = cloner.copyDb(txn, db, hostName.c_str(), cloneOptions, NULL); + Status status = cloner.copyDb(opCtx, db, hostName.c_str(), cloneOptions, NULL); if (!status.isOK()) { if (status.code() == ErrorCodes::DatabaseDifferCase) { - resyncDrop(txn, db); + resyncDrop(opCtx, db); log() << "resync: database " << db << " not valid on the master due to a name conflict, dropping."; return; @@ -552,12 +555,12 @@ bool DatabaseIgnorer::ignoreAt(const string& db, const Timestamp& currentOplogTi } } -bool ReplSource::handleDuplicateDbName(OperationContext* txn, +bool ReplSource::handleDuplicateDbName(OperationContext* opCtx, const BSONObj& op, const char* ns, const char* db) { // We are already locked at this point - if (dbHolder().get(txn, ns) != NULL) { + if (dbHolder().get(opCtx, ns) != NULL) { // Database is already present. 
return true; } @@ -577,8 +580,8 @@ bool ReplSource::handleDuplicateDbName(OperationContext* txn, bool dbOk = false; { // This is always a GlobalWrite lock (so no ns/db used from the context) - invariant(txn->lockState()->isW()); - Lock::TempRelease tempRelease(txn->lockState()); + invariant(opCtx->lockState()->isW()); + Lock::TempRelease tempRelease(opCtx->lockState()); // We always log an operation after executing it (never before), so // a database list will always be valid as of an oplog entry generated @@ -634,8 +637,8 @@ bool ReplSource::handleDuplicateDbName(OperationContext* txn, incompleteCloneDbs.erase(*i); addDbNextPass.erase(*i); - AutoGetDb autoDb(txn, *i, MODE_X); - Database::dropDatabase(txn, autoDb.getDb()); + AutoGetDb autoDb(opCtx, *i, MODE_X); + Database::dropDatabase(opCtx, autoDb.getDb()); } massert(14034, @@ -644,16 +647,16 @@ bool ReplSource::handleDuplicateDbName(OperationContext* txn, return true; } -void ReplSource::applyCommand(OperationContext* txn, const BSONObj& op) { +void ReplSource::applyCommand(OperationContext* opCtx, const BSONObj& op) { try { - Status status = applyCommand_inlock(txn, op, true); + Status status = applyCommand_inlock(opCtx, op, true); if (!status.isOK()) { SyncTail sync(nullptr, SyncTail::MultiSyncApplyFunc()); sync.setHostname(hostName); - if (sync.shouldRetry(txn, op)) { + if (sync.shouldRetry(opCtx, op)) { uassert(28639, "Failure retrying initial sync update", - applyCommand_inlock(txn, op, true).isOK()); + applyCommand_inlock(opCtx, op, true).isOK()); } } } catch (UserException& e) { @@ -667,16 +670,16 @@ void ReplSource::applyCommand(OperationContext* txn, const BSONObj& op) { } } -void ReplSource::applyOperation(OperationContext* txn, Database* db, const BSONObj& op) { +void ReplSource::applyOperation(OperationContext* opCtx, Database* db, const BSONObj& op) { try { - Status status = applyOperation_inlock(txn, db, op); + Status status = applyOperation_inlock(opCtx, db, op); if (!status.isOK()) { SyncTail 
sync(nullptr, SyncTail::MultiSyncApplyFunc()); sync.setHostname(hostName); - if (sync.shouldRetry(txn, op)) { + if (sync.shouldRetry(opCtx, op)) { uassert(15914, "Failure retrying initial sync update", - applyOperation_inlock(txn, db, op).isOK()); + applyOperation_inlock(opCtx, db, op).isOK()); } } } catch (UserException& e) { @@ -697,7 +700,7 @@ void ReplSource::applyOperation(OperationContext* txn, Database* db, const BSONO @param alreadyLocked caller already put us in write lock if true */ -void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn, +void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* opCtx, BSONObj& op, bool alreadyLocked) { LOG(6) << "processing op: " << redact(op) << endl; @@ -725,10 +728,10 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn, if (!only.empty() && only != dbName) return; - // Push the CurOp stack for "txn" so each individual oplog entry application is separately + // Push the CurOp stack for "opCtx" so each individual oplog entry application is separately // reported. - CurOp individualOp(txn); - txn->setReplicatedWrites(false); + CurOp individualOp(opCtx); + opCtx->setReplicatedWrites(false); const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings(); if (replSettings.getPretouch() && !alreadyLocked /*doesn't make sense if in write lock already*/) { @@ -759,16 +762,16 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn, a += m; } // we do one too... - pretouchOperation(txn, op); + pretouchOperation(opCtx, op); tp->join(); countdown = v.size(); } } else { - pretouchOperation(txn, op); + pretouchOperation(opCtx, op); } } - unique_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(txn->lockState())); + unique_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(opCtx->lockState())); if (replAllDead) { // hmmm why is this check here and not at top of this function? 
does it get set between top @@ -777,20 +780,20 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn, throw SyncException(); } - if (!handleDuplicateDbName(txn, op, ns, dbName)) { + if (!handleDuplicateDbName(opCtx, op, ns, dbName)) { return; } // special case apply for commands to avoid implicit database creation if (*op.getStringField("op") == 'c') { - applyCommand(txn, op); + applyCommand(opCtx, op); return; } // This code executes on the slaves only, so it doesn't need to be sharding-aware since // mongos will not send requests there. That's why the last argument is false (do not do // version checking). - OldClientContext ctx(txn, ns, false); + OldClientContext ctx(opCtx, ns, false); bool empty = !ctx.db()->getDatabaseCatalogEntry()->hasUserData(); bool incompleteClone = incompleteCloneDbs.count(dbName) != 0; @@ -813,16 +816,16 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn, log() << "An earlier initial clone of '" << dbName << "' did not complete, now resyncing." 
<< endl; } - save(txn); - OldClientContext ctx(txn, ns, false); + save(opCtx); + OldClientContext ctx(opCtx, ns, false); nClonedThisPass++; - resync(txn, ctx.db()->name()); + resync(opCtx, ctx.db()->name()); addDbNextPass.erase(dbName); incompleteCloneDbs.erase(dbName); } - save(txn); + save(opCtx); } else { - applyOperation(txn, ctx.db(), op); + applyOperation(opCtx, ctx.db(), op); addDbNextPass.erase(dbName); } } @@ -877,7 +880,7 @@ public: 0 ok, don't sleep 1 ok, sleep */ -int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) { +int ReplSource::_sync_pullOpLog(OperationContext* opCtx, int& nApplied) { int okResultCode = restartSyncAfterSleep; string ns = string("local.oplog.$") + sourceName(); LOG(2) << "sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n'; @@ -911,9 +914,9 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) { } // obviously global isn't ideal, but non-repl set is old so // keeping it simple - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite lk(txn->lockState()); - save(txn); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite lk(opCtx->lockState()); + save(opCtx); } BSONObjBuilder gte; @@ -948,7 +951,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) { b.append("ns", *i + '.'); b.append("op", "db"); BSONObj op = b.done(); - _sync_pullOpLog_applyOperation(txn, op, false); + _sync_pullOpLog_applyOperation(opCtx, op, false); } } @@ -974,9 +977,9 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) { log() << ns << " oplog is empty" << endl; } { - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite lk(txn->lockState()); - save(txn); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite lk(opCtx->lockState()); + save(opCtx); } return okResultCode; } @@ -1042,26 +1045,26 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) { const bool moreInitialSyncsPending = 
!addDbNextPass.empty() && n; if (moreInitialSyncsPending || !oplogReader.more()) { - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite lk(txn->lockState()); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite lk(opCtx->lockState()); if (tailing) { okResultCode = restartSync; // don't sleep } syncedTo = nextOpTime; - save(txn); // note how far we are synced up to now + save(opCtx); // note how far we are synced up to now nApplied = n; break; } OCCASIONALLY if (n > 0 && (n > 100000 || time(0) - saveLast > 60)) { // periodically note our progress, in case we are doing a lot of work and crash - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite lk(txn->lockState()); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite lk(opCtx->lockState()); syncedTo = nextOpTime; // can't update local log ts since there are pending operations from our peer - save(txn); + save(opCtx); log() << "checkpoint applied " << n << " operations" << endl; log() << "syncedTo: " << syncedTo.toStringLong() << endl; saveLast = time(0); @@ -1072,7 +1075,8 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) { int b = replApplyBatchSize.load(); bool justOne = b == 1; - unique_ptr<Lock::GlobalWrite> lk(justOne ? 0 : new Lock::GlobalWrite(txn->lockState())); + unique_ptr<Lock::GlobalWrite> lk(justOne ? 
0 + : new Lock::GlobalWrite(opCtx->lockState())); while (1) { BSONElement ts = op.getField("ts"); if (!(ts.type() == Date || ts.type() == bsonTimestamp)) { @@ -1104,11 +1108,11 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) { oplogReader.putBack(op); _sleepAdviceTime = nextOpTime.getSecs() + durationCount<Seconds>(replSettings.getSlaveDelaySecs()) + 1; - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite lk(txn->lockState()); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite lk(opCtx->lockState()); if (n > 0) { syncedTo = last; - save(txn); + save(opCtx); } log() << "applied " << n << " operations" << endl; log() << "syncedTo: " << syncedTo.toStringLong() << endl; @@ -1116,7 +1120,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) { return okResultCode; } - _sync_pullOpLog_applyOperation(txn, op, !justOne); + _sync_pullOpLog_applyOperation(opCtx, op, !justOne); n++; if (--b == 0) @@ -1139,7 +1143,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) { returns >= 0 if ok. return -1 if you want to reconnect. 
return value of zero indicates no sleep necessary before next call */ -int ReplSource::sync(OperationContext* txn, int& nApplied) { +int ReplSource::sync(OperationContext* opCtx, int& nApplied) { _sleepAdviceTime = 0; ReplInfo r("sync"); if (!serverGlobalParams.quiet.load()) { @@ -1167,7 +1171,7 @@ int ReplSource::sync(OperationContext* txn, int& nApplied) { return -1; } - return _sync_pullOpLog(txn, nApplied); + return _sync_pullOpLog(opCtx, nApplied); } /* --------------------------------------------------------------*/ @@ -1184,12 +1188,12 @@ _ reuse that cursor when we can 0 = no sleep recommended 1 = special sentinel indicating adaptive sleep recommended */ -int _replMain(OperationContext* txn, ReplSource::SourceVector& sources, int& nApplied) { +int _replMain(OperationContext* opCtx, ReplSource::SourceVector& sources, int& nApplied) { { ReplInfo r("replMain load sources"); - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite lk(txn->lockState()); - ReplSource::loadAll(txn, sources); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite lk(opCtx->lockState()); + ReplSource::loadAll(opCtx, sources); // only need this param for initial reset _replMainStarted = true; @@ -1208,7 +1212,7 @@ int _replMain(OperationContext* txn, ReplSource::SourceVector& sources, int& nAp ReplSource* s = i->get(); int res = forceReconnect; try { - res = s->sync(txn, nApplied); + res = s->sync(opCtx, nApplied); bool moreToSync = s->haveMoreDbsToSync(); if (res < 0) { sleepAdvice = 3; @@ -1245,17 +1249,17 @@ int _replMain(OperationContext* txn, ReplSource::SourceVector& sources, int& nAp return sleepAdvice; } -static void replMain(OperationContext* txn) { +static void replMain(OperationContext* opCtx) { ReplSource::SourceVector sources; while (1) { auto s = restartSync; { - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite lk(txn->lockState()); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite lk(opCtx->lockState()); if 
(replAllDead) { // throttledForceResyncDead can throw if (!getGlobalReplicationCoordinator()->getSettings().isAutoResyncEnabled() || - !ReplSource::throttledForceResyncDead(txn, "auto")) { + !ReplSource::throttledForceResyncDead(opCtx, "auto")) { log() << "all sources dead: " << replAllDead << ", sleeping for 5 seconds" << endl; break; @@ -1268,7 +1272,7 @@ static void replMain(OperationContext* txn) { try { int nApplied = 0; - s = _replMain(txn, sources, nApplied); + s = _replMain(opCtx, sources, nApplied); if (s == restartSyncAfterSleep) { if (nApplied == 0) s = 2; @@ -1284,8 +1288,8 @@ static void replMain(OperationContext* txn) { } { - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite lk(txn->lockState()); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite lk(opCtx->lockState()); invariant(syncing.swap(0) == 1); } @@ -1316,17 +1320,17 @@ static void replMasterThread() { // Write a keep-alive like entry to the log. This will make things like // printReplicationStatus() and printSlaveReplicationStatus() stay up-to-date even // when things are idle. - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; - AuthorizationSession::get(txn.getClient())->grantInternalAuthorization(); + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; + AuthorizationSession::get(opCtx.getClient())->grantInternalAuthorization(); - Lock::GlobalWrite globalWrite(txn.lockState(), 1); + Lock::GlobalWrite globalWrite(opCtx.lockState(), 1); if (globalWrite.isLocked()) { toSleep = 10; try { - WriteUnitOfWork wuow(&txn); - getGlobalServiceContext()->getOpObserver()->onOpMessage(&txn, BSONObj()); + WriteUnitOfWork wuow(&opCtx); + getGlobalServiceContext()->getOpObserver()->onOpMessage(&opCtx, BSONObj()); wuow.commit(); } catch (...) 
{ log() << "caught exception in replMasterThread()" << endl; @@ -1342,14 +1346,14 @@ static void replSlaveThread() { sleepsecs(1); Client::initThread("replslave"); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; - AuthorizationSession::get(txn.getClient())->grantInternalAuthorization(); - DisableDocumentValidation validationDisabler(&txn); + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; + AuthorizationSession::get(opCtx.getClient())->grantInternalAuthorization(); + DisableDocumentValidation validationDisabler(&opCtx); while (1) { try { - replMain(&txn); + replMain(&opCtx); sleepsecs(5); } catch (AssertionException&) { ReplInfo r("Assertion in replSlaveThread(): sleeping 5 minutes before retry"); @@ -1366,15 +1370,15 @@ static void replSlaveThread() { } } -void startMasterSlave(OperationContext* txn) { +void startMasterSlave(OperationContext* opCtx) { const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings(); if (!replSettings.isSlave() && !replSettings.isMaster()) return; - AuthorizationSession::get(txn->getClient())->grantInternalAuthorization(); + AuthorizationSession::get(opCtx->getClient())->grantInternalAuthorization(); { - ReplSource temp(txn); // Ensures local.me is populated + ReplSource temp(opCtx); // Ensures local.me is populated } if (replSettings.isSlave()) { @@ -1385,7 +1389,7 @@ void startMasterSlave(OperationContext* txn) { if (replSettings.isMaster()) { LOG(1) << "master=true" << endl; - createOplog(txn); + createOplog(opCtx); stdx::thread t(replMasterThread); t.detach(); } @@ -1400,10 +1404,10 @@ int _dummy_z; void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) { Client::initThreadIfNotAlready("pretouchN"); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; // XXX - ScopedTransaction 
transaction(&txn, MODE_S); - Lock::GlobalRead lk(txn.lockState()); + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; // XXX + ScopedTransaction transaction(&opCtx, MODE_S); + Lock::GlobalRead lk(opCtx.lockState()); for (unsigned i = a; i <= b; i++) { const BSONObj& op = v[i]; @@ -1425,8 +1429,8 @@ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) { BSONObjBuilder b; b.append(_id); BSONObj result; - OldClientContext ctx(&txn, ns, false); - if (Helpers::findById(&txn, ctx.db(), ns, b.done(), result)) + OldClientContext ctx(&opCtx, ns, false); + if (Helpers::findById(&opCtx, ctx.db(), ns, b.done(), result)) _dummy_z += result.objsize(); // touch } } catch (DBException& e) { @@ -1436,8 +1440,8 @@ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) { } } -void pretouchOperation(OperationContext* txn, const BSONObj& op) { - if (txn->lockState()->isWriteLocked()) { +void pretouchOperation(OperationContext* opCtx, const BSONObj& op) { + if (opCtx->lockState()->isWriteLocked()) { // no point pretouching if write locked. not sure if this will ever fire, but just in case. return; } @@ -1460,8 +1464,8 @@ void pretouchOperation(OperationContext* txn, const BSONObj& op) { BSONObjBuilder b; b.append(_id); BSONObj result; - AutoGetCollectionForRead ctx(txn, NamespaceString(ns)); - if (Helpers::findById(txn, ctx.getDb(), ns, b.done(), result)) { + AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns)); + if (Helpers::findById(opCtx, ctx.getDb(), ns, b.done(), result)) { _dummy_z += result.objsize(); // touch } } diff --git a/src/mongo/db/repl/master_slave.h b/src/mongo/db/repl/master_slave.h index 3a869a750ba..627223efd16 100644 --- a/src/mongo/db/repl/master_slave.h +++ b/src/mongo/db/repl/master_slave.h @@ -50,7 +50,7 @@ class OperationContext; namespace repl { // Main entry point for master/slave at startup time. 
-void startMasterSlave(OperationContext* txn); +void startMasterSlave(OperationContext* opCtx); // externed for use with resync.cpp extern AtomicInt32 relinquishSyncingSome; @@ -78,15 +78,15 @@ public: class ReplSource { std::shared_ptr<OldThreadPool> tp; - void resync(OperationContext* txn, const std::string& dbName); + void resync(OperationContext* opCtx, const std::string& dbName); /** @param alreadyLocked caller already put us in write lock if true */ - void _sync_pullOpLog_applyOperation(OperationContext* txn, BSONObj& op, bool alreadyLocked); + void _sync_pullOpLog_applyOperation(OperationContext* opCtx, BSONObj& op, bool alreadyLocked); /* pull some operations from the master's oplog, and apply them. calls sync_pullOpLog_applyOperation */ - int _sync_pullOpLog(OperationContext* txn, int& nApplied); + int _sync_pullOpLog(OperationContext* opCtx, int& nApplied); /* we only clone one database per pass, even if a lot need done. This helps us avoid overflowing the master's transaction log by doing too much work before going @@ -106,7 +106,7 @@ class ReplSource { */ bool _doHandshake = false; - void resyncDrop(OperationContext* txn, const std::string& dbName); + void resyncDrop(OperationContext* opCtx, const std::string& dbName); // call without the db mutex void syncToTailOfRemoteLog(); std::string ns() const { @@ -120,16 +120,16 @@ class ReplSource { * master. * @return true iff an op with the specified ns may be applied. */ - bool handleDuplicateDbName(OperationContext* txn, + bool handleDuplicateDbName(OperationContext* opCtx, const BSONObj& op, const char* ns, const char* db); // populates _me so that it can be passed to oplogreader for handshakes /// TODO(spencer): Remove this function once the LegacyReplicationCoordinator is gone. 
- void ensureMe(OperationContext* txn); + void ensureMe(OperationContext* opCtx); - void forceResync(OperationContext* txn, const char* requester); + void forceResync(OperationContext* opCtx, const char* requester); bool _connect(OplogReader* reader, const HostAndPort& host, const OID& myRID); @@ -138,8 +138,8 @@ class ReplSource { public: OplogReader oplogReader; - void applyCommand(OperationContext* txn, const BSONObj& op); - void applyOperation(OperationContext* txn, Database* db, const BSONObj& op); + void applyCommand(OperationContext* opCtx, const BSONObj& op); + void applyOperation(OperationContext* opCtx, Database* db, const BSONObj& op); std::string hostName; // ip addr or hostname plus optionally, ":<port>" std::string _sourceName; // a logical source name. std::string sourceName() const { @@ -156,18 +156,18 @@ public: int nClonedThisPass; typedef std::vector<std::shared_ptr<ReplSource>> SourceVector; - static void loadAll(OperationContext* txn, SourceVector&); + static void loadAll(OperationContext* opCtx, SourceVector&); - explicit ReplSource(OperationContext* txn, BSONObj); + explicit ReplSource(OperationContext* opCtx, BSONObj); // This is not the constructor you are looking for. Always prefer the version that takes // a BSONObj. This is public only as a hack so that the ReplicationCoordinator can find // out the process's RID in master/slave setups. - ReplSource(OperationContext* txn); + ReplSource(OperationContext* opCtx); /* -1 = error */ - int sync(OperationContext* txn, int& nApplied); + int sync(OperationContext* opCtx, int& nApplied); - void save(OperationContext* txn); // write ourself to local.sources + void save(OperationContext* opCtx); // write ourself to local.sources // make a jsobj from our member fields of the form // { host: ..., source: ..., syncedTo: ... } @@ -190,8 +190,8 @@ public: return wait > 0 ? 
wait : 0; } - static bool throttledForceResyncDead(OperationContext* txn, const char* requester); - static void forceResyncDead(OperationContext* txn, const char* requester); + static bool throttledForceResyncDead(OperationContext* opCtx, const char* requester); + static void forceResyncDead(OperationContext* opCtx, const char* requester); }; /** diff --git a/src/mongo/db/repl/multiapplier.cpp b/src/mongo/db/repl/multiapplier.cpp index 036ab4f0d4d..ac78a4f2c8c 100644 --- a/src/mongo/db/repl/multiapplier.cpp +++ b/src/mongo/db/repl/multiapplier.cpp @@ -159,8 +159,8 @@ void MultiApplier::_callback(const executor::TaskExecutor::CallbackArgs& cbd) { StatusWith<OpTime> applyStatus(ErrorCodes::InternalError, "not mutated"); try { - auto txn = cc().makeOperationContext(); - applyStatus = _multiApply(txn.get(), _operations, _applyOperation); + auto opCtx = cc().makeOperationContext(); + applyStatus = _multiApply(opCtx.get(), _operations, _applyOperation); } catch (...) { applyStatus = exceptionToStatus(); } diff --git a/src/mongo/db/repl/multiapplier_test.cpp b/src/mongo/db/repl/multiapplier_test.cpp index 9df868384c2..dcbd6979adf 100644 --- a/src/mongo/db/repl/multiapplier_test.cpp +++ b/src/mongo/db/repl/multiapplier_test.cpp @@ -156,7 +156,7 @@ TEST_F(MultiApplierTest, MultiApplierInvokesCallbackWithCallbackCanceledStatusUp const MultiApplier::Operations operations{OplogEntry(BSON("ts" << Timestamp(Seconds(123), 0)))}; bool multiApplyInvoked = false; - auto multiApply = [&](OperationContext* txn, + auto multiApply = [&](OperationContext* opCtx, MultiApplier::Operations operations, MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> { multiApplyInvoked = true; @@ -223,7 +223,7 @@ TEST_F(MultiApplierTest, MultiApplierCatchesMultiApplyExceptionAndConvertsToCall bool multiApplyInvoked = false; Status multiApplyError(ErrorCodes::OperationFailed, "multi apply failed"); - auto multiApply = [&](OperationContext* txn, + auto multiApply = [&](OperationContext* opCtx, 
MultiApplier::Operations operations, MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> { multiApplyInvoked = true; @@ -255,10 +255,10 @@ TEST_F( OperationContext* multiApplyTxn = nullptr; MultiApplier::Operations operationsToApply; - auto multiApply = [&](OperationContext* txn, + auto multiApply = [&](OperationContext* opCtx, MultiApplier::Operations operations, MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> { - multiApplyTxn = txn; + multiApplyTxn = opCtx; operationsToApply = operations; return operationsToApply.back().getOpTime(); }; diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp index c12425fd423..f127b236614 100644 --- a/src/mongo/db/repl/noop_writer.cpp +++ b/src/mongo/db/repl/noop_writer.cpp @@ -77,8 +77,8 @@ private: void run(Seconds waitTime, NoopWriteFn noopWrite) { Client::initThread("NoopWriter"); while (true) { - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; { stdx::unique_lock<stdx::mutex> lk(_mutex); _cv.wait_for(lk, waitTime.toSystemDuration(), [&] { return _inShutdown; }); @@ -86,7 +86,7 @@ private: if (_inShutdown) return; } - noopWrite(&txn); + noopWrite(&opCtx); } } @@ -126,7 +126,7 @@ Status NoopWriter::startWritingPeriodicNoops(OpTime lastKnownOpTime) { invariant(!_noopRunner); _noopRunner = stdx::make_unique<PeriodicNoopRunner>( - _writeInterval, [this](OperationContext* txn) { _writeNoop(txn); }); + _writeInterval, [this](OperationContext* opCtx) { _writeNoop(opCtx); }); return Status::OK(); } @@ -135,20 +135,20 @@ void NoopWriter::stopWritingPeriodicNoops() { _noopRunner.reset(); } -void NoopWriter::_writeNoop(OperationContext* txn) { - ScopedTransaction transaction(txn, MODE_IX); +void NoopWriter::_writeNoop(OperationContext* opCtx) { + ScopedTransaction transaction(opCtx, MODE_IX); // Use GlobalLock 
+ lockMMAPV1Flush instead of DBLock to allow return when the lock is not // available. It may happen when the primary steps down and a shared global lock is acquired. - Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1); + Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1); if (!lock.isLocked()) { LOG(1) << "Global lock is not available skipping noopWrite"; return; } - txn->lockState()->lockMMAPV1Flush(); + opCtx->lockState()->lockMMAPV1Flush(); - auto replCoord = ReplicationCoordinator::get(txn); + auto replCoord = ReplicationCoordinator::get(opCtx); // Its a proxy for being a primary - if (!replCoord->canAcceptWritesForDatabase(txn, "admin")) { + if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) { LOG(1) << "Not a primary, skipping the noop write"; return; } @@ -166,11 +166,12 @@ void NoopWriter::_writeNoop(OperationContext* txn) { << "Writing noop to oplog as there has been no writes to this replica set in over " << _writeInterval; MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - WriteUnitOfWork uow(txn); - txn->getClient()->getServiceContext()->getOpObserver()->onOpMessage(txn, kMsgObj); + WriteUnitOfWork uow(opCtx); + opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(opCtx, + kMsgObj); uow.commit(); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "writeNoop", rsOplogName); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "writeNoop", rsOplogName); } } diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index e394df05efd..9db957bb00c 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -145,13 +145,13 @@ struct OplogSlot { * function registers the new optime with the storage system and the replication coordinator, * and provides no facility to revert those registrations on rollback. 
*/ -void getNextOpTime(OperationContext* txn, +void getNextOpTime(OperationContext* opCtx, Collection* oplog, ReplicationCoordinator* replCoord, ReplicationCoordinator::Mode replicationMode, unsigned count, OplogSlot* slotsOut) { - synchronizeOnCappedInFlightResource(txn->lockState(), oplog->ns()); + synchronizeOnCappedInFlightResource(opCtx->lockState(), oplog->ns()); long long term = OpTime::kUninitializedTerm; // Fetch term out of the newOpMutex. @@ -163,10 +163,10 @@ void getNextOpTime(OperationContext* txn, stdx::lock_guard<stdx::mutex> lk(newOpMutex); - auto ts = LogicalClock::get(txn)->reserveTicks(count).asTimestamp(); + auto ts = LogicalClock::get(opCtx)->reserveTicks(count).asTimestamp(); newTimestampNotifier.notify_all(); - fassert(28560, oplog->getRecordStore()->oplogDiskLocRegister(txn, ts)); + fassert(28560, oplog->getRecordStore()->oplogDiskLocRegister(opCtx, ts)); // Set hash if we're in replset mode, otherwise it remains 0 in master/slave. const bool needHash = (replicationMode == ReplicationCoordinator::modeReplSet); @@ -229,11 +229,12 @@ void setOplogCollectionName() { namespace { -Collection* getLocalOplogCollection(OperationContext* txn, const std::string& oplogCollectionName) { +Collection* getLocalOplogCollection(OperationContext* opCtx, + const std::string& oplogCollectionName) { if (_localOplogCollection) return _localOplogCollection; - AutoGetCollection autoColl(txn, NamespaceString(oplogCollectionName), MODE_IX); + AutoGetCollection autoColl(opCtx, NamespaceString(oplogCollectionName), MODE_IX); _localOplogCollection = autoColl.getCollection(); massert(13347, "the oplog collection " + oplogCollectionName + @@ -243,7 +244,7 @@ Collection* getLocalOplogCollection(OperationContext* txn, const std::string& op return _localOplogCollection; } -bool oplogDisabled(OperationContext* txn, +bool oplogDisabled(OperationContext* opCtx, ReplicationCoordinator::Mode replicationMode, const NamespaceString& nss) { if (replicationMode == 
ReplicationCoordinator::modeNone) @@ -255,15 +256,15 @@ bool oplogDisabled(OperationContext* txn, if (nss.isSystemDotProfile()) return true; - if (!txn->writesAreReplicated()) + if (!opCtx->writesAreReplicated()) return true; - fassert(28626, txn->recoveryUnit()); + fassert(28626, opCtx->recoveryUnit()); return false; } -OplogDocWriter _logOpWriter(OperationContext* txn, +OplogDocWriter _logOpWriter(OperationContext* opCtx, const char* opstr, const NamespaceString& nss, const BSONObj& obj, @@ -290,11 +291,11 @@ OplogDocWriter _logOpWriter(OperationContext* txn, } // end anon namespace // Truncates the oplog after and including the "truncateTimestamp" entry. -void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp) { +void truncateOplogTo(OperationContext* opCtx, Timestamp truncateTimestamp) { const NamespaceString oplogNss(rsOplogName); - ScopedTransaction transaction(txn, MODE_IX); - AutoGetDb autoDb(txn, oplogNss.db(), MODE_IX); - Lock::CollectionLock oplogCollectionLoc(txn->lockState(), oplogNss.ns(), MODE_X); + ScopedTransaction transaction(opCtx, MODE_IX); + AutoGetDb autoDb(opCtx, oplogNss.db(), MODE_IX); + Lock::CollectionLock oplogCollectionLoc(opCtx->lockState(), oplogNss.ns(), MODE_X); Collection* oplogCollection = autoDb.getDb()->getCollection(oplogNss); if (!oplogCollection) { fassertFailedWithStatusNoTrace( @@ -305,7 +306,7 @@ void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp) { // Scan through oplog in reverse, from latest entry to first, to find the truncateTimestamp. RecordId oldestIDToDelete; // Non-null if there is something to delete. 
auto oplogRs = oplogCollection->getRecordStore(); - auto oplogReverseCursor = oplogRs->getCursor(txn, /*forward=*/false); + auto oplogReverseCursor = oplogRs->getCursor(opCtx, /*forward=*/false); size_t count = 0; while (auto next = oplogReverseCursor->next()) { const BSONObj entry = next->data.releaseToBson(); @@ -325,7 +326,7 @@ void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp) { // oplog is < truncateTimestamp. if (count != 1) { invariant(!oldestIDToDelete.isNull()); - oplogCollection->cappedTruncateAfter(txn, oldestIDToDelete, /*inclusive=*/true); + oplogCollection->cappedTruncateAfter(opCtx, oldestIDToDelete, /*inclusive=*/true); } return; } @@ -356,7 +357,7 @@ void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp) { if not null, specifies a boolean to pass along to the other side as b: param. used for "justOne" or "upsert" flags on 'd', 'u' */ -void _logOpsInner(OperationContext* txn, +void _logOpsInner(OperationContext* opCtx, const NamespaceString& nss, const DocWriter* const* writers, size_t nWriters, @@ -366,79 +367,80 @@ void _logOpsInner(OperationContext* txn, ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator(); if (nss.size() && replicationMode == ReplicationCoordinator::modeReplSet && - !replCoord->canAcceptWritesFor(txn, nss)) { + !replCoord->canAcceptWritesFor(opCtx, nss)) { severe() << "logOp() but can't accept write to collection " << nss.ns(); fassertFailed(17405); } // we jump through a bunch of hoops here to avoid copying the obj buffer twice -- // instead we do a single copy to the destination in the record store. - checkOplogInsert(oplogCollection->insertDocumentsForOplog(txn, writers, nWriters)); + checkOplogInsert(oplogCollection->insertDocumentsForOplog(opCtx, writers, nWriters)); // Set replCoord last optime only after we're sure the WUOW didn't abort and roll back. 
- txn->recoveryUnit()->onCommit([txn, replCoord, finalOpTime] { + opCtx->recoveryUnit()->onCommit([opCtx, replCoord, finalOpTime] { replCoord->setMyLastAppliedOpTimeForward(finalOpTime); - ReplClientInfo::forClient(txn->getClient()).setLastOp(finalOpTime); + ReplClientInfo::forClient(opCtx->getClient()).setLastOp(finalOpTime); }); } -void logOp(OperationContext* txn, +void logOp(OperationContext* opCtx, const char* opstr, const char* ns, const BSONObj& obj, const BSONObj* o2, bool fromMigrate) { - ReplicationCoordinator::Mode replMode = ReplicationCoordinator::get(txn)->getReplicationMode(); + ReplicationCoordinator::Mode replMode = + ReplicationCoordinator::get(opCtx)->getReplicationMode(); NamespaceString nss(ns); - if (oplogDisabled(txn, replMode, nss)) + if (oplogDisabled(opCtx, replMode, nss)) return; ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator(); - Collection* oplog = getLocalOplogCollection(txn, _oplogCollectionName); - Lock::DBLock lk(txn->lockState(), "local", MODE_IX); - Lock::CollectionLock lock(txn->lockState(), _oplogCollectionName, MODE_IX); + Collection* oplog = getLocalOplogCollection(opCtx, _oplogCollectionName); + Lock::DBLock lk(opCtx->lockState(), "local", MODE_IX); + Lock::CollectionLock lock(opCtx->lockState(), _oplogCollectionName, MODE_IX); OplogSlot slot; - getNextOpTime(txn, oplog, replCoord, replMode, 1, &slot); - auto writer = _logOpWriter(txn, opstr, nss, obj, o2, fromMigrate, slot.opTime, slot.hash); + getNextOpTime(opCtx, oplog, replCoord, replMode, 1, &slot); + auto writer = _logOpWriter(opCtx, opstr, nss, obj, o2, fromMigrate, slot.opTime, slot.hash); const DocWriter* basePtr = &writer; - _logOpsInner(txn, nss, &basePtr, 1, oplog, replMode, slot.opTime); + _logOpsInner(opCtx, nss, &basePtr, 1, oplog, replMode, slot.opTime); } -void logOps(OperationContext* txn, +void logOps(OperationContext* opCtx, const char* opstr, const NamespaceString& nss, std::vector<BSONObj>::const_iterator begin, 
std::vector<BSONObj>::const_iterator end, bool fromMigrate) { - ReplicationCoordinator* replCoord = ReplicationCoordinator::get(txn); + ReplicationCoordinator* replCoord = ReplicationCoordinator::get(opCtx); ReplicationCoordinator::Mode replMode = replCoord->getReplicationMode(); invariant(begin != end); - if (oplogDisabled(txn, replMode, nss)) + if (oplogDisabled(opCtx, replMode, nss)) return; const size_t count = end - begin; std::vector<OplogDocWriter> writers; writers.reserve(count); - Collection* oplog = getLocalOplogCollection(txn, _oplogCollectionName); - Lock::DBLock lk(txn->lockState(), "local", MODE_IX); - Lock::CollectionLock lock(txn->lockState(), _oplogCollectionName, MODE_IX); + Collection* oplog = getLocalOplogCollection(opCtx, _oplogCollectionName); + Lock::DBLock lk(opCtx->lockState(), "local", MODE_IX); + Lock::CollectionLock lock(opCtx->lockState(), _oplogCollectionName, MODE_IX); std::unique_ptr<OplogSlot[]> slots(new OplogSlot[count]); - getNextOpTime(txn, oplog, replCoord, replMode, count, slots.get()); + getNextOpTime(opCtx, oplog, replCoord, replMode, count, slots.get()); for (size_t i = 0; i < count; i++) { writers.emplace_back(_logOpWriter( - txn, opstr, nss, begin[i], NULL, fromMigrate, slots[i].opTime, slots[i].hash)); + opCtx, opstr, nss, begin[i], NULL, fromMigrate, slots[i].opTime, slots[i].hash)); } std::unique_ptr<DocWriter const* []> basePtrs(new DocWriter const*[count]); for (size_t i = 0; i < count; i++) { basePtrs[i] = &writers[i]; } - _logOpsInner(txn, nss, basePtrs.get(), count, oplog, replMode, slots[count - 1].opTime); + _logOpsInner(opCtx, nss, basePtrs.get(), count, oplog, replMode, slots[count - 1].opTime); } namespace { -long long getNewOplogSizeBytes(OperationContext* txn, const ReplSettings& replSettings) { +long long getNewOplogSizeBytes(OperationContext* opCtx, const ReplSettings& replSettings) { if (replSettings.getOplogSizeBytes() != 0) { return replSettings.getOplogSizeBytes(); } @@ -459,7 +461,7 @@ long long 
getNewOplogSizeBytes(OperationContext* txn, const ReplSettings& replSe #else long long lowerBound = 0; double bytes = 0; - if (txn->getClient()->getServiceContext()->getGlobalStorageEngine()->isEphemeral()) { + if (opCtx->getClient()->getServiceContext()->getGlobalStorageEngine()->isEphemeral()) { // in memory: 50MB minimum size lowerBound = 50LL * 1024 * 1024; bytes = pi.getMemSizeMB() * 1024 * 1024; @@ -482,19 +484,19 @@ long long getNewOplogSizeBytes(OperationContext* txn, const ReplSettings& replSe } } // namespace -void createOplog(OperationContext* txn, const std::string& oplogCollectionName, bool isReplSet) { - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite lk(txn->lockState()); +void createOplog(OperationContext* opCtx, const std::string& oplogCollectionName, bool isReplSet) { + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite lk(opCtx->lockState()); - const ReplSettings& replSettings = ReplicationCoordinator::get(txn)->getSettings(); + const ReplSettings& replSettings = ReplicationCoordinator::get(opCtx)->getSettings(); - OldClientContext ctx(txn, oplogCollectionName); + OldClientContext ctx(opCtx, oplogCollectionName); Collection* collection = ctx.db()->getCollection(oplogCollectionName); if (collection) { if (replSettings.getOplogSizeBytes() != 0) { const CollectionOptions oplogOpts = - collection->getCatalogEntry()->getCollectionOptions(txn); + collection->getCatalogEntry()->getCollectionOptions(opCtx); int o = (int)(oplogOpts.cappedSize / (1024 * 1024)); int n = (int)(replSettings.getOplogSizeBytes() / (1024 * 1024)); @@ -508,12 +510,12 @@ void createOplog(OperationContext* txn, const std::string& oplogCollectionName, } if (!isReplSet) - initTimestampFromOplog(txn, oplogCollectionName); + initTimestampFromOplog(opCtx, oplogCollectionName); return; } /* create an oplog collection, if it doesn't yet exist. 
*/ - const auto sz = getNewOplogSizeBytes(txn, replSettings); + const auto sz = getNewOplogSizeBytes(opCtx, replSettings); log() << "******" << endl; log() << "creating replication oplog of size: " << (int)(sz / (1024 * 1024)) << "MB..." << endl; @@ -524,24 +526,24 @@ void createOplog(OperationContext* txn, const std::string& oplogCollectionName, options.autoIndexId = CollectionOptions::NO; MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - WriteUnitOfWork uow(txn); - invariant(ctx.db()->createCollection(txn, oplogCollectionName, options)); + WriteUnitOfWork uow(opCtx); + invariant(ctx.db()->createCollection(opCtx, oplogCollectionName, options)); if (!isReplSet) - getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSONObj()); + getGlobalServiceContext()->getOpObserver()->onOpMessage(opCtx, BSONObj()); uow.commit(); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", oplogCollectionName); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", oplogCollectionName); /* sync here so we don't get any surprising lag later when we try to sync */ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine(); - storageEngine->flushAllFiles(txn, true); + storageEngine->flushAllFiles(opCtx, true); log() << "******" << endl; } -void createOplog(OperationContext* txn) { - const auto isReplSet = ReplicationCoordinator::get(txn)->getReplicationMode() == +void createOplog(OperationContext* opCtx) { + const auto isReplSet = ReplicationCoordinator::get(opCtx)->getReplicationMode() == ReplicationCoordinator::modeReplSet; - createOplog(txn, _oplogCollectionName, isReplSet); + createOplog(opCtx, _oplogCollectionName, isReplSet); } // ------------------------------------- @@ -575,13 +577,13 @@ struct ApplyOpMetadata { std::map<std::string, ApplyOpMetadata> opsMap = { {"create", - {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { + {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { const NamespaceString 
nss(parseNs(ns, cmd)); if (auto idIndexElem = cmd["idIndex"]) { // Remove "idIndex" field from command. auto cmdWithoutIdIndex = cmd.removeField("idIndex"); return createCollection( - txn, nss.db().toString(), cmdWithoutIdIndex, idIndexElem.Obj()); + opCtx, nss.db().toString(), cmdWithoutIdIndex, idIndexElem.Obj()); } // No _id index spec was provided, so we should build a v:1 _id index. @@ -591,55 +593,55 @@ std::map<std::string, ApplyOpMetadata> opsMap = { idIndexSpecBuilder.append(IndexDescriptor::kIndexNameFieldName, "_id_"); idIndexSpecBuilder.append(IndexDescriptor::kNamespaceFieldName, nss.ns()); idIndexSpecBuilder.append(IndexDescriptor::kKeyPatternFieldName, BSON("_id" << 1)); - return createCollection(txn, nss.db().toString(), cmd, idIndexSpecBuilder.done()); + return createCollection(opCtx, nss.db().toString(), cmd, idIndexSpecBuilder.done()); }, {ErrorCodes::NamespaceExists}}}, {"collMod", - {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { + {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { BSONObjBuilder resultWeDontCareAbout; - return collMod(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); + return collMod(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); }, {ErrorCodes::IndexNotFound, ErrorCodes::NamespaceNotFound}}}, {"dropDatabase", - {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { - return dropDatabase(txn, NamespaceString(ns).db().toString()); + {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { + return dropDatabase(opCtx, NamespaceString(ns).db().toString()); }, {ErrorCodes::NamespaceNotFound}}}, {"drop", - {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { + {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { BSONObjBuilder resultWeDontCareAbout; - return dropCollection(txn, parseNs(ns, cmd), resultWeDontCareAbout); + return dropCollection(opCtx, parseNs(ns, cmd), resultWeDontCareAbout); }, // IllegalOperation is 
necessary because in 3.0 we replicate drops of system.profile // TODO(dannenberg) remove IllegalOperation once we no longer need 3.0 compatibility {ErrorCodes::NamespaceNotFound, ErrorCodes::IllegalOperation}}}, // deleteIndex(es) is deprecated but still works as of April 10, 2015 {"deleteIndex", - {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { + {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { BSONObjBuilder resultWeDontCareAbout; - return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); + return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); }, {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}}, {"deleteIndexes", - {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { + {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { BSONObjBuilder resultWeDontCareAbout; - return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); + return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); }, {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}}, {"dropIndex", - {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { + {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { BSONObjBuilder resultWeDontCareAbout; - return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); + return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); }, {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}}, {"dropIndexes", - {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { + {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { BSONObjBuilder resultWeDontCareAbout; - return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); + return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); }, {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}}, {"renameCollection", - {[](OperationContext* txn, const char* ns, 
BSONObj& cmd) -> Status { + {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { const auto sourceNsElt = cmd.firstElement(); const auto targetNsElt = cmd["to"]; uassert(ErrorCodes::TypeMismatch, @@ -648,7 +650,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = { uassert(ErrorCodes::TypeMismatch, "'to' must be of type String", targetNsElt.type() == BSONType::String); - return renameCollection(txn, + return renameCollection(opCtx, NamespaceString(sourceNsElt.valueStringData()), NamespaceString(targetNsElt.valueStringData()), cmd["dropTarget"].trueValue(), @@ -656,16 +658,16 @@ std::map<std::string, ApplyOpMetadata> opsMap = { }, {ErrorCodes::NamespaceNotFound, ErrorCodes::NamespaceExists}}}, {"applyOps", - {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { + {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { BSONObjBuilder resultWeDontCareAbout; - return applyOps(txn, nsToDatabase(ns), cmd, &resultWeDontCareAbout); + return applyOps(opCtx, nsToDatabase(ns), cmd, &resultWeDontCareAbout); }, {ErrorCodes::UnknownError}}}, - {"convertToCapped", {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { - return convertToCapped(txn, parseNs(ns, cmd), cmd["size"].number()); + {"convertToCapped", {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { + return convertToCapped(opCtx, parseNs(ns, cmd), cmd["size"].number()); }}}, - {"emptycapped", {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status { - return emptyCapped(txn, parseNs(ns, cmd)); + {"emptycapped", {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status { + return emptyCapped(opCtx, parseNs(ns, cmd)); }}}, }; @@ -673,14 +675,14 @@ std::map<std::string, ApplyOpMetadata> opsMap = { // @return failure status if an update should have happened and the document DNE. // See replset initial sync code. 
-Status applyOperation_inlock(OperationContext* txn, +Status applyOperation_inlock(OperationContext* opCtx, Database* db, const BSONObj& op, bool inSteadyStateReplication, IncrementOpsAppliedStatsFn incrementOpsAppliedStats) { LOG(3) << "applying op: " << redact(op); - OpCounters* opCounters = txn->writesAreReplicated() ? &globalOpCounters : &replOpCounters; + OpCounters* opCounters = opCtx->writesAreReplicated() ? &globalOpCounters : &replOpCounters; const char* names[] = {"o", "ns", "op", "b", "o2"}; BSONElement fields[5]; @@ -710,19 +712,19 @@ Status applyOperation_inlock(OperationContext* txn, if (supportsDocLocking()) { // WiredTiger, and others requires MODE_IX since the applier threads driving // this allow writes to the same collection on any thread. - dassert(txn->lockState()->isCollectionLockedForMode(ns, MODE_IX)); + dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_IX)); } else { // mmapV1 ensures that all operations to the same collection are executed from // the same worker thread, so it takes an exclusive lock (MODE_X) - dassert(txn->lockState()->isCollectionLockedForMode(ns, MODE_X)); + dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_X)); } } Collection* collection = db->getCollection(ns); IndexCatalog* indexCatalog = collection == nullptr ? 
nullptr : collection->getIndexCatalog(); - const bool haveWrappingWriteUnitOfWork = txn->lockState()->inAWriteUnitOfWork(); + const bool haveWrappingWriteUnitOfWork = opCtx->lockState()->inAWriteUnitOfWork(); uassert(ErrorCodes::CommandNotSupportedOnView, str::stream() << "applyOps not supported on view: " << ns, - collection || !db->getViewCatalog()->lookup(txn, ns)); + collection || !db->getViewCatalog()->lookup(opCtx, ns)); // operation type -- see logOp() comments for types const char* opType = fieldOp.valuestrsafe(); @@ -768,15 +770,15 @@ Status applyOperation_inlock(OperationContext* txn, } bool relaxIndexConstraints = - ReplicationCoordinator::get(txn)->shouldRelaxIndexConstraints(txn, indexNss); + ReplicationCoordinator::get(opCtx)->shouldRelaxIndexConstraints(opCtx, indexNss); if (indexSpec["background"].trueValue()) { - Lock::TempRelease release(txn->lockState()); - if (txn->lockState()->isLocked()) { + Lock::TempRelease release(opCtx->lockState()); + if (opCtx->lockState()->isLocked()) { // If TempRelease fails, background index build will deadlock. LOG(3) << "apply op: building background index " << indexSpec << " in the foreground because temp release failed"; IndexBuilder builder(indexSpec, relaxIndexConstraints); - Status status = builder.buildInForeground(txn, db); + Status status = builder.buildInForeground(opCtx, db); uassertStatusOK(status); } else { IndexBuilder* builder = new IndexBuilder(indexSpec, relaxIndexConstraints); @@ -785,10 +787,10 @@ Status applyOperation_inlock(OperationContext* txn, // Wait for thread to start and register itself IndexBuilder::waitForBgIndexStarting(); } - txn->recoveryUnit()->abandonSnapshot(); + opCtx->recoveryUnit()->abandonSnapshot(); } else { IndexBuilder builder(indexSpec, relaxIndexConstraints); - Status status = builder.buildInForeground(txn, db); + Status status = builder.buildInForeground(opCtx, db); uassertStatusOK(status); } // Since this is an index operation we can return without falling through. 
@@ -812,10 +814,10 @@ Status applyOperation_inlock(OperationContext* txn, str::stream() << "Failed to apply insert due to empty array element: " << op.toString(), !insertObjs.empty()); - WriteUnitOfWork wuow(txn); + WriteUnitOfWork wuow(opCtx); OpDebug* const nullOpDebug = nullptr; Status status = collection->insertDocuments( - txn, insertObjs.begin(), insertObjs.end(), nullOpDebug, true); + opCtx, insertObjs.begin(), insertObjs.end(), nullOpDebug, true); if (!status.isOK()) { return status; } @@ -849,10 +851,10 @@ Status applyOperation_inlock(OperationContext* txn, bool needToDoUpsert = haveWrappingWriteUnitOfWork; if (!needToDoUpsert) { - WriteUnitOfWork wuow(txn); + WriteUnitOfWork wuow(opCtx); try { OpDebug* const nullOpDebug = nullptr; - status = collection->insertDocument(txn, o, nullOpDebug, true); + status = collection->insertDocument(opCtx, o, nullOpDebug, true); } catch (DBException dbe) { status = dbe.toStatus(); } @@ -881,7 +883,7 @@ Status applyOperation_inlock(OperationContext* txn, UpdateLifecycleImpl updateLifecycle(requestNs); request.setLifecycle(&updateLifecycle); - UpdateResult res = update(txn, db, request); + UpdateResult res = update(opCtx, db, request); if (res.numMatched == 0 && res.upserted.isEmpty()) { error() << "No document was updated even though we got a DuplicateKey " "error when inserting"; @@ -912,7 +914,7 @@ Status applyOperation_inlock(OperationContext* txn, UpdateLifecycleImpl updateLifecycle(requestNs); request.setLifecycle(&updateLifecycle); - UpdateResult ur = update(txn, db, request); + UpdateResult ur = update(opCtx, db, request); if (ur.numMatched == 0 && ur.upserted.isEmpty()) { if (ur.modifiers) { @@ -929,11 +931,11 @@ Status applyOperation_inlock(OperationContext* txn, // { _id:..., { x : {$size:...} } // thus this is not ideal. 
if (collection == NULL || - (indexCatalog->haveIdIndex(txn) && - Helpers::findById(txn, collection, updateCriteria).isNull()) || + (indexCatalog->haveIdIndex(opCtx) && + Helpers::findById(opCtx, collection, updateCriteria).isNull()) || // capped collections won't have an _id index - (!indexCatalog->haveIdIndex(txn) && - Helpers::findOne(txn, collection, updateCriteria, false).isNull())) { + (!indexCatalog->haveIdIndex(opCtx) && + Helpers::findOne(opCtx, collection, updateCriteria, false).isNull())) { string msg = str::stream() << "couldn't find doc: " << redact(op); error() << msg; return Status(ErrorCodes::OperationFailed, msg); @@ -963,7 +965,7 @@ Status applyOperation_inlock(OperationContext* txn, o.hasField("_id")); if (opType[1] == 0) { - deleteObjects(txn, collection, ns, o, PlanExecutor::YIELD_MANUAL, /*justOne*/ valueB); + deleteObjects(opCtx, collection, ns, o, PlanExecutor::YIELD_MANUAL, /*justOne*/ valueB); } else verify(opType[1] == 'b'); // "db" advertisement if (incrementOpsAppliedStats) { @@ -984,15 +986,15 @@ Status applyOperation_inlock(OperationContext* txn, // have a wrapping WUOW, the extra nexting is harmless. The logOp really should have been // done in the WUOW that did the write, but this won't happen because applyOps turns off // observers. - WriteUnitOfWork wuow(txn); + WriteUnitOfWork wuow(opCtx); getGlobalAuthorizationManager()->logOp( - txn, opType, ns.toString().c_str(), o, fieldO2.isABSONObj() ? &o2 : NULL); + opCtx, opType, ns.toString().c_str(), o, fieldO2.isABSONObj() ? 
&o2 : NULL); wuow.commit(); return Status::OK(); } -Status applyCommand_inlock(OperationContext* txn, +Status applyCommand_inlock(OperationContext* opCtx, const BSONObj& op, bool inSteadyStateReplication) { const char* names[] = {"o", "ns", "op"}; @@ -1023,8 +1025,8 @@ Status applyCommand_inlock(OperationContext* txn, return {ErrorCodes::InvalidNamespace, "invalid ns: " + std::string(nss.ns())}; } { - Database* db = dbHolder().get(txn, nss.ns()); - if (db && !db->getCollection(nss.ns()) && db->getViewCatalog()->lookup(txn, nss.ns())) { + Database* db = dbHolder().get(opCtx, nss.ns()); + if (db && !db->getCollection(nss.ns()) && db->getViewCatalog()->lookup(opCtx, nss.ns())) { return {ErrorCodes::CommandNotSupportedOnView, str::stream() << "applyOps not supported on view:" << nss.ns()}; } @@ -1040,7 +1042,7 @@ Status applyCommand_inlock(OperationContext* txn, // Applying commands in repl is done under Global W-lock, so it is safe to not // perform the current DB checks after reacquiring the lock. - invariant(txn->lockState()->isW()); + invariant(opCtx->lockState()->isW()); bool done = false; @@ -1054,7 +1056,7 @@ Status applyCommand_inlock(OperationContext* txn, ApplyOpMetadata curOpToApply = op->second; Status status = Status::OK(); try { - status = curOpToApply.applyFunc(txn, nss.ns().c_str(), o); + status = curOpToApply.applyFunc(opCtx, nss.ns().c_str(), o); } catch (...) 
{ status = exceptionToStatus(); } @@ -1065,21 +1067,21 @@ Status applyCommand_inlock(OperationContext* txn, throw WriteConflictException(); } case ErrorCodes::BackgroundOperationInProgressForDatabase: { - Lock::TempRelease release(txn->lockState()); + Lock::TempRelease release(opCtx->lockState()); BackgroundOperation::awaitNoBgOpInProgForDb(nss.db()); - txn->recoveryUnit()->abandonSnapshot(); - txn->checkForInterrupt(); + opCtx->recoveryUnit()->abandonSnapshot(); + opCtx->checkForInterrupt(); break; } case ErrorCodes::BackgroundOperationInProgressForNamespace: { - Lock::TempRelease release(txn->lockState()); + Lock::TempRelease release(opCtx->lockState()); Command* cmd = Command::findCommand(o.firstElement().fieldName()); invariant(cmd); BackgroundOperation::awaitNoBgOpInProgForNs(cmd->parseNs(nss.db().toString(), o)); - txn->recoveryUnit()->abandonSnapshot(); - txn->checkForInterrupt(); + opCtx->recoveryUnit()->abandonSnapshot(); + opCtx->checkForInterrupt(); break; } default: @@ -1101,8 +1103,8 @@ Status applyCommand_inlock(OperationContext* txn, // AuthorizationManager's logOp method registers a RecoveryUnit::Change // and to do so we need to have begun a UnitOfWork - WriteUnitOfWork wuow(txn); - getGlobalAuthorizationManager()->logOp(txn, opType, nss.ns().c_str(), o, nullptr); + WriteUnitOfWork wuow(opCtx); + getGlobalAuthorizationManager()->logOp(opCtx, opType, nss.ns().c_str(), o, nullptr); wuow.commit(); return Status::OK(); @@ -1114,19 +1116,19 @@ void setNewTimestamp(ServiceContext* service, const Timestamp& newTime) { newTimestampNotifier.notify_all(); } -void initTimestampFromOplog(OperationContext* txn, const std::string& oplogNS) { - DBDirectClient c(txn); +void initTimestampFromOplog(OperationContext* opCtx, const std::string& oplogNS) { + DBDirectClient c(opCtx); BSONObj lastOp = c.findOne(oplogNS, Query().sort(reverseNaturalObj), NULL, QueryOption_SlaveOk); if (!lastOp.isEmpty()) { LOG(1) << "replSet setting last Timestamp"; const OpTime opTime = 
fassertStatusOK(28696, OpTime::parseFromOplogEntry(lastOp)); - setNewTimestamp(txn->getServiceContext(), opTime.getTimestamp()); + setNewTimestamp(opCtx->getServiceContext(), opTime.getTimestamp()); } } -void oplogCheckCloseDatabase(OperationContext* txn, Database* db) { - invariant(txn->lockState()->isW()); +void oplogCheckCloseDatabase(OperationContext* opCtx, Database* db) { + invariant(opCtx->lockState()->isW()); _localOplogCollection = nullptr; } @@ -1213,8 +1215,8 @@ void SnapshotThread::run() { } try { - auto txn = client.makeOperationContext(); - Lock::GlobalLock globalLock(txn->lockState(), MODE_IS, UINT_MAX); + auto opCtx = client.makeOperationContext(); + Lock::GlobalLock globalLock(opCtx->lockState(), MODE_IS, UINT_MAX); if (!replCoord->getMemberState().readable()) { // If our MemberState isn't readable, we may not be in a consistent state so don't @@ -1231,9 +1233,9 @@ void SnapshotThread::run() { // Make sure there are no in-flight capped inserts while we create our snapshot. // This lock cannot be aquired until all writes holding the resource commit/abort. Lock::ResourceLock cappedInsertLockForOtherDb( - txn->lockState(), resourceCappedInFlightForOtherDb, MODE_X); + opCtx->lockState(), resourceCappedInFlightForOtherDb, MODE_X); Lock::ResourceLock cappedInsertLockForLocalDb( - txn->lockState(), resourceCappedInFlightForLocalDb, MODE_X); + opCtx->lockState(), resourceCappedInFlightForLocalDb, MODE_X); // Reserve the name immediately before we take our snapshot. This ensures that all // names that compare lower must be from points in time visible to this named @@ -1241,15 +1243,15 @@ void SnapshotThread::run() { name = replCoord->reserveSnapshotName(nullptr); // This establishes the view that we will name. 
- _manager->prepareForCreateSnapshot(txn.get()); + _manager->prepareForCreateSnapshot(opCtx.get()); } auto opTimeOfSnapshot = OpTime(); { - AutoGetCollectionForRead oplog(txn.get(), NamespaceString(rsOplogName)); + AutoGetCollectionForRead oplog(opCtx.get(), NamespaceString(rsOplogName)); invariant(oplog.getCollection()); // Read the latest op from the oplog. - auto cursor = oplog.getCollection()->getCursor(txn.get(), /*forward*/ false); + auto cursor = oplog.getCollection()->getCursor(opCtx.get(), /*forward*/ false); auto record = cursor->next(); if (!record) continue; // oplog is completely empty. @@ -1259,7 +1261,7 @@ void SnapshotThread::run() { invariant(!opTimeOfSnapshot.isNull()); } - replCoord->createSnapshot(txn.get(), opTimeOfSnapshot, name); + replCoord->createSnapshot(opCtx.get(), opTimeOfSnapshot, name); } catch (const WriteConflictException& wce) { log() << "skipping storage snapshot pass due to write conflict"; continue; diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h index dc61805e789..fcff70fb607 100644 --- a/src/mongo/db/repl/oplog.h +++ b/src/mongo/db/repl/oplog.h @@ -50,20 +50,20 @@ class ReplSettings; /** * Truncates the oplog after, and including, the "truncateTimestamp" entry. */ -void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp); +void truncateOplogTo(OperationContext* opCtx, Timestamp truncateTimestamp); /** * Create a new capped collection for the oplog if it doesn't yet exist. * If the collection already exists (and isReplSet is false), * set the 'last' Timestamp from the last entry of the oplog collection (side effect!) 
*/ -void createOplog(OperationContext* txn, const std::string& oplogCollectionName, bool isReplSet); +void createOplog(OperationContext* opCtx, const std::string& oplogCollectionName, bool isReplSet); /* * Shortcut for above function using oplogCollectionName = _oplogCollectionName, * and replEnabled = replCoord::isReplSet(); */ -void createOplog(OperationContext* txn); +void createOplog(OperationContext* opCtx); extern std::string rsOplogName; extern std::string masterSlaveOplogName; @@ -81,7 +81,7 @@ extern int OPLOG_VERSION; * "db" declares presence of a database (ns is set to the db name + '.') */ -void logOps(OperationContext* txn, +void logOps(OperationContext* opCtx, const char* opstr, const NamespaceString& nss, std::vector<BSONObj>::const_iterator begin, @@ -91,7 +91,7 @@ void logOps(OperationContext* txn, /* For 'u' records, 'obj' captures the mutation made to the object but not * the object itself. 'o2' captures the the criteria for the object that will be modified. */ -void logOp(OperationContext* txn, +void logOp(OperationContext* opCtx, const char* opstr, const char* ns, const BSONObj& obj, @@ -100,7 +100,7 @@ void logOp(OperationContext* txn, // Flush out the cached pointers to the local database and oplog. // Used by the closeDatabase command to ensure we don't cache closed things. -void oplogCheckCloseDatabase(OperationContext* txn, Database* db); +void oplogCheckCloseDatabase(OperationContext* opCtx, Database* db); using IncrementOpsAppliedStatsFn = stdx::function<void()>; /** @@ -110,7 +110,7 @@ using IncrementOpsAppliedStatsFn = stdx::function<void()>; * @param incrementOpsAppliedStats is called whenever an op is applied. * Returns failure status if the op was an update that could not be applied. 
*/ -Status applyOperation_inlock(OperationContext* txn, +Status applyOperation_inlock(OperationContext* opCtx, Database* db, const BSONObj& op, bool inSteadyStateReplication = false, @@ -123,17 +123,19 @@ Status applyOperation_inlock(OperationContext* txn, * initial sync. * Returns failure status if the op that could not be applied. */ -Status applyCommand_inlock(OperationContext* txn, const BSONObj& op, bool inSteadyStateReplication); +Status applyCommand_inlock(OperationContext* opCtx, + const BSONObj& op, + bool inSteadyStateReplication); /** * Initializes the global Timestamp with the value from the timestamp of the last oplog entry. */ -void initTimestampFromOplog(OperationContext* txn, const std::string& oplogNS); +void initTimestampFromOplog(OperationContext* opCtx, const std::string& oplogNS); /** * Sets the global Timestamp to be 'newTime'. */ -void setNewTimestamp(ServiceContext* txn, const Timestamp& newTime); +void setNewTimestamp(ServiceContext* opCtx, const Timestamp& newTime); /** * Detects the current replication mode and sets the "_oplogCollectionName" accordingly. diff --git a/src/mongo/db/repl/oplog_buffer.h b/src/mongo/db/repl/oplog_buffer.h index 9695260c691..f177808a991 100644 --- a/src/mongo/db/repl/oplog_buffer.h +++ b/src/mongo/db/repl/oplog_buffer.h @@ -70,7 +70,7 @@ public: * create backing storage, etc). This method may be called at most once for the lifetime of an * oplog buffer. */ - virtual void startup(OperationContext* txn) = 0; + virtual void startup(OperationContext* opCtx) = 0; /** * Signals to the oplog buffer that it should shut down. This method may block. After @@ -79,7 +79,7 @@ public: * It is legal to call this method multiple times, but it should only be called after startup * has been called. */ - virtual void shutdown(OperationContext* txn) = 0; + virtual void shutdown(OperationContext* opCtx) = 0; /** * Pushes operation into oplog buffer, ignoring any size constraints. Does not block. 
@@ -87,26 +87,26 @@ public: * the limit returned by getMaxSize() but should not otherwise adversely affect normal * functionality such as pushing and popping operations from the oplog buffer. */ - virtual void pushEvenIfFull(OperationContext* txn, const Value& value) = 0; + virtual void pushEvenIfFull(OperationContext* opCtx, const Value& value) = 0; /** * Pushes operation into oplog buffer. * If there are size constraints on the oplog buffer, this may block until sufficient space * is made available (by popping) to complete this operation. */ - virtual void push(OperationContext* txn, const Value& value) = 0; + virtual void push(OperationContext* opCtx, const Value& value) = 0; /** * Pushes operations in the iterator range [begin, end) into the oplog buffer without blocking. */ - virtual void pushAllNonBlocking(OperationContext* txn, + virtual void pushAllNonBlocking(OperationContext* opCtx, Batch::const_iterator begin, Batch::const_iterator end) = 0; /** * Returns when enough space is available. */ - virtual void waitForSpace(OperationContext* txn, std::size_t size) = 0; + virtual void waitForSpace(OperationContext* opCtx, std::size_t size) = 0; /** * Returns true if oplog buffer is empty. @@ -135,13 +135,13 @@ public: /** * Clears oplog buffer. */ - virtual void clear(OperationContext* txn) = 0; + virtual void clear(OperationContext* opCtx) = 0; /** * Returns false if oplog buffer is empty. "value" is left unchanged. * Otherwise, removes last item (saves in "value") from the oplog buffer and returns true. */ - virtual bool tryPop(OperationContext* txn, Value* value) = 0; + virtual bool tryPop(OperationContext* opCtx, Value* value) = 0; /** * Waits "waitDuration" for an operation to be pushed into the oplog buffer. @@ -154,12 +154,12 @@ public: * Returns false if oplog buffer is empty. * Otherwise, returns true and sets "value" to last item in oplog buffer. 
*/ - virtual bool peek(OperationContext* txn, Value* value) = 0; + virtual bool peek(OperationContext* opCtx, Value* value) = 0; /** * Returns the item most recently added to the oplog buffer or nothing if the buffer is empty. */ - virtual boost::optional<Value> lastObjectPushed(OperationContext* txn) const = 0; + virtual boost::optional<Value> lastObjectPushed(OperationContext* opCtx) const = 0; }; } // namespace repl diff --git a/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp b/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp index 9b9cdb82dac..72eb401ddb5 100644 --- a/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp +++ b/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp @@ -49,8 +49,8 @@ OplogBufferBlockingQueue::OplogBufferBlockingQueue() : _queue(kOplogBufferSize, void OplogBufferBlockingQueue::startup(OperationContext*) {} -void OplogBufferBlockingQueue::shutdown(OperationContext* txn) { - clear(txn); +void OplogBufferBlockingQueue::shutdown(OperationContext* opCtx) { + clear(opCtx); } void OplogBufferBlockingQueue::pushEvenIfFull(OperationContext*, const Value& value) { diff --git a/src/mongo/db/repl/oplog_buffer_blocking_queue.h b/src/mongo/db/repl/oplog_buffer_blocking_queue.h index b0fa36a8157..68c74779b0e 100644 --- a/src/mongo/db/repl/oplog_buffer_blocking_queue.h +++ b/src/mongo/db/repl/oplog_buffer_blocking_queue.h @@ -41,23 +41,23 @@ class OplogBufferBlockingQueue final : public OplogBuffer { public: OplogBufferBlockingQueue(); - void startup(OperationContext* txn) override; - void shutdown(OperationContext* txn) override; - void pushEvenIfFull(OperationContext* txn, const Value& value) override; - void push(OperationContext* txn, const Value& value) override; - void pushAllNonBlocking(OperationContext* txn, + void startup(OperationContext* opCtx) override; + void shutdown(OperationContext* opCtx) override; + void pushEvenIfFull(OperationContext* opCtx, const Value& value) override; + void push(OperationContext* opCtx, const Value& value) 
override; + void pushAllNonBlocking(OperationContext* opCtx, Batch::const_iterator begin, Batch::const_iterator end) override; - void waitForSpace(OperationContext* txn, std::size_t size) override; + void waitForSpace(OperationContext* opCtx, std::size_t size) override; bool isEmpty() const override; std::size_t getMaxSize() const override; std::size_t getSize() const override; std::size_t getCount() const override; - void clear(OperationContext* txn) override; - bool tryPop(OperationContext* txn, Value* value) override; + void clear(OperationContext* opCtx) override; + bool tryPop(OperationContext* opCtx, Value* value) override; bool waitForData(Seconds waitDuration) override; - bool peek(OperationContext* txn, Value* value) override; - boost::optional<Value> lastObjectPushed(OperationContext* txn) const override; + bool peek(OperationContext* opCtx, Value* value) override; + boost::optional<Value> lastObjectPushed(OperationContext* opCtx) const override; private: BlockingQueue<BSONObj> _queue; diff --git a/src/mongo/db/repl/oplog_buffer_collection.cpp b/src/mongo/db/repl/oplog_buffer_collection.cpp index 1c74963cc14..f738312a3eb 100644 --- a/src/mongo/db/repl/oplog_buffer_collection.cpp +++ b/src/mongo/db/repl/oplog_buffer_collection.cpp @@ -93,27 +93,27 @@ OplogBufferCollection::Options OplogBufferCollection::getOptions() const { return _options; } -void OplogBufferCollection::startup(OperationContext* txn) { - clear(txn); +void OplogBufferCollection::startup(OperationContext* opCtx) { + clear(opCtx); } -void OplogBufferCollection::shutdown(OperationContext* txn) { +void OplogBufferCollection::shutdown(OperationContext* opCtx) { stdx::lock_guard<stdx::mutex> lk(_mutex); - _dropCollection(txn); + _dropCollection(opCtx); _size = 0; _count = 0; } -void OplogBufferCollection::pushEvenIfFull(OperationContext* txn, const Value& value) { +void OplogBufferCollection::pushEvenIfFull(OperationContext* opCtx, const Value& value) { Batch valueBatch = {value}; - 
pushAllNonBlocking(txn, valueBatch.begin(), valueBatch.end()); + pushAllNonBlocking(opCtx, valueBatch.begin(), valueBatch.end()); } -void OplogBufferCollection::push(OperationContext* txn, const Value& value) { - pushEvenIfFull(txn, value); +void OplogBufferCollection::push(OperationContext* opCtx, const Value& value) { + pushEvenIfFull(opCtx, value); } -void OplogBufferCollection::pushAllNonBlocking(OperationContext* txn, +void OplogBufferCollection::pushAllNonBlocking(OperationContext* opCtx, Batch::const_iterator begin, Batch::const_iterator end) { if (begin == end) { @@ -132,7 +132,7 @@ void OplogBufferCollection::pushAllNonBlocking(OperationContext* txn, return doc; }); - auto status = _storageInterface->insertDocuments(txn, _nss, docsToInsert); + auto status = _storageInterface->insertDocuments(opCtx, _nss, docsToInsert); fassertStatusOK(40161, status); _lastPushedTimestamp = ts; @@ -144,7 +144,7 @@ void OplogBufferCollection::pushAllNonBlocking(OperationContext* txn, _cvNoLongerEmpty.notify_all(); } -void OplogBufferCollection::waitForSpace(OperationContext* txn, std::size_t size) {} +void OplogBufferCollection::waitForSpace(OperationContext* opCtx, std::size_t size) {} bool OplogBufferCollection::isEmpty() const { stdx::lock_guard<stdx::mutex> lk(_mutex); @@ -165,10 +165,10 @@ std::size_t OplogBufferCollection::getCount() const { return _count; } -void OplogBufferCollection::clear(OperationContext* txn) { +void OplogBufferCollection::clear(OperationContext* opCtx) { stdx::lock_guard<stdx::mutex> lk(_mutex); - _dropCollection(txn); - _createCollection(txn); + _dropCollection(opCtx); + _createCollection(opCtx); _size = 0; _count = 0; _sentinelCount = 0; @@ -177,12 +177,12 @@ void OplogBufferCollection::clear(OperationContext* txn) { _peekCache = std::queue<BSONObj>(); } -bool OplogBufferCollection::tryPop(OperationContext* txn, Value* value) { +bool OplogBufferCollection::tryPop(OperationContext* opCtx, Value* value) { stdx::lock_guard<stdx::mutex> 
lk(_mutex); if (_count == 0) { return false; } - return _pop_inlock(txn, value); + return _pop_inlock(opCtx, value); } bool OplogBufferCollection::waitForData(Seconds waitDuration) { @@ -194,24 +194,24 @@ bool OplogBufferCollection::waitForData(Seconds waitDuration) { return _count != 0; } -bool OplogBufferCollection::peek(OperationContext* txn, Value* value) { +bool OplogBufferCollection::peek(OperationContext* opCtx, Value* value) { stdx::lock_guard<stdx::mutex> lk(_mutex); if (_count == 0) { return false; } - *value = _peek_inlock(txn, PeekMode::kExtractEmbeddedDocument); + *value = _peek_inlock(opCtx, PeekMode::kExtractEmbeddedDocument); return true; } boost::optional<OplogBuffer::Value> OplogBufferCollection::lastObjectPushed( - OperationContext* txn) const { + OperationContext* opCtx) const { stdx::lock_guard<stdx::mutex> lk(_mutex); if (_count == 0) { return boost::none; } const auto docs = fassertStatusOK(40348, - _storageInterface->findDocuments(txn, + _storageInterface->findDocuments(opCtx, _nss, kIdIdxName, StorageInterface::ScanDirection::kBackward, @@ -222,9 +222,9 @@ boost::optional<OplogBuffer::Value> OplogBufferCollection::lastObjectPushed( return extractEmbeddedOplogDocument(docs.front()).getOwned(); } -bool OplogBufferCollection::_pop_inlock(OperationContext* txn, Value* value) { +bool OplogBufferCollection::_pop_inlock(OperationContext* opCtx, Value* value) { BSONObj docFromCollection = - _peek_inlock(txn, PeekMode::kReturnUnmodifiedDocumentFromCollection); + _peek_inlock(opCtx, PeekMode::kReturnUnmodifiedDocumentFromCollection); _lastPoppedKey = docFromCollection["_id"].wrap(""); *value = extractEmbeddedOplogDocument(docFromCollection).getOwned(); @@ -239,7 +239,7 @@ bool OplogBufferCollection::_pop_inlock(OperationContext* txn, Value* value) { return true; } -BSONObj OplogBufferCollection::_peek_inlock(OperationContext* txn, PeekMode peekMode) { +BSONObj OplogBufferCollection::_peek_inlock(OperationContext* opCtx, PeekMode peekMode) { 
invariant(_count > 0); BSONObj startKey; @@ -259,7 +259,7 @@ BSONObj OplogBufferCollection::_peek_inlock(OperationContext* txn, PeekMode peek std::size_t limit = isPeekCacheEnabled ? _options.peekCacheSize : 1U; const auto docs = fassertStatusOK( 40163, - _storageInterface->findDocuments(txn, + _storageInterface->findDocuments(opCtx, _nss, kIdIdxName, StorageInterface::ScanDirection::kForward, @@ -286,14 +286,14 @@ BSONObj OplogBufferCollection::_peek_inlock(OperationContext* txn, PeekMode peek MONGO_UNREACHABLE; } -void OplogBufferCollection::_createCollection(OperationContext* txn) { +void OplogBufferCollection::_createCollection(OperationContext* opCtx) { CollectionOptions options; options.temp = true; - fassert(40154, _storageInterface->createCollection(txn, _nss, options)); + fassert(40154, _storageInterface->createCollection(opCtx, _nss, options)); } -void OplogBufferCollection::_dropCollection(OperationContext* txn) { - fassert(40155, _storageInterface->dropCollection(txn, _nss)); +void OplogBufferCollection::_dropCollection(OperationContext* opCtx) { + fassert(40155, _storageInterface->dropCollection(opCtx, _nss)); } std::size_t OplogBufferCollection::getSentinelCount_forTest() const { diff --git a/src/mongo/db/repl/oplog_buffer_collection.h b/src/mongo/db/repl/oplog_buffer_collection.h index cb4cbcac8b8..1636b5bc7b8 100644 --- a/src/mongo/db/repl/oplog_buffer_collection.h +++ b/src/mongo/db/repl/oplog_buffer_collection.h @@ -109,27 +109,27 @@ public: */ Options getOptions() const; - void startup(OperationContext* txn) override; - void shutdown(OperationContext* txn) override; - void pushEvenIfFull(OperationContext* txn, const Value& value) override; - void push(OperationContext* txn, const Value& value) override; + void startup(OperationContext* opCtx) override; + void shutdown(OperationContext* opCtx) override; + void pushEvenIfFull(OperationContext* opCtx, const Value& value) override; + void push(OperationContext* opCtx, const Value& value) override; 
/** * Pushing documents with 'pushAllNonBlocking' will not handle sentinel documents properly. If * pushing sentinel documents is required, use 'push' or 'pushEvenIfFull'. */ - void pushAllNonBlocking(OperationContext* txn, + void pushAllNonBlocking(OperationContext* opCtx, Batch::const_iterator begin, Batch::const_iterator end) override; - void waitForSpace(OperationContext* txn, std::size_t size) override; + void waitForSpace(OperationContext* opCtx, std::size_t size) override; bool isEmpty() const override; std::size_t getMaxSize() const override; std::size_t getSize() const override; std::size_t getCount() const override; - void clear(OperationContext* txn) override; - bool tryPop(OperationContext* txn, Value* value) override; + void clear(OperationContext* opCtx) override; + bool tryPop(OperationContext* opCtx, Value* value) override; bool waitForData(Seconds waitDuration) override; - bool peek(OperationContext* txn, Value* value) override; - boost::optional<Value> lastObjectPushed(OperationContext* txn) const override; + bool peek(OperationContext* opCtx, Value* value) override; + boost::optional<Value> lastObjectPushed(OperationContext* opCtx) const override; // ---- Testing API ---- std::size_t getSentinelCount_forTest() const; @@ -141,19 +141,19 @@ private: /* * Creates a temporary collection with the _nss namespace. */ - void _createCollection(OperationContext* txn); + void _createCollection(OperationContext* opCtx); /* * Drops the collection with the _nss namespace. */ - void _dropCollection(OperationContext* txn); + void _dropCollection(OperationContext* opCtx); enum class PeekMode { kExtractEmbeddedDocument, kReturnUnmodifiedDocumentFromCollection }; /** * Returns the oldest oplog entry in the buffer. * Assumes the buffer is not empty. 
*/ - BSONObj _peek_inlock(OperationContext* txn, PeekMode peekMode); + BSONObj _peek_inlock(OperationContext* opCtx, PeekMode peekMode); // Storage interface used to perform storage engine level functions on the collection. StorageInterface* _storageInterface; @@ -161,7 +161,7 @@ private: /** * Pops an entry off the buffer in a lock. */ - bool _pop_inlock(OperationContext* txn, Value* value); + bool _pop_inlock(OperationContext* opCtx, Value* value); // The namespace for the oplog buffer collection. const NamespaceString _nss; diff --git a/src/mongo/db/repl/oplog_buffer_collection_test.cpp b/src/mongo/db/repl/oplog_buffer_collection_test.cpp index ced9ab1f495..1ee2c3ccc8c 100644 --- a/src/mongo/db/repl/oplog_buffer_collection_test.cpp +++ b/src/mongo/db/repl/oplog_buffer_collection_test.cpp @@ -60,7 +60,7 @@ protected: ServiceContext::UniqueOperationContext makeOperationContext() const; StorageInterface* _storageInterface = nullptr; - ServiceContext::UniqueOperationContext _txn; + ServiceContext::UniqueOperationContext _opCtx; private: void setUp() override; @@ -82,11 +82,11 @@ void OplogBufferCollectionTest::setUp() { _storageInterface = storageInterface.get(); StorageInterface::set(service, std::move(storageInterface)); - _txn = makeOperationContext(); + _opCtx = makeOperationContext(); } void OplogBufferCollectionTest::tearDown() { - _txn.reset(); + _opCtx.reset(); _storageInterface = nullptr; ServiceContextMongoDTest::tearDown(); } @@ -131,50 +131,50 @@ TEST_F(OplogBufferCollectionTest, GetNamespace) { ASSERT_EQUALS(nss, OplogBufferCollection(_storageInterface, nss).getNamespace()); } -void testStartupCreatesCollection(OperationContext* txn, +void testStartupCreatesCollection(OperationContext* opCtx, StorageInterface* storageInterface, const NamespaceString& nss) { OplogBufferCollection oplogBuffer(storageInterface, nss); // Collection should not exist until startup() is called. 
- ASSERT_FALSE(AutoGetCollectionForRead(txn, nss).getCollection()); + ASSERT_FALSE(AutoGetCollectionForRead(opCtx, nss).getCollection()); - oplogBuffer.startup(txn); - ASSERT_TRUE(AutoGetCollectionForRead(txn, nss).getCollection()); + oplogBuffer.startup(opCtx); + ASSERT_TRUE(AutoGetCollectionForRead(opCtx, nss).getCollection()); } TEST_F(OplogBufferCollectionTest, StartupWithDefaultNamespaceCreatesCollection) { auto nss = OplogBufferCollection::getDefaultNamespace(); ASSERT_FALSE(nss.isOplog()); - testStartupCreatesCollection(_txn.get(), _storageInterface, nss); + testStartupCreatesCollection(_opCtx.get(), _storageInterface, nss); } TEST_F(OplogBufferCollectionTest, StartupWithUserProvidedNamespaceCreatesCollection) { - testStartupCreatesCollection(_txn.get(), _storageInterface, makeNamespace(_agent)); + testStartupCreatesCollection(_opCtx.get(), _storageInterface, makeNamespace(_agent)); } TEST_F(OplogBufferCollectionTest, StartupDropsExistingCollectionBeforeCreatingNewCollection) { auto nss = makeNamespace(_agent); - ASSERT_OK(_storageInterface->createCollection(_txn.get(), nss, CollectionOptions())); + ASSERT_OK(_storageInterface->createCollection(_opCtx.get(), nss, CollectionOptions())); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); - ASSERT_TRUE(AutoGetCollectionForRead(_txn.get(), nss).getCollection()); + oplogBuffer.startup(_opCtx.get()); + ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection()); } DEATH_TEST_F(OplogBufferCollectionTest, StartupWithOplogNamespaceTriggersFatalAssertion, "Fatal assertion 40154 Location28838: cannot create a non-capped oplog collection") { - testStartupCreatesCollection(_txn.get(), _storageInterface, NamespaceString("local.oplog.Z")); + testStartupCreatesCollection(_opCtx.get(), _storageInterface, NamespaceString("local.oplog.Z")); } TEST_F(OplogBufferCollectionTest, ShutdownDropsCollection) { auto nss = makeNamespace(_agent); OplogBufferCollection 
oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); - ASSERT_TRUE(AutoGetCollectionForRead(_txn.get(), nss).getCollection()); - oplogBuffer.shutdown(_txn.get()); - ASSERT_FALSE(AutoGetCollectionForRead(_txn.get(), nss).getCollection()); + oplogBuffer.startup(_opCtx.get()); + ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection()); + oplogBuffer.shutdown(_opCtx.get()); + ASSERT_FALSE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection()); } TEST_F(OplogBufferCollectionTest, extractEmbeddedOplogDocumentChangesIdToTimestamp) { @@ -250,7 +250,7 @@ DEATH_TEST_F(OplogBufferCollectionTest, /** * Check collection contents. OplogInterface returns documents in reverse natural order. */ -void _assertDocumentsInCollectionEquals(OperationContext* txn, +void _assertDocumentsInCollectionEquals(OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs) { std::vector<BSONObj> reversedTransformedDocs; @@ -269,7 +269,7 @@ void _assertDocumentsInCollectionEquals(OperationContext* txn, ASSERT_GT(ts, previousTimestamp); } std::reverse(reversedTransformedDocs.begin(), reversedTransformedDocs.end()); - OplogInterfaceLocal oplog(txn, nss.ns()); + OplogInterfaceLocal oplog(opCtx, nss.ns()); auto iter = oplog.makeIterator(); for (const auto& doc : reversedTransformedDocs) { ASSERT_BSONOBJ_EQ(doc, unittest::assertGet(iter->next()).first); @@ -281,13 +281,13 @@ TEST_F(OplogBufferCollectionTest, PushOneDocumentWithPushAllNonBlockingAddsDocum auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); const std::vector<BSONObj> oplog = {makeOplogEntry(1)}; ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.begin(), oplog.end()); + oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end()); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - 
_assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); } TEST_F(OplogBufferCollectionTest, @@ -295,7 +295,7 @@ TEST_F(OplogBufferCollectionTest, auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); const std::vector<BSONObj> emptyOplogEntries; ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); oplogBuffer.pushAllNonBlocking(nullptr, emptyOplogEntries.begin(), emptyOplogEntries.end()); @@ -306,163 +306,163 @@ TEST_F(OplogBufferCollectionTest, PushOneDocumentWithPushAddsDocument) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); BSONObj oplog = makeOplogEntry(1); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.push(_txn.get(), oplog); + oplogBuffer.push(_opCtx.get(), oplog); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog}); } TEST_F(OplogBufferCollectionTest, PushOneDocumentWithPushEvenIfFullAddsDocument) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); BSONObj oplog = makeOplogEntry(1); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); ASSERT_EQUALS(0UL, oplogBuffer.getSentinelCount_forTest()); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog}); } TEST_F(OplogBufferCollectionTest, PeekDoesNotRemoveDocument) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - 
oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); BSONObj oplog1 = makeOplogEntry(1); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.push(_txn.get(), oplog1); + oplogBuffer.push(_opCtx.get(), oplog1); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); // _peekOneSide should provide correct bound inclusion to storage engine when collection has one // document. BSONObj doc; - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog1); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); BSONObj oplog2 = makeOplogEntry(2); - oplogBuffer.push(_txn.get(), oplog2); + oplogBuffer.push(_opCtx.get(), oplog2); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); // _peekOneSide should return same result after adding new oplog entry. - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog1); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog1, oplog2}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog1, oplog2}); } TEST_F(OplogBufferCollectionTest, PeekWithNoDocumentsReturnsFalse) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); BSONObj doc; - ASSERT_FALSE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_FALSE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {}); } TEST_F(OplogBufferCollectionTest, PopDoesNotRemoveDocumentFromCollection) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); BSONObj oplog = 
makeOplogEntry(1); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.push(_txn.get(), oplog); + oplogBuffer.push(_opCtx.get(), oplog); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); BSONObj doc; - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog}); } TEST_F(OplogBufferCollectionTest, PopWithNoDocumentsReturnsFalse) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); BSONObj doc; - ASSERT_FALSE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_FALSE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {}); } TEST_F(OplogBufferCollectionTest, PopAndPeekReturnDocumentsInOrder) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); const std::vector<BSONObj> oplog = { makeOplogEntry(1), makeOplogEntry(2), makeOplogEntry(3), }; ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.begin(), oplog.end()); + oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end()); ASSERT_EQUALS(oplogBuffer.getCount(), 3UL); - _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); BSONObj doc; - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[0]); ASSERT_EQUALS(oplogBuffer.getCount(), 3UL); 
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[0]); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[1]); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[1]); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[2]); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[2]); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); // tryPop does not remove documents from collection. - _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); } TEST_F(OplogBufferCollectionTest, LastObjectPushedReturnsNewestOplogEntry) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); const std::vector<BSONObj> oplog = { makeOplogEntry(1), makeOplogEntry(2), makeOplogEntry(3), }; ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.begin(), oplog.end()); + oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end()); ASSERT_EQUALS(oplogBuffer.getCount(), 3UL); - auto doc = oplogBuffer.lastObjectPushed(_txn.get()); + auto doc = oplogBuffer.lastObjectPushed(_opCtx.get()); ASSERT_BSONOBJ_EQ(*doc, oplog[2]); ASSERT_EQUALS(oplogBuffer.getCount(), 3UL); } @@ -471,9 +471,9 @@ TEST_F(OplogBufferCollectionTest, LastObjectPushedReturnsNoneWithNoEntries) { auto nss = 
makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); - auto doc = oplogBuffer.lastObjectPushed(_txn.get()); + auto doc = oplogBuffer.lastObjectPushed(_opCtx.get()); ASSERT_EQUALS(doc, boost::none); } @@ -481,10 +481,10 @@ TEST_F(OplogBufferCollectionTest, IsEmptyReturnsTrueWhenEmptyAndFalseWhenNot) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); BSONObj oplog = makeOplogEntry(1); ASSERT_TRUE(oplogBuffer.isEmpty()); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog); ASSERT_FALSE(oplogBuffer.isEmpty()); } @@ -492,7 +492,7 @@ TEST_F(OplogBufferCollectionTest, ClearClearsCollection) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); ASSERT_EQUALS(oplogBuffer.getSize(), 0UL); ASSERT_EQUALS(0U, oplogBuffer.getSentinelCount_forTest()); @@ -500,27 +500,27 @@ TEST_F(OplogBufferCollectionTest, ClearClearsCollection) { ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest()); BSONObj oplog = makeOplogEntry(1); - oplogBuffer.push(_txn.get(), oplog); + oplogBuffer.push(_opCtx.get(), oplog); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); ASSERT_EQUALS(oplogBuffer.getSize(), std::size_t(oplog.objsize())); ASSERT_EQUALS(0U, oplogBuffer.getSentinelCount_forTest()); ASSERT_EQUALS(oplog["ts"].timestamp(), oplogBuffer.getLastPushedTimestamp_forTest()); ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest()); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog}); BSONObj sentinel; - oplogBuffer.push(_txn.get(), sentinel); + oplogBuffer.push(_opCtx.get(), 
sentinel); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); ASSERT_EQUALS(oplogBuffer.getSize(), std::size_t(oplog.objsize() + BSONObj().objsize())); ASSERT_EQUALS(1U, oplogBuffer.getSentinelCount_forTest()); ASSERT_EQUALS(oplog["ts"].timestamp(), oplogBuffer.getLastPushedTimestamp_forTest()); ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest()); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog, sentinel}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog, sentinel}); BSONObj oplog2 = makeOplogEntry(2); - oplogBuffer.push(_txn.get(), oplog2); + oplogBuffer.push(_opCtx.get(), oplog2); ASSERT_EQUALS(oplogBuffer.getCount(), 3UL); ASSERT_EQUALS(oplogBuffer.getSize(), std::size_t(oplog.objsize() + BSONObj().objsize() + oplog2.objsize())); @@ -528,10 +528,10 @@ TEST_F(OplogBufferCollectionTest, ClearClearsCollection) { ASSERT_EQUALS(oplog2["ts"].timestamp(), oplogBuffer.getLastPushedTimestamp_forTest()); ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest()); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog, sentinel, oplog2}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog, sentinel, oplog2}); BSONObj poppedDoc; - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &poppedDoc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &poppedDoc)); ASSERT_BSONOBJ_EQ(oplog, poppedDoc); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); ASSERT_EQUALS(oplogBuffer.getSize(), std::size_t(BSONObj().objsize() + oplog2.objsize())); @@ -539,29 +539,29 @@ TEST_F(OplogBufferCollectionTest, ClearClearsCollection) { ASSERT_EQUALS(oplog2["ts"].timestamp(), oplogBuffer.getLastPushedTimestamp_forTest()); ASSERT_EQUALS(oplog["ts"].timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest()); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog, sentinel, oplog2}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog, sentinel, oplog2}); - oplogBuffer.clear(_txn.get()); - 
ASSERT_TRUE(AutoGetCollectionForRead(_txn.get(), nss).getCollection()); + oplogBuffer.clear(_opCtx.get()); + ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection()); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); ASSERT_EQUALS(oplogBuffer.getSize(), 0UL); ASSERT_EQUALS(0U, oplogBuffer.getSentinelCount_forTest()); ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPushedTimestamp_forTest()); ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest()); - _assertDocumentsInCollectionEquals(_txn.get(), nss, {}); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {}); BSONObj doc; - ASSERT_FALSE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_FALSE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); - ASSERT_FALSE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_FALSE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); } TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsDocument) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); unittest::Barrier barrier(2U); BSONObj oplog = makeOplogEntry(1); @@ -578,11 +578,11 @@ TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsDocument) { ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); barrier.countDownAndWait(); - oplogBuffer.push(_txn.get(), oplog); + oplogBuffer.push(_opCtx.get(), oplog); peekingThread.join(); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); ASSERT_TRUE(success); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog); ASSERT_EQUALS(count, 1UL); } @@ -590,7 +590,7 @@ TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsDocument) { TEST_F(OplogBufferCollectionTest, TwoWaitForDataInvocationsBlockAndFindSameDocument) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + 
oplogBuffer.startup(_opCtx.get()); unittest::Barrier barrier(3U); BSONObj oplog = makeOplogEntry(1); @@ -616,13 +616,13 @@ TEST_F(OplogBufferCollectionTest, TwoWaitForDataInvocationsBlockAndFindSameDocum ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); barrier.countDownAndWait(); - oplogBuffer.push(_txn.get(), oplog); + oplogBuffer.push(_opCtx.get(), oplog); peekingThread1.join(); peekingThread2.join(); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); ASSERT_TRUE(success1); BSONObj doc; - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog); ASSERT_EQUALS(count1, 1UL); ASSERT_TRUE(success2); @@ -632,7 +632,7 @@ TEST_F(OplogBufferCollectionTest, TwoWaitForDataInvocationsBlockAndFindSameDocum TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndTimesOutWhenItDoesNotFindDocument) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); BSONObj doc; bool success = false; @@ -648,95 +648,95 @@ TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndTimesOutWhenItDoesNotFindD peekingThread.join(); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); ASSERT_FALSE(success); - ASSERT_FALSE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_FALSE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(count, 0UL); } void _testPushSentinelsProperly( - OperationContext* txn, + OperationContext* opCtx, const NamespaceString& nss, StorageInterface* storageInterface, - stdx::function<void(OperationContext* txn, + stdx::function<void(OperationContext* opCtx, OplogBufferCollection* oplogBuffer, const std::vector<BSONObj>& oplog)> pushDocsFn) { OplogBufferCollection oplogBuffer(storageInterface, nss); - oplogBuffer.startup(txn); + oplogBuffer.startup(opCtx); const std::vector<BSONObj> oplog = { BSONObj(), makeOplogEntry(1), BSONObj(), BSONObj(), makeOplogEntry(2), BSONObj(), }; 
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - pushDocsFn(txn, &oplogBuffer, oplog); + pushDocsFn(opCtx, &oplogBuffer, oplog); ASSERT_EQUALS(oplogBuffer.getCount(), 6UL); - _assertDocumentsInCollectionEquals(txn, nss, oplog); + _assertDocumentsInCollectionEquals(opCtx, nss, oplog); } TEST_F(OplogBufferCollectionTest, PushPushesOnSentinelsProperly) { auto nss = makeNamespace(_agent); - _testPushSentinelsProperly(_txn.get(), + _testPushSentinelsProperly(_opCtx.get(), nss, _storageInterface, - [](OperationContext* txn, + [](OperationContext* opCtx, OplogBufferCollection* oplogBuffer, const std::vector<BSONObj>& oplog) { - oplogBuffer->push(txn, oplog[0]); + oplogBuffer->push(opCtx, oplog[0]); ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest()); - oplogBuffer->push(txn, oplog[1]); + oplogBuffer->push(opCtx, oplog[1]); ASSERT_EQUALS(0U, oplogBuffer->getSentinelCount_forTest()); - oplogBuffer->push(txn, oplog[2]); + oplogBuffer->push(opCtx, oplog[2]); ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest()); - oplogBuffer->push(txn, oplog[3]); + oplogBuffer->push(opCtx, oplog[3]); ASSERT_EQUALS(2U, oplogBuffer->getSentinelCount_forTest()); - oplogBuffer->push(txn, oplog[4]); + oplogBuffer->push(opCtx, oplog[4]); ASSERT_EQUALS(0U, oplogBuffer->getSentinelCount_forTest()); - oplogBuffer->push(txn, oplog[5]); + oplogBuffer->push(opCtx, oplog[5]); ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest()); }); } TEST_F(OplogBufferCollectionTest, PushEvenIfFullPushesOnSentinelsProperly) { auto nss = makeNamespace(_agent); - _testPushSentinelsProperly(_txn.get(), + _testPushSentinelsProperly(_opCtx.get(), nss, _storageInterface, - [](OperationContext* txn, + [](OperationContext* opCtx, OplogBufferCollection* oplogBuffer, const std::vector<BSONObj>& oplog) { - oplogBuffer->pushEvenIfFull(txn, oplog[0]); + oplogBuffer->pushEvenIfFull(opCtx, oplog[0]); ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest()); - oplogBuffer->pushEvenIfFull(txn, oplog[1]); + 
oplogBuffer->pushEvenIfFull(opCtx, oplog[1]); ASSERT_EQUALS(0U, oplogBuffer->getSentinelCount_forTest()); - oplogBuffer->pushEvenIfFull(txn, oplog[2]); + oplogBuffer->pushEvenIfFull(opCtx, oplog[2]); ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest()); - oplogBuffer->pushEvenIfFull(txn, oplog[3]); + oplogBuffer->pushEvenIfFull(opCtx, oplog[3]); ASSERT_EQUALS(2U, oplogBuffer->getSentinelCount_forTest()); - oplogBuffer->pushEvenIfFull(txn, oplog[4]); + oplogBuffer->pushEvenIfFull(opCtx, oplog[4]); ASSERT_EQUALS(0U, oplogBuffer->getSentinelCount_forTest()); - oplogBuffer->pushEvenIfFull(txn, oplog[5]); + oplogBuffer->pushEvenIfFull(opCtx, oplog[5]); ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest()); }); } TEST_F(OplogBufferCollectionTest, PushAllNonBlockingPushesOnSentinelsProperly) { auto nss = makeNamespace(_agent); - _testPushSentinelsProperly(_txn.get(), + _testPushSentinelsProperly(_opCtx.get(), nss, _storageInterface, - [](OperationContext* txn, + [](OperationContext* opCtx, OplogBufferCollection* oplogBuffer, const std::vector<BSONObj>& oplog) { oplogBuffer->pushAllNonBlocking( - txn, oplog.cbegin(), oplog.cend()); + opCtx, oplog.cbegin(), oplog.cend()); ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest()); }); } @@ -748,207 +748,207 @@ DEATH_TEST_F( auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); const std::vector<BSONObj> oplog = { makeOplogEntry(2), makeOplogEntry(1), }; ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.begin(), oplog.end()); + oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end()); } TEST_F(OplogBufferCollectionTest, SentinelInMiddleIsReturnedInOrder) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); const std::vector<BSONObj> 
oplog = { makeOplogEntry(1), makeOplogEntry(2), BSONObj(), makeOplogEntry(3), }; ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog[0]); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog[1]); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog[2]); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog[3]); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[0]); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[1]); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[2]); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[3]); ASSERT_EQUALS(oplogBuffer.getCount(), 4UL); - _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); BSONObj doc; - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[0]); ASSERT_EQUALS(oplogBuffer.getCount(), 4UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[0]); ASSERT_EQUALS(oplogBuffer.getCount(), 3UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[1]); ASSERT_EQUALS(oplogBuffer.getCount(), 3UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[1]); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[3]); ASSERT_EQUALS(oplogBuffer.getCount(), 
1UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[3]); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); // tryPop does not remove documents from collection. - _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); } TEST_F(OplogBufferCollectionTest, SentinelAtBeginningIsReturnedAtBeginning) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); const std::vector<BSONObj> oplog = {BSONObj(), makeOplogEntry(1)}; ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog[0]); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog[1]); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[0]); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[1]); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); BSONObj doc; - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[1]); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[1]); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); // tryPop does not remove documents from collection. 
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); } TEST_F(OplogBufferCollectionTest, SentinelAtEndIsReturnedAtEnd) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); const std::vector<BSONObj> oplog = {makeOplogEntry(1), BSONObj()}; ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog[0]); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog[1]); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[0]); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[1]); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); BSONObj doc; - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[0]); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[0]); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); // tryPop does not remove documents from collection. 
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); } TEST_F(OplogBufferCollectionTest, MultipleSentinelsAreReturnedInOrder) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); const std::vector<BSONObj> oplog = { BSONObj(), makeOplogEntry(1), BSONObj(), BSONObj(), makeOplogEntry(2), BSONObj(), }; ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); - oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.cbegin(), oplog.cend()); + oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.cbegin(), oplog.cend()); ASSERT_EQUALS(oplogBuffer.getCount(), 6UL); - _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); BSONObj doc; - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 6UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 5UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[1]); ASSERT_EQUALS(oplogBuffer.getCount(), 5UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[1]); ASSERT_EQUALS(oplogBuffer.getCount(), 4UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 4UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 3UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); 
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 3UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[4]); ASSERT_EQUALS(oplogBuffer.getCount(), 2UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(doc, oplog[4]); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); // tryPop does not remove documents from collection. 
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); } TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsSentinel) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); unittest::Barrier barrier(2U); BSONObj oplog; @@ -965,11 +965,11 @@ TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsSentinel) { ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); barrier.countDownAndWait(); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog); peekingThread.join(); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); ASSERT_TRUE(success); - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(count, 1UL); } @@ -977,7 +977,7 @@ TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsSentinel) { TEST_F(OplogBufferCollectionTest, TwoWaitForDataInvocationsBlockAndFindSameSentinel) { auto nss = makeNamespace(_agent); OplogBufferCollection oplogBuffer(_storageInterface, nss); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); unittest::Barrier barrier(3U); BSONObj oplog; @@ -1003,13 +1003,13 @@ TEST_F(OplogBufferCollectionTest, TwoWaitForDataInvocationsBlockAndFindSameSenti ASSERT_EQUALS(oplogBuffer.getCount(), 0UL); barrier.countDownAndWait(); - oplogBuffer.pushEvenIfFull(_txn.get(), oplog); + oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog); peekingThread1.join(); peekingThread2.join(); ASSERT_EQUALS(oplogBuffer.getCount(), 1UL); ASSERT_TRUE(success1); BSONObj doc; - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_TRUE(doc.isEmpty()); ASSERT_EQUALS(count1, 1UL); ASSERT_TRUE(success2); @@ -1041,74 +1041,74 @@ TEST_F(OplogBufferCollectionTest, PeekFillsCacheWithDocumentsFromCollection) 
{ std::size_t peekCacheSize = 3U; OplogBufferCollection oplogBuffer(_storageInterface, nss, _makeOptions(3)); ASSERT_EQUALS(peekCacheSize, oplogBuffer.getOptions().peekCacheSize); - oplogBuffer.startup(_txn.get()); + oplogBuffer.startup(_opCtx.get()); std::vector<BSONObj> oplog; for (int i = 0; i < 5; ++i) { oplog.push_back(makeOplogEntry(i + 1)); }; - oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.cbegin(), oplog.cend()); - _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog); + oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.cbegin(), oplog.cend()); + _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog); // Before any peek operations, peek cache should be empty. _assertDocumentsEqualCache({}, oplogBuffer.getPeekCache_forTest()); // First peek operation should trigger a read of 'peekCacheSize' documents from the collection. BSONObj doc; - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(oplog[0], doc); _assertDocumentsEqualCache({oplog[0], oplog[1], oplog[2]}, oplogBuffer.getPeekCache_forTest()); // Repeated peek operation should not modify the cache. - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(oplog[0], doc); _assertDocumentsEqualCache({oplog[0], oplog[1], oplog[2]}, oplogBuffer.getPeekCache_forTest()); // Pop operation should remove the first element in the cache - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(oplog[0], doc); _assertDocumentsEqualCache({oplog[1], oplog[2]}, oplogBuffer.getPeekCache_forTest()); // Next peek operation should not modify the cache. 
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(oplog[1], doc); _assertDocumentsEqualCache({oplog[1], oplog[2]}, oplogBuffer.getPeekCache_forTest()); // Pop the rest of the items in the cache. - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(oplog[1], doc); _assertDocumentsEqualCache({oplog[2]}, oplogBuffer.getPeekCache_forTest()); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(oplog[2], doc); _assertDocumentsEqualCache({}, oplogBuffer.getPeekCache_forTest()); // Next peek operation should replenish the cache. // Cache size will be less than the configured 'peekCacheSize' because // there will not be enough documents left unread in the collection. - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(oplog[3], doc); _assertDocumentsEqualCache({oplog[3], oplog[4]}, oplogBuffer.getPeekCache_forTest()); // Pop the remaining documents from the buffer. - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(oplog[3], doc); _assertDocumentsEqualCache({oplog[4]}, oplogBuffer.getPeekCache_forTest()); // Verify state of cache between pops using peek. - ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(oplog[4], doc); _assertDocumentsEqualCache({oplog[4]}, oplogBuffer.getPeekCache_forTest()); - ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc)); ASSERT_BSONOBJ_EQ(oplog[4], doc); _assertDocumentsEqualCache({}, oplogBuffer.getPeekCache_forTest()); // Nothing left in the collection. 
- ASSERT_FALSE(oplogBuffer.peek(_txn.get(), &doc)); + ASSERT_FALSE(oplogBuffer.peek(_opCtx.get(), &doc)); _assertDocumentsEqualCache({}, oplogBuffer.getPeekCache_forTest()); - ASSERT_FALSE(oplogBuffer.tryPop(_txn.get(), &doc)); + ASSERT_FALSE(oplogBuffer.tryPop(_opCtx.get(), &doc)); _assertDocumentsEqualCache({}, oplogBuffer.getPeekCache_forTest()); } diff --git a/src/mongo/db/repl/oplog_buffer_proxy.cpp b/src/mongo/db/repl/oplog_buffer_proxy.cpp index e2080a94f19..675bb4d6186 100644 --- a/src/mongo/db/repl/oplog_buffer_proxy.cpp +++ b/src/mongo/db/repl/oplog_buffer_proxy.cpp @@ -44,33 +44,33 @@ OplogBuffer* OplogBufferProxy::getTarget() const { return _target.get(); } -void OplogBufferProxy::startup(OperationContext* txn) { - _target->startup(txn); +void OplogBufferProxy::startup(OperationContext* opCtx) { + _target->startup(opCtx); } -void OplogBufferProxy::shutdown(OperationContext* txn) { +void OplogBufferProxy::shutdown(OperationContext* opCtx) { { stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex); stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex); _lastPushed.reset(); _lastPeeked.reset(); } - _target->shutdown(txn); + _target->shutdown(opCtx); } -void OplogBufferProxy::pushEvenIfFull(OperationContext* txn, const Value& value) { +void OplogBufferProxy::pushEvenIfFull(OperationContext* opCtx, const Value& value) { stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex); _lastPushed = value; - _target->pushEvenIfFull(txn, value); + _target->pushEvenIfFull(opCtx, value); } -void OplogBufferProxy::push(OperationContext* txn, const Value& value) { +void OplogBufferProxy::push(OperationContext* opCtx, const Value& value) { stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex); _lastPushed = value; - _target->push(txn, value); + _target->push(opCtx, value); } -void OplogBufferProxy::pushAllNonBlocking(OperationContext* txn, +void OplogBufferProxy::pushAllNonBlocking(OperationContext* opCtx, Batch::const_iterator begin, Batch::const_iterator end) { if (begin == 
end) { @@ -78,11 +78,11 @@ void OplogBufferProxy::pushAllNonBlocking(OperationContext* txn, } stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex); _lastPushed = *(end - 1); - _target->pushAllNonBlocking(txn, begin, end); + _target->pushAllNonBlocking(opCtx, begin, end); } -void OplogBufferProxy::waitForSpace(OperationContext* txn, std::size_t size) { - _target->waitForSpace(txn, size); +void OplogBufferProxy::waitForSpace(OperationContext* opCtx, std::size_t size) { + _target->waitForSpace(opCtx, size); } bool OplogBufferProxy::isEmpty() const { @@ -101,18 +101,18 @@ std::size_t OplogBufferProxy::getCount() const { return _target->getCount(); } -void OplogBufferProxy::clear(OperationContext* txn) { +void OplogBufferProxy::clear(OperationContext* opCtx) { stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex); stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex); _lastPushed.reset(); _lastPeeked.reset(); - _target->clear(txn); + _target->clear(opCtx); } -bool OplogBufferProxy::tryPop(OperationContext* txn, Value* value) { +bool OplogBufferProxy::tryPop(OperationContext* opCtx, Value* value) { stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex); stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex); - if (!_target->tryPop(txn, value)) { + if (!_target->tryPop(opCtx, value)) { return false; } _lastPeeked.reset(); @@ -133,13 +133,13 @@ bool OplogBufferProxy::waitForData(Seconds waitDuration) { return _target->waitForData(waitDuration); } -bool OplogBufferProxy::peek(OperationContext* txn, Value* value) { +bool OplogBufferProxy::peek(OperationContext* opCtx, Value* value) { stdx::lock_guard<stdx::mutex> lk(_lastPeekedMutex); if (_lastPeeked) { *value = *_lastPeeked; return true; } - if (_target->peek(txn, value)) { + if (_target->peek(opCtx, value)) { _lastPeeked = *value; return true; } @@ -147,7 +147,7 @@ bool OplogBufferProxy::peek(OperationContext* txn, Value* value) { } boost::optional<OplogBuffer::Value> OplogBufferProxy::lastObjectPushed( - 
OperationContext* txn) const { + OperationContext* opCtx) const { stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex); if (!_lastPushed) { return boost::none; diff --git a/src/mongo/db/repl/oplog_buffer_proxy.h b/src/mongo/db/repl/oplog_buffer_proxy.h index 2624a9bb68f..ae9be1340ff 100644 --- a/src/mongo/db/repl/oplog_buffer_proxy.h +++ b/src/mongo/db/repl/oplog_buffer_proxy.h @@ -55,23 +55,23 @@ public: */ OplogBuffer* getTarget() const; - void startup(OperationContext* txn) override; - void shutdown(OperationContext* txn) override; - void pushEvenIfFull(OperationContext* txn, const Value& value) override; - void push(OperationContext* txn, const Value& value) override; - void pushAllNonBlocking(OperationContext* txn, + void startup(OperationContext* opCtx) override; + void shutdown(OperationContext* opCtx) override; + void pushEvenIfFull(OperationContext* opCtx, const Value& value) override; + void push(OperationContext* opCtx, const Value& value) override; + void pushAllNonBlocking(OperationContext* opCtx, Batch::const_iterator begin, Batch::const_iterator end) override; - void waitForSpace(OperationContext* txn, std::size_t size) override; + void waitForSpace(OperationContext* opCtx, std::size_t size) override; bool isEmpty() const override; std::size_t getMaxSize() const override; std::size_t getSize() const override; std::size_t getCount() const override; - void clear(OperationContext* txn) override; - bool tryPop(OperationContext* txn, Value* value) override; + void clear(OperationContext* opCtx) override; + bool tryPop(OperationContext* opCtx, Value* value) override; bool waitForData(Seconds waitDuration) override; - bool peek(OperationContext* txn, Value* value) override; - boost::optional<Value> lastObjectPushed(OperationContext* txn) const override; + bool peek(OperationContext* opCtx, Value* value) override; + boost::optional<Value> lastObjectPushed(OperationContext* opCtx) const override; // ---- Testing API ---- boost::optional<Value> 
getLastPeeked_forTest() const; diff --git a/src/mongo/db/repl/oplog_buffer_proxy_test.cpp b/src/mongo/db/repl/oplog_buffer_proxy_test.cpp index a59a66f0979..a372eea8c85 100644 --- a/src/mongo/db/repl/oplog_buffer_proxy_test.cpp +++ b/src/mongo/db/repl/oplog_buffer_proxy_test.cpp @@ -57,17 +57,17 @@ public: void shutdown(OperationContext*) override { shutdownCalled = true; } - void pushEvenIfFull(OperationContext* txn, const Value& value) override { - push(txn, value); + void pushEvenIfFull(OperationContext* opCtx, const Value& value) override { + push(opCtx, value); } void push(OperationContext*, const Value& value) override { values.push_back(value); } - void pushAllNonBlocking(OperationContext* txn, + void pushAllNonBlocking(OperationContext* opCtx, Batch::const_iterator begin, Batch::const_iterator end) override { for (auto i = begin; i != end; ++i) { - push(txn, *i); + push(opCtx, *i); } } void waitForSpace(OperationContext*, std::size_t) override { @@ -92,9 +92,9 @@ public: void clear(OperationContext*) override { values.clear(); } - bool tryPop(OperationContext* txn, Value* value) override { + bool tryPop(OperationContext* opCtx, Value* value) override { tryPopCalled = true; - if (!peek(txn, value)) { + if (!peek(opCtx, value)) { return false; } values.pop_front(); @@ -140,7 +140,7 @@ private: protected: OplogBufferMock* _mock = nullptr; std::unique_ptr<OplogBufferProxy> _proxy; - OperationContext* _txn = nullptr; // Not dereferenced. + OperationContext* _opCtx = nullptr; // Not dereferenced. 
}; void OplogBufferProxyTest::setUp() { @@ -165,29 +165,29 @@ TEST_F(OplogBufferProxyTest, GetTarget) { } TEST_F(OplogBufferProxyTest, Startup) { - _proxy->startup(_txn); + _proxy->startup(_opCtx); ASSERT_TRUE(_mock->startupCalled); } TEST_F(OplogBufferProxyTest, ShutdownResetsCachedValues) { auto pushValue = BSON("x" << 1); - _proxy->push(_txn, pushValue); + _proxy->push(_opCtx, pushValue); OplogBuffer::Value peekValue; - ASSERT_TRUE(_proxy->peek(_txn, &peekValue)); + ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue)); ASSERT_BSONOBJ_EQ(pushValue, peekValue); - ASSERT_NOT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn)); + ASSERT_NOT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx)); ASSERT_NOT_EQUALS(boost::none, _proxy->getLastPeeked_forTest()); - _proxy->shutdown(_txn); + _proxy->shutdown(_opCtx); ASSERT_TRUE(_mock->shutdownCalled); - ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn)); + ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx)); ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest()); } TEST_F(OplogBufferProxyTest, WaitForSpace) { - _proxy->waitForSpace(_txn, 100U); + _proxy->waitForSpace(_opCtx, 100U); ASSERT_TRUE(_mock->waitForSpaceCalled); } @@ -199,7 +199,7 @@ TEST_F(OplogBufferProxyTest, MaxSize) { TEST_F(OplogBufferProxyTest, EmptySizeAndCount) { ASSERT_TRUE(_proxy->isEmpty()); OplogBuffer::Batch values = {BSON("x" << 1), BSON("x" << 2)}; - _proxy->pushAllNonBlocking(_txn, values.cbegin(), values.cend()); + _proxy->pushAllNonBlocking(_opCtx, values.cbegin(), values.cend()); ASSERT_FALSE(_proxy->isEmpty()); ASSERT_EQUALS(values.size(), _mock->getCount()); ASSERT_EQUALS(_mock->getCount(), _proxy->getCount()); @@ -209,79 +209,79 @@ TEST_F(OplogBufferProxyTest, EmptySizeAndCount) { TEST_F(OplogBufferProxyTest, ClearResetsCachedValues) { OplogBuffer::Batch values = {BSON("x" << 1), BSON("x" << 2)}; - _proxy->pushAllNonBlocking(_txn, values.cbegin(), values.cend()); + _proxy->pushAllNonBlocking(_opCtx, values.cbegin(), 
values.cend()); ASSERT_FALSE(_mock->isEmpty()); - auto lastObjPushed = _proxy->lastObjectPushed(_txn); + auto lastObjPushed = _proxy->lastObjectPushed(_opCtx); ASSERT_NOT_EQUALS(boost::none, lastObjPushed); ASSERT_BSONOBJ_EQ(values.back(), *lastObjPushed); ASSERT_FALSE(_mock->lastObjectPushedCalled); OplogBuffer::Value peekValue; - ASSERT_TRUE(_proxy->peek(_txn, &peekValue)); + ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue)); ASSERT_NOT_EQUALS(boost::none, _proxy->getLastPeeked_forTest()); - _proxy->clear(_txn); + _proxy->clear(_opCtx); ASSERT_TRUE(_mock->isEmpty()); - ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn)); + ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx)); ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest()); } void _testPushFunctionUpdatesCachedLastObjectPushed( - OperationContext* txn, + OperationContext* opCtx, OplogBuffer* proxy, OplogBufferMock* mock, stdx::function<std::size_t( - OperationContext* txn, OplogBuffer* proxy, const OplogBuffer::Value& value)> pushFn) { - ASSERT_EQUALS(proxy->lastObjectPushed(txn), boost::none); + OperationContext* opCtx, OplogBuffer* proxy, const OplogBuffer::Value& value)> pushFn) { + ASSERT_EQUALS(proxy->lastObjectPushed(opCtx), boost::none); ASSERT_FALSE(mock->lastObjectPushedCalled); auto val = BSON("x" << 1); - auto numPushed = pushFn(txn, proxy, val); + auto numPushed = pushFn(opCtx, proxy, val); ASSERT_EQUALS(numPushed, mock->values.size()); ASSERT_BSONOBJ_EQ(val, mock->values.back()); - auto lastObjPushed = proxy->lastObjectPushed(txn); + auto lastObjPushed = proxy->lastObjectPushed(opCtx); ASSERT_NOT_EQUALS(boost::none, lastObjPushed); ASSERT_BSONOBJ_EQ(val, *lastObjPushed); ASSERT_FALSE(mock->lastObjectPushedCalled); } TEST_F(OplogBufferProxyTest, PushEvenIfFullUpdatesCachedLastObjectPushed) { - auto pushFn = [](OperationContext* txn, OplogBuffer* proxy, const OplogBuffer::Value& value) { - proxy->pushEvenIfFull(txn, value); + auto pushFn = [](OperationContext* opCtx, OplogBuffer* 
proxy, const OplogBuffer::Value& value) { + proxy->pushEvenIfFull(opCtx, value); return 1U; }; - _testPushFunctionUpdatesCachedLastObjectPushed(_txn, _proxy.get(), _mock, pushFn); + _testPushFunctionUpdatesCachedLastObjectPushed(_opCtx, _proxy.get(), _mock, pushFn); } TEST_F(OplogBufferProxyTest, PushUpdatesCachedLastObjectPushed) { - auto pushFn = [](OperationContext* txn, OplogBuffer* proxy, const OplogBuffer::Value& value) { - proxy->push(txn, value); + auto pushFn = [](OperationContext* opCtx, OplogBuffer* proxy, const OplogBuffer::Value& value) { + proxy->push(opCtx, value); return 1U; }; - _testPushFunctionUpdatesCachedLastObjectPushed(_txn, _proxy.get(), _mock, pushFn); + _testPushFunctionUpdatesCachedLastObjectPushed(_opCtx, _proxy.get(), _mock, pushFn); } TEST_F(OplogBufferProxyTest, PushAllNonBlockingUpdatesCachedLastObjectPushed) { - auto pushFn = [](OperationContext* txn, OplogBuffer* proxy, const OplogBuffer::Value& value) { + auto pushFn = [](OperationContext* opCtx, OplogBuffer* proxy, const OplogBuffer::Value& value) { OplogBuffer::Batch values = {BSON("x" << 2), value}; - proxy->pushAllNonBlocking(txn, values.cbegin(), values.cend()); + proxy->pushAllNonBlocking(opCtx, values.cbegin(), values.cend()); return values.size(); }; - _testPushFunctionUpdatesCachedLastObjectPushed(_txn, _proxy.get(), _mock, pushFn); + _testPushFunctionUpdatesCachedLastObjectPushed(_opCtx, _proxy.get(), _mock, pushFn); } TEST_F(OplogBufferProxyTest, PushAllNonBlockingDoesNotUpdateCachedLastObjectPushedOnEmptyBatch) { OplogBuffer::Batch values; - _proxy->pushAllNonBlocking(_txn, values.cbegin(), values.cend()); + _proxy->pushAllNonBlocking(_opCtx, values.cbegin(), values.cend()); ASSERT_EQUALS(values.size(), _mock->values.size()); - ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn)); + ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx)); ASSERT_FALSE(_mock->lastObjectPushedCalled); } TEST_F(OplogBufferProxyTest, 
WaitForDataReturnsTrueImmediatelyIfLastObjectPushedIsCached) { - _proxy->pushEvenIfFull(_txn, BSON("x" << 1)); + _proxy->pushEvenIfFull(_opCtx, BSON("x" << 1)); ASSERT_TRUE(_proxy->waitForData(Seconds(10))); ASSERT_FALSE(_mock->waitForDataCalled); } @@ -293,15 +293,15 @@ TEST_F(OplogBufferProxyTest, WaitForDataForwardsCallToTargetIfLastObjectPushedIs TEST_F(OplogBufferProxyTest, TryPopResetsLastPushedObjectIfBufferIsEmpty) { auto pushValue = BSON("x" << 1); - _proxy->push(_txn, BSON("x" << 1)); - auto lastPushed = _proxy->lastObjectPushed(_txn); - ASSERT_NOT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn)); + _proxy->push(_opCtx, BSON("x" << 1)); + auto lastPushed = _proxy->lastObjectPushed(_opCtx); + ASSERT_NOT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx)); ASSERT_BSONOBJ_EQ(pushValue, *lastPushed); OplogBuffer::Value poppedValue; - ASSERT_TRUE(_proxy->tryPop(_txn, &poppedValue)); + ASSERT_TRUE(_proxy->tryPop(_opCtx, &poppedValue)); ASSERT_BSONOBJ_EQ(pushValue, poppedValue); - ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn)); + ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx)); // waitForData should forward call to underlying buffer. 
ASSERT_FALSE(_proxy->waitForData(Seconds(10))); @@ -311,41 +311,41 @@ TEST_F(OplogBufferProxyTest, TryPopResetsLastPushedObjectIfBufferIsEmpty) { TEST_F(OplogBufferProxyTest, PeekCachesFrontOfBuffer) { OplogBuffer::Value peekValue; ASSERT_FALSE(_mock->peekCalled); - ASSERT_FALSE(_proxy->peek(_txn, &peekValue)); + ASSERT_FALSE(_proxy->peek(_opCtx, &peekValue)); ASSERT_TRUE(_mock->peekCalled); ASSERT_TRUE(peekValue.isEmpty()); _mock->peekCalled = false; OplogBuffer::Batch values = {BSON("x" << 1), BSON("x" << 2)}; - _proxy->pushAllNonBlocking(_txn, values.cbegin(), values.cend()); + _proxy->pushAllNonBlocking(_opCtx, values.cbegin(), values.cend()); ASSERT_EQUALS(values.size(), _mock->values.size()); - ASSERT_TRUE(_proxy->peek(_txn, &peekValue)); + ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue)); ASSERT_TRUE(_mock->peekCalled); ASSERT_BSONOBJ_EQ(values.front(), peekValue); _mock->peekCalled = false; peekValue = OplogBuffer::Value(); - ASSERT_TRUE(_proxy->peek(_txn, &peekValue)); + ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue)); ASSERT_FALSE(_mock->peekCalled); ASSERT_BSONOBJ_EQ(values.front(), peekValue); } TEST_F(OplogBufferProxyTest, TryPopClearsCachedFrontValue) { OplogBuffer::Batch values = {BSON("x" << 1), BSON("x" << 2)}; - _proxy->pushAllNonBlocking(_txn, values.cbegin(), values.cend()); + _proxy->pushAllNonBlocking(_opCtx, values.cbegin(), values.cend()); ASSERT_EQUALS(values.size(), _mock->values.size()); // Peek and pop first value {x: 1}. 
OplogBuffer::Value peekValue; - ASSERT_TRUE(_proxy->peek(_txn, &peekValue)); + ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue)); ASSERT_TRUE(_mock->peekCalled); ASSERT_BSONOBJ_EQ(values.front(), peekValue); _mock->peekCalled = false; peekValue = OplogBuffer::Value(); OplogBuffer::Value poppedValue; - ASSERT_TRUE(_proxy->tryPop(_txn, &poppedValue)); + ASSERT_TRUE(_proxy->tryPop(_opCtx, &poppedValue)); ASSERT_TRUE(_mock->tryPopCalled); ASSERT_BSONOBJ_EQ(values.front(), poppedValue); ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest()); @@ -353,14 +353,14 @@ TEST_F(OplogBufferProxyTest, TryPopClearsCachedFrontValue) { poppedValue = OplogBuffer::Value(); // Peek and pop second value {x: 2}. - ASSERT_TRUE(_proxy->peek(_txn, &peekValue)); + ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue)); ASSERT_TRUE(_mock->peekCalled); ASSERT_BSONOBJ_EQ(values.back(), peekValue); ASSERT_NOT_EQUALS(boost::none, _proxy->getLastPeeked_forTest()); _mock->peekCalled = false; peekValue = OplogBuffer::Value(); - ASSERT_TRUE(_proxy->tryPop(_txn, &poppedValue)); + ASSERT_TRUE(_proxy->tryPop(_opCtx, &poppedValue)); ASSERT_TRUE(_mock->tryPopCalled); ASSERT_BSONOBJ_EQ(values.back(), poppedValue); ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest()); @@ -368,12 +368,12 @@ TEST_F(OplogBufferProxyTest, TryPopClearsCachedFrontValue) { poppedValue = OplogBuffer::Value(); // Peek and pop empty buffer. 
- ASSERT_FALSE(_proxy->peek(_txn, &peekValue)); + ASSERT_FALSE(_proxy->peek(_opCtx, &peekValue)); ASSERT_TRUE(_mock->peekCalled); ASSERT_TRUE(peekValue.isEmpty()); ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest()); - ASSERT_FALSE(_proxy->tryPop(_txn, &poppedValue)); + ASSERT_FALSE(_proxy->tryPop(_opCtx, &poppedValue)); ASSERT_TRUE(_mock->tryPopCalled); ASSERT_TRUE(poppedValue.isEmpty()); ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest()); diff --git a/src/mongo/db/repl/oplog_interface_local.cpp b/src/mongo/db/repl/oplog_interface_local.cpp index 03b93892885..a68fd8db0d6 100644 --- a/src/mongo/db/repl/oplog_interface_local.cpp +++ b/src/mongo/db/repl/oplog_interface_local.cpp @@ -43,7 +43,7 @@ namespace { class OplogIteratorLocal : public OplogInterface::Iterator { public: - OplogIteratorLocal(OperationContext* txn, const std::string& collectionName); + OplogIteratorLocal(OperationContext* opCtx, const std::string& collectionName); StatusWith<Value> next() override; @@ -55,12 +55,12 @@ private: std::unique_ptr<PlanExecutor> _exec; }; -OplogIteratorLocal::OplogIteratorLocal(OperationContext* txn, const std::string& collectionName) - : _transaction(txn, MODE_IS), - _dbLock(txn->lockState(), nsToDatabase(collectionName), MODE_IS), - _collectionLock(txn->lockState(), collectionName, MODE_S), - _ctx(txn, collectionName), - _exec(InternalPlanner::collectionScan(txn, +OplogIteratorLocal::OplogIteratorLocal(OperationContext* opCtx, const std::string& collectionName) + : _transaction(opCtx, MODE_IS), + _dbLock(opCtx->lockState(), nsToDatabase(collectionName), MODE_IS), + _collectionLock(opCtx->lockState(), collectionName, MODE_S), + _ctx(opCtx, collectionName), + _exec(InternalPlanner::collectionScan(opCtx, collectionName, _ctx.db()->getCollection(collectionName), PlanExecutor::YIELD_MANUAL, @@ -84,20 +84,21 @@ StatusWith<OplogInterface::Iterator::Value> OplogIteratorLocal::next() { } // namespace -OplogInterfaceLocal::OplogInterfaceLocal(OperationContext* 
txn, const std::string& collectionName) - : _txn(txn), _collectionName(collectionName) { - invariant(txn); +OplogInterfaceLocal::OplogInterfaceLocal(OperationContext* opCtx, const std::string& collectionName) + : _opCtx(opCtx), _collectionName(collectionName) { + invariant(opCtx); invariant(!collectionName.empty()); } std::string OplogInterfaceLocal::toString() const { return str::stream() << "LocalOplogInterface: " "operation context: " - << _txn->getOpID() << "; collection: " << _collectionName; + << _opCtx->getOpID() << "; collection: " << _collectionName; } std::unique_ptr<OplogInterface::Iterator> OplogInterfaceLocal::makeIterator() const { - return std::unique_ptr<OplogInterface::Iterator>(new OplogIteratorLocal(_txn, _collectionName)); + return std::unique_ptr<OplogInterface::Iterator>( + new OplogIteratorLocal(_opCtx, _collectionName)); } } // namespace repl diff --git a/src/mongo/db/repl/oplog_interface_local.h b/src/mongo/db/repl/oplog_interface_local.h index 32c9adc4377..577dbe8d7fa 100644 --- a/src/mongo/db/repl/oplog_interface_local.h +++ b/src/mongo/db/repl/oplog_interface_local.h @@ -42,12 +42,12 @@ namespace repl { class OplogInterfaceLocal : public OplogInterface { public: - OplogInterfaceLocal(OperationContext* txn, const std::string& collectionName); + OplogInterfaceLocal(OperationContext* opCtx, const std::string& collectionName); std::string toString() const override; std::unique_ptr<OplogInterface::Iterator> makeIterator() const override; private: - OperationContext* _txn; + OperationContext* _opCtx; std::string _collectionName; }; diff --git a/src/mongo/db/repl/oplogreader.cpp b/src/mongo/db/repl/oplogreader.cpp index 92bc6648b8b..665554c25b2 100644 --- a/src/mongo/db/repl/oplogreader.cpp +++ b/src/mongo/db/repl/oplogreader.cpp @@ -152,7 +152,7 @@ Status OplogReader::_compareRequiredOpTimeWithQueryResponse(const OpTime& requir return Status::OK(); } -void OplogReader::connectToSyncSource(OperationContext* txn, +void 
OplogReader::connectToSyncSource(OperationContext* opCtx, const OpTime& lastOpTimeFetched, const OpTime& requiredOpTime, ReplicationCoordinator* replCoord) { diff --git a/src/mongo/db/repl/oplogreader.h b/src/mongo/db/repl/oplogreader.h index 1434125697a..641315b748f 100644 --- a/src/mongo/db/repl/oplogreader.h +++ b/src/mongo/db/repl/oplogreader.h @@ -155,7 +155,7 @@ public: * sync source blacklist. * This function may throw DB exceptions. */ - void connectToSyncSource(OperationContext* txn, + void connectToSyncSource(OperationContext* opCtx, const OpTime& lastOpTimeFetched, const OpTime& requiredOpTime, ReplicationCoordinator* replCoord); diff --git a/src/mongo/db/repl/repl_client_info.cpp b/src/mongo/db/repl/repl_client_info.cpp index 3e98a0cb9d3..8064f8c5fc0 100644 --- a/src/mongo/db/repl/repl_client_info.cpp +++ b/src/mongo/db/repl/repl_client_info.cpp @@ -48,9 +48,10 @@ void ReplClientInfo::setLastOp(const OpTime& ot) { _lastOp = ot; } -void ReplClientInfo::setLastOpToSystemLastOpTime(OperationContext* txn) { - ReplicationCoordinator* replCoord = repl::ReplicationCoordinator::get(txn->getServiceContext()); - if (replCoord->isReplEnabled() && txn->writesAreReplicated()) { +void ReplClientInfo::setLastOpToSystemLastOpTime(OperationContext* opCtx) { + ReplicationCoordinator* replCoord = + repl::ReplicationCoordinator::get(opCtx->getServiceContext()); + if (replCoord->isReplEnabled() && opCtx->writesAreReplicated()) { setLastOp(replCoord->getMyLastAppliedOpTime()); } } diff --git a/src/mongo/db/repl/repl_client_info.h b/src/mongo/db/repl/repl_client_info.h index 3c81953d65f..3c3910cf78d 100644 --- a/src/mongo/db/repl/repl_client_info.h +++ b/src/mongo/db/repl/repl_client_info.h @@ -76,7 +76,7 @@ public: * This is necessary when doing no-op writes, as we need to set the client's lastOp to a proper * value for write concern wait to work. 
*/ - void setLastOpToSystemLastOpTime(OperationContext* txn); + void setLastOpToSystemLastOpTime(OperationContext* opCtx); private: static const long long kUninitializedTerm = -1; diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp index c2793e42764..f3a0cd748e2 100644 --- a/src/mongo/db/repl/repl_set_commands.cpp +++ b/src/mongo/db/repl/repl_set_commands.cpp @@ -93,7 +93,7 @@ public: return Status::OK(); } CmdReplSetTest() : ReplSetCommand("replSetTest") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -165,7 +165,7 @@ MONGO_INITIALIZER(RegisterReplSetTestCmd)(InitializerContext* context) { class CmdReplSetGetRBID : public ReplSetCommand { public: CmdReplSetGetRBID() : ReplSetCommand("replSetGetRBID") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -188,14 +188,14 @@ public: help << "\nhttp://dochub.mongodb.org/core/replicasetcommands"; } CmdReplSetGetStatus() : ReplSetCommand("replSetGetStatus", true) {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result) { if (cmdObj["forShell"].trueValue()) - LastError::get(txn->getClient()).disable(); + LastError::get(opCtx->getClient()).disable(); Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result); if (!status.isOK()) @@ -230,7 +230,7 @@ public: help << "\nhttp://dochub.mongodb.org/core/replicasetcommands"; } CmdReplSetGetConfig() : ReplSetCommand("replSetGetConfig", true) {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -330,7 +330,7 @@ public: h << "Initiate/christen a replica set."; h << "\nhttp://dochub.mongodb.org/core/replicasetcommands"; } - virtual bool run(OperationContext* txn, + virtual bool 
run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -342,7 +342,7 @@ public: } std::string replSetString = - ReplicationCoordinator::get(txn)->getSettings().getReplSetString(); + ReplicationCoordinator::get(opCtx)->getSettings().getReplSetString(); if (replSetString.empty()) { return appendCommandStatus(result, Status(ErrorCodes::NoReplicationEnabled, @@ -356,7 +356,7 @@ public: result.append("info2", noConfigMessage); log() << "initiate : " << noConfigMessage; - ReplicationCoordinatorExternalStateImpl externalState(StorageInterface::get(txn)); + ReplicationCoordinatorExternalStateImpl externalState(StorageInterface::get(opCtx)); std::string name; std::vector<HostAndPort> seeds; parseReplSetSeedList(&externalState, replSetString, &name, &seeds); // may throw... @@ -386,7 +386,7 @@ public: } Status status = - getGlobalReplicationCoordinator()->processReplSetInitiate(txn, configObj, &result); + getGlobalReplicationCoordinator()->processReplSetInitiate(opCtx, configObj, &result); return appendCommandStatus(result, status); } @@ -404,7 +404,7 @@ public: help << "\nhttp://dochub.mongodb.org/core/replicasetcommands"; } CmdReplSetReconfig() : ReplSetCommand("replSetReconfig") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -424,15 +424,15 @@ public: parsedArgs.newConfigObj = cmdObj["replSetReconfig"].Obj(); parsedArgs.force = cmdObj.hasField("force") && cmdObj["force"].trueValue(); status = - getGlobalReplicationCoordinator()->processReplSetReconfig(txn, parsedArgs, &result); + getGlobalReplicationCoordinator()->processReplSetReconfig(opCtx, parsedArgs, &result); - ScopedTransaction scopedXact(txn, MODE_X); - Lock::GlobalWrite globalWrite(txn->lockState()); + ScopedTransaction scopedXact(opCtx, MODE_X); + Lock::GlobalWrite globalWrite(opCtx->lockState()); - WriteUnitOfWork wuow(txn); + WriteUnitOfWork wuow(opCtx); if (status.isOK() && !parsedArgs.force) { 
getGlobalServiceContext()->getOpObserver()->onOpMessage( - txn, + opCtx, BSON("msg" << "Reconfig set" << "version" @@ -462,7 +462,7 @@ public: help << "\nhttp://dochub.mongodb.org/core/replicasetcommands"; } CmdReplSetFreeze() : ReplSetCommand("replSetFreeze") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -494,7 +494,7 @@ public: help << "http://dochub.mongodb.org/core/replicasetcommands"; } CmdReplSetStepDown() : ReplSetCommand("replSetStepDown") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -543,7 +543,7 @@ public: log() << "Attempting to step down in response to replSetStepDown command"; status = getGlobalReplicationCoordinator()->stepDown( - txn, force, Seconds(secondaryCatchUpPeriodSecs), Seconds(stepDownForSecs)); + opCtx, force, Seconds(secondaryCatchUpPeriodSecs), Seconds(stepDownForSecs)); return appendCommandStatus(result, status); } @@ -560,7 +560,7 @@ public: help << "Enable or disable maintenance mode."; } CmdReplSetMaintenance() : ReplSetCommand("replSetMaintenance") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -589,7 +589,7 @@ public: "in-progress initial sync."; } CmdReplSetSyncFrom() : ReplSetCommand("replSetSyncFrom") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -606,7 +606,7 @@ public: return appendCommandStatus(result, getGlobalReplicationCoordinator()->processReplSetSyncFrom( - txn, targetHostAndPort, &result)); + opCtx, targetHostAndPort, &result)); } private: @@ -618,13 +618,13 @@ private: class CmdReplSetUpdatePosition : public ReplSetCommand { public: CmdReplSetUpdatePosition() : ReplSetCommand("replSetUpdatePosition") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const 
string&, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result) { - auto replCoord = repl::ReplicationCoordinator::get(txn->getClient()->getServiceContext()); + auto replCoord = repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext()); Status status = replCoord->checkReplEnabledForCommand(&result); if (!status.isOK()) @@ -684,7 +684,7 @@ namespace { * The "local" database does NOT count except for "rs.oplog" collection. * Used to set the hasData field on replset heartbeat command response. */ -bool replHasDatabases(OperationContext* txn) { +bool replHasDatabases(OperationContext* opCtx) { std::vector<string> names; StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine(); storageEngine->listDatabases(&names); @@ -697,7 +697,7 @@ bool replHasDatabases(OperationContext* txn) { // we have a local database. return true if oplog isn't empty BSONObj o; - if (Helpers::getSingleton(txn, repl::rsOplogName.c_str(), o)) { + if (Helpers::getSingleton(opCtx, repl::rsOplogName.c_str(), o)) { return true; } } @@ -718,7 +718,7 @@ MONGO_FP_DECLARE(rsDelayHeartbeatResponse); class CmdReplSetHeartbeat : public ReplSetCommand { public: CmdReplSetHeartbeat() : ReplSetCommand("replSetHeartbeat") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -740,7 +740,7 @@ public: /* we want to keep heartbeat connections open when relinquishing primary. tag them here. */ transport::Session::TagMask originalTag = 0; - auto session = txn->getClient()->session(); + auto session = opCtx->getClient()->session(); if (session) { originalTag = session->getTags(); session->replaceTags(originalTag | transport::Session::kKeepOpen); @@ -777,7 +777,7 @@ public: // ugh. 
if (args.getCheckEmpty()) { - result.append("hasData", replHasDatabases(txn)); + result.append("hasData", replHasDatabases(opCtx)); } ReplSetHeartbeatResponse response; @@ -795,7 +795,7 @@ class CmdReplSetFresh : public ReplSetCommand { public: CmdReplSetFresh() : ReplSetCommand("replSetFresh") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -828,7 +828,7 @@ public: CmdReplSetElect() : ReplSetCommand("replSetElect") {} private: - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -862,7 +862,7 @@ class CmdReplSetStepUp : public ReplSetCommand { public: CmdReplSetStepUp() : ReplSetCommand("replSetStepUp") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, diff --git a/src/mongo/db/repl/repl_set_request_votes.cpp b/src/mongo/db/repl/repl_set_request_votes.cpp index 02eb5311cb3..5577952807d 100644 --- a/src/mongo/db/repl/repl_set_request_votes.cpp +++ b/src/mongo/db/repl/repl_set_request_votes.cpp @@ -47,7 +47,7 @@ public: CmdReplSetRequestVotes() : ReplSetCommand("replSetRequestVotes") {} private: - bool run(OperationContext* txn, + bool run(OperationContext* opCtx, const std::string&, BSONObj& cmdObj, int, @@ -67,7 +67,7 @@ private: // We want to keep request vote connection open when relinquishing primary. // Tag it here. 
transport::Session::TagMask originalTag = 0; - auto session = txn->getClient()->session(); + auto session = opCtx->getClient()->session(); if (session) { originalTag = session->getTags(); session->replaceTags(originalTag | transport::Session::kKeepOpen); @@ -82,7 +82,7 @@ private: ReplSetRequestVotesResponse response; status = getGlobalReplicationCoordinator()->processReplSetRequestVotes( - txn, parsedArgs, &response); + opCtx, parsedArgs, &response); response.addToBSON(&result); return appendCommandStatus(result, status); } diff --git a/src/mongo/db/repl/repl_set_web_handler.cpp b/src/mongo/db/repl/repl_set_web_handler.cpp index 3f67cd9a45c..404dc659ae7 100644 --- a/src/mongo/db/repl/repl_set_web_handler.cpp +++ b/src/mongo/db/repl/repl_set_web_handler.cpp @@ -51,7 +51,7 @@ public: return str::startsWith(url, "/_replSet"); } - virtual void handle(OperationContext* txn, + virtual void handle(OperationContext* opCtx, const char* rq, const std::string& url, BSONObj params, @@ -59,12 +59,12 @@ public: int& responseCode, std::vector<std::string>& headers, const SockAddr& from) { - responseMsg = _replSet(txn); + responseMsg = _replSet(opCtx); responseCode = 200; } /* /_replSet show replica set status in html format */ - std::string _replSet(OperationContext* txn) { + std::string _replSet(OperationContext* opCtx) { std::stringstream s; s << start("Replica Set Status " + prettyHostName()); s << p(a("/", "back", "Home") + " | " + diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h index 1d3dda6f5b6..d5557213bb4 100644 --- a/src/mongo/db/repl/replication_coordinator.h +++ b/src/mongo/db/repl/replication_coordinator.h @@ -123,14 +123,14 @@ public: * components of the replication system to start up whatever threads and do whatever * initialization they need. 
*/ - virtual void startup(OperationContext* txn) = 0; + virtual void startup(OperationContext* opCtx) = 0; /** * Does whatever cleanup is required to stop replication, including instructing the other * components of the replication system to shut down and stop any threads they are using, * blocking until all replication-related shutdown tasks are complete. */ - virtual void shutdown(OperationContext* txn) = 0; + virtual void shutdown(OperationContext* opCtx) = 0; /** * Returns a pointer to the ReplicationExecutor. @@ -194,7 +194,7 @@ public: * writeConcern.wTimeout of -1 indicates return immediately after checking. Return codes: * ErrorCodes::WriteConcernFailed if the writeConcern.wTimeout is reached before * the data has been sufficiently replicated - * ErrorCodes::ExceededTimeLimit if the txn->getMaxTimeMicrosRemaining is reached before + * ErrorCodes::ExceededTimeLimit if the opCtx->getMaxTimeMicrosRemaining is reached before * the data has been sufficiently replicated * ErrorCodes::NotMaster if the node is not Primary/Master * ErrorCodes::UnknownReplWriteConcern if the writeConcern.wMode contains a write concern @@ -202,16 +202,16 @@ public: * ErrorCodes::ShutdownInProgress if we are mid-shutdown * ErrorCodes::Interrupted if the operation was killed with killop() */ - virtual StatusAndDuration awaitReplication(OperationContext* txn, + virtual StatusAndDuration awaitReplication(OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern) = 0; /** * Like awaitReplication(), above, but waits for the replication of the last operation - * performed on the client associated with "txn". + * performed on the client associated with "opCtx". */ virtual StatusAndDuration awaitReplicationOfLastOpForClient( - OperationContext* txn, const WriteConcernOptions& writeConcern) = 0; + OperationContext* opCtx, const WriteConcernOptions& writeConcern) = 0; /** * Causes this node to relinquish being primary for at least 'stepdownTime'. 
If 'force' is @@ -222,7 +222,7 @@ public: * ErrorCodes::SecondaryAheadOfPrimary if we are primary but there is another node that * seems to be ahead of us in replication, and Status::OK otherwise. */ - virtual Status stepDown(OperationContext* txn, + virtual Status stepDown(OperationContext* opCtx, bool force, const Milliseconds& waitTime, const Milliseconds& stepdownTime) = 0; @@ -245,14 +245,14 @@ public: * NOTE: This function can only be meaningfully called while the caller holds the global * lock in some mode other than MODE_NONE. */ - virtual bool canAcceptWritesForDatabase(OperationContext* txn, StringData dbName) = 0; + virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName) = 0; /** * Version which does not check for the global lock. Do not use in new code. * Without the global lock held, the return value may be inaccurate by the time * the function returns. */ - virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* txn, StringData dbName) = 0; + virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName) = 0; /** * Returns true if it is valid for this node to accept writes on the given namespace. @@ -260,14 +260,14 @@ public: * The result of this function should be consistent with canAcceptWritesForDatabase() * for the database the namespace refers to, with additional checks on the collection. */ - virtual bool canAcceptWritesFor(OperationContext* txn, const NamespaceString& ns) = 0; + virtual bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceString& ns) = 0; /** * Version which does not check for the global lock. Do not use in new code. * Without the global lock held, the return value may be inaccurate by the time * the function returns. 
*/ - virtual bool canAcceptWritesFor_UNSAFE(OperationContext* txn, const NamespaceString& ns) = 0; + virtual bool canAcceptWritesFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns) = 0; /** * Checks if the current replica set configuration can satisfy the given write concern. @@ -284,7 +284,7 @@ public: * Returns Status::OK() if it is valid for this node to serve reads on the given collection * and an errorcode indicating why the node cannot if it cannot. */ - virtual Status checkCanServeReadsFor(OperationContext* txn, + virtual Status checkCanServeReadsFor(OperationContext* opCtx, const NamespaceString& ns, bool slaveOk) = 0; @@ -293,7 +293,7 @@ public: * Without the global lock held, the return value may be inaccurate by the time * the function returns. */ - virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* txn, + virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns, bool slaveOk) = 0; @@ -303,7 +303,8 @@ public: * The namespace "ns" is passed in because the "local" database is usually writable * and we need to enforce the constraints for it. */ - virtual bool shouldRelaxIndexConstraints(OperationContext* txn, const NamespaceString& ns) = 0; + virtual bool shouldRelaxIndexConstraints(OperationContext* opCtx, + const NamespaceString& ns) = 0; /** * Updates our internal tracking of the last OpTime applied for the given slave @@ -378,7 +379,7 @@ public: * * Returns whether the wait was successful. */ - virtual Status waitUntilOpTimeForRead(OperationContext* txn, + virtual Status waitUntilOpTimeForRead(OperationContext* opCtx, const ReadConcernArgs& settings) = 0; /** @@ -486,7 +487,7 @@ public: * steps down and steps up so quickly that the applier signals drain complete in the wrong * term. 
*/ - virtual void signalDrainComplete(OperationContext* txn, long long termWhenBufferIsEmpty) = 0; + virtual void signalDrainComplete(OperationContext* opCtx, long long termWhenBufferIsEmpty) = 0; /** * Waits duration of 'timeout' for applier to finish draining its buffer of operations. @@ -527,7 +528,7 @@ public: /** * Does an initial sync of data, after dropping existing data. */ - virtual Status resyncData(OperationContext* txn, bool waitUntilCompleted) = 0; + virtual Status resyncData(OperationContext* opCtx, bool waitUntilCompleted) = 0; /** * Handles an incoming isMaster command for a replica set node. Should not be @@ -592,7 +593,7 @@ public: * returns Status::OK if the sync target could be set and an ErrorCode indicating why it * couldn't otherwise. */ - virtual Status processReplSetSyncFrom(OperationContext* txn, + virtual Status processReplSetSyncFrom(OperationContext* opCtx, const HostAndPort& target, BSONObjBuilder* resultObj) = 0; @@ -625,7 +626,7 @@ public: * Handles an incoming replSetReconfig command. Adds BSON to 'resultObj'; * returns a Status with either OK or an error message. */ - virtual Status processReplSetReconfig(OperationContext* txn, + virtual Status processReplSetReconfig(OperationContext* opCtx, const ReplSetReconfigArgs& args, BSONObjBuilder* resultObj) = 0; @@ -634,7 +635,7 @@ public: * configuration to use. * Adds BSON to 'resultObj'; returns a Status with either OK or an error message. */ - virtual Status processReplSetInitiate(OperationContext* txn, + virtual Status processReplSetInitiate(OperationContext* opCtx, const BSONObj& configObj, BSONObjBuilder* resultObj) = 0; @@ -710,7 +711,7 @@ public: * * Returns ErrorCodes::IllegalOperation if we're not running with master/slave replication. 
*/ - virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake) = 0; + virtual Status processHandshake(OperationContext* opCtx, const HandshakeArgs& handshake) = 0; /** * Returns a bool indicating whether or not this node builds indexes. @@ -749,7 +750,7 @@ public: * Loads the optime from the last op in the oplog into the coordinator's lastAppliedOpTime and * lastDurableOpTime values. */ - virtual void resetLastOpTimesFromOplog(OperationContext* txn) = 0; + virtual void resetLastOpTimesFromOplog(OperationContext* opCtx) = 0; /** * Returns the OpTime of the latest replica set-committed op known to this server. @@ -762,7 +763,7 @@ public: * Handles an incoming replSetRequestVotes command. * Adds BSON to 'resultObj'; returns a Status with either OK or an error message. */ - virtual Status processReplSetRequestVotes(OperationContext* txn, + virtual Status processReplSetRequestVotes(OperationContext* opCtx, const ReplSetRequestVotesArgs& args, ReplSetRequestVotesResponse* response) = 0; @@ -803,7 +804,7 @@ public: * the rest of the work, because the term is still the same). * Returns StaleTerm if the supplied term was higher than the current term. */ - virtual Status updateTerm(OperationContext* txn, long long term) = 0; + virtual Status updateTerm(OperationContext* opCtx, long long term) = 0; /** * Reserves a unique SnapshotName. @@ -819,7 +820,7 @@ public: * A null OperationContext can be used in cases where the snapshot to wait for should not be * adjusted. */ - virtual SnapshotName reserveSnapshotName(OperationContext* txn) = 0; + virtual SnapshotName reserveSnapshotName(OperationContext* opCtx) = 0; /** * Signals the SnapshotThread, if running, to take a forced snapshot even if the global @@ -833,16 +834,16 @@ public: * Creates a new snapshot in the storage engine and registers it for use in the replication * coordinator. 
*/ - virtual void createSnapshot(OperationContext* txn, + virtual void createSnapshot(OperationContext* opCtx, OpTime timeOfSnapshot, SnapshotName name) = 0; /** * Blocks until either the current committed snapshot is at least as high as 'untilSnapshot', * or we are interrupted for any reason, including shutdown or maxTimeMs expiration. - * 'txn' is used to checkForInterrupt and enforce maxTimeMS. + * 'opCtx' is used to checkForInterrupt and enforce maxTimeMS. */ - virtual void waitUntilSnapshotCommitted(OperationContext* txn, + virtual void waitUntilSnapshotCommitted(OperationContext* opCtx, const SnapshotName& untilSnapshot) = 0; /** diff --git a/src/mongo/db/repl/replication_coordinator_external_state.h b/src/mongo/db/repl/replication_coordinator_external_state.h index 66283518bfe..05f07b8aada 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state.h +++ b/src/mongo/db/repl/replication_coordinator_external_state.h @@ -60,7 +60,7 @@ class ReplSettings; class ReplicationCoordinator; class ReplicationExecutor; -using OnInitialSyncFinishedFn = stdx::function<void(OperationContext* txn)>; +using OnInitialSyncFinishedFn = stdx::function<void(OperationContext* opCtx)>; using StartInitialSyncFn = stdx::function<void(OnInitialSyncFinishedFn callback)>; using StartSteadyReplicationFn = stdx::function<void()>; /** @@ -94,33 +94,33 @@ public: /** * Returns true if an incomplete initial sync is detected. */ - virtual bool isInitialSyncFlagSet(OperationContext* txn) = 0; + virtual bool isInitialSyncFlagSet(OperationContext* opCtx) = 0; /** * Starts steady state sync for replica set member -- legacy impl not in DataReplicator. * * NOTE: Use either this or the Master/Slave version, but not both. 
*/ - virtual void startSteadyStateReplication(OperationContext* txn, + virtual void startSteadyStateReplication(OperationContext* opCtx, ReplicationCoordinator* replCoord) = 0; - virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* txn)> run) = 0; + virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* opCtx)> run) = 0; /** * Stops the data replication threads = bgsync, applier, reporter. */ - virtual void stopDataReplication(OperationContext* txn) = 0; + virtual void stopDataReplication(OperationContext* opCtx) = 0; /** * Starts the Master/Slave threads and sets up logOp */ - virtual void startMasterSlave(OperationContext* txn) = 0; + virtual void startMasterSlave(OperationContext* opCtx) = 0; /** * Performs any necessary external state specific shutdown tasks, such as cleaning up * the threads it started. */ - virtual void shutdown(OperationContext* txn) = 0; + virtual void shutdown(OperationContext* opCtx) = 0; /** * Returns task executor for scheduling tasks to be run asynchronously. @@ -136,12 +136,12 @@ public: * Runs the repair database command on the "local" db, if the storage engine is MMapV1. * Note: Used after initial sync to compact the database files. */ - virtual Status runRepairOnLocalDB(OperationContext* txn) = 0; + virtual Status runRepairOnLocalDB(OperationContext* opCtx) = 0; /** * Creates the oplog, writes the first entry and stores the replica set config document. */ - virtual Status initializeReplSetStorage(OperationContext* txn, const BSONObj& config) = 0; + virtual Status initializeReplSetStorage(OperationContext* opCtx, const BSONObj& config) = 0; /** * Called when a node on way to becoming a primary is ready to leave drain mode. It is called @@ -149,7 +149,7 @@ public: * * Throws on errors. 
*/ - virtual void onDrainComplete(OperationContext* txn) = 0; + virtual void onDrainComplete(OperationContext* opCtx) = 0; /** * Called as part of the process of transitioning to primary and run with the global X lock and @@ -163,7 +163,7 @@ public: * * Throws on errors. */ - virtual OpTime onTransitionToPrimary(OperationContext* txn, bool isV1ElectionProtocol) = 0; + virtual OpTime onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) = 0; /** * Simple wrapper around SyncSourceFeedback::forwardSlaveProgress. Signals to the @@ -188,22 +188,23 @@ public: /** * Gets the replica set config document from local storage, or returns an error. */ - virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn) = 0; + virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* opCtx) = 0; /** * Stores the replica set config document in local storage, or returns an error. */ - virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config) = 0; + virtual Status storeLocalConfigDocument(OperationContext* opCtx, const BSONObj& config) = 0; /** * Gets the replica set lastVote document from local storage, or returns an error. */ - virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn) = 0; + virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* opCtx) = 0; /** * Stores the replica set lastVote document in local storage, or returns an error. */ - virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote) = 0; + virtual Status storeLocalLastVoteDocument(OperationContext* opCtx, + const LastVote& lastVote) = 0; /** * Sets the global opTime to be 'newTime'. @@ -214,20 +215,20 @@ public: * Gets the last optime of an operation performed on this host, from stable * storage. 
*/ - virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn) = 0; + virtual StatusWith<OpTime> loadLastOpTime(OperationContext* opCtx) = 0; /** * Cleaning up the oplog, by potentially truncating: * If we are recovering from a failed batch then minvalid.start though minvalid.end need * to be removed from the oplog before we can start applying operations. */ - virtual void cleanUpLastApplyBatch(OperationContext* txn) = 0; + virtual void cleanUpLastApplyBatch(OperationContext* opCtx) = 0; /** * Returns the HostAndPort of the remote client connected to us that initiated the operation - * represented by "txn". + * represented by "opCtx". */ - virtual HostAndPort getClientHostAndPort(const OperationContext* txn) = 0; + virtual HostAndPort getClientHostAndPort(const OperationContext* opCtx) = 0; /** * Closes all connections in the given TransportLayer except those marked with the @@ -240,7 +241,7 @@ public: * Kills all operations that have a Client that is associated with an incoming user * connection. Used during stepdown. */ - virtual void killAllUserOperations(OperationContext* txn) = 0; + virtual void killAllUserOperations(OperationContext* opCtx) = 0; /** * Resets any active sharding metadata on this server and stops any sharding-related threads @@ -279,7 +280,7 @@ public: /** * Creates a new snapshot. */ - virtual void createSnapshot(OperationContext* txn, SnapshotName name) = 0; + virtual void createSnapshot(OperationContext* opCtx, SnapshotName name) = 0; /** * Signals the SnapshotThread, if running, to take a forced snapshot even if the global @@ -305,13 +306,13 @@ public: /** * Returns true if the current storage engine supports read committed. */ - virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* txn) const = 0; + virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* opCtx) const = 0; /** * Applies the operations described in the oplog entries contained in "ops" using the * "applyOperation" function. 
*/ - virtual StatusWith<OpTime> multiApply(OperationContext* txn, + virtual StatusWith<OpTime> multiApply(OperationContext* opCtx, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation) = 0; @@ -333,13 +334,13 @@ public: * This function creates an oplog buffer of the type specified at server startup. */ virtual std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer( - OperationContext* txn) const = 0; + OperationContext* opCtx) const = 0; /** * Creates an oplog buffer suitable for steady state replication. */ virtual std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer( - OperationContext* txn) const = 0; + OperationContext* opCtx) const = 0; /** * Returns true if the user specified to use the data replicator for initial sync. diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp index 70c74cc8942..36d8a84fd06 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp @@ -195,46 +195,46 @@ ReplicationCoordinatorExternalStateImpl::ReplicationCoordinatorExternalStateImpl } ReplicationCoordinatorExternalStateImpl::~ReplicationCoordinatorExternalStateImpl() {} -bool ReplicationCoordinatorExternalStateImpl::isInitialSyncFlagSet(OperationContext* txn) { - return _storageInterface->getInitialSyncFlag(txn); +bool ReplicationCoordinatorExternalStateImpl::isInitialSyncFlagSet(OperationContext* opCtx) { + return _storageInterface->getInitialSyncFlag(opCtx); } void ReplicationCoordinatorExternalStateImpl::startInitialSync(OnInitialSyncFinishedFn finished) { - _initialSyncRunner.schedule([finished, this](OperationContext* txn, const Status& status) { + _initialSyncRunner.schedule([finished, this](OperationContext* opCtx, const Status& status) { if (status == ErrorCodes::CallbackCanceled) { return TaskRunner::NextAction::kDisposeOperationContext; } // Do initial sync. 
- syncDoInitialSync(txn, this); - finished(txn); + syncDoInitialSync(opCtx, this); + finished(opCtx); return TaskRunner::NextAction::kDisposeOperationContext; }); } void ReplicationCoordinatorExternalStateImpl::runOnInitialSyncThread( - stdx::function<void(OperationContext* txn)> run) { + stdx::function<void(OperationContext* opCtx)> run) { _initialSyncRunner.cancel(); _initialSyncRunner.join(); - _initialSyncRunner.schedule([run, this](OperationContext* txn, const Status& status) { + _initialSyncRunner.schedule([run, this](OperationContext* opCtx, const Status& status) { if (status == ErrorCodes::CallbackCanceled) { return TaskRunner::NextAction::kDisposeOperationContext; } - invariant(txn); - invariant(txn->getClient()); - run(txn); + invariant(opCtx); + invariant(opCtx->getClient()); + run(opCtx); return TaskRunner::NextAction::kDisposeOperationContext; }); } void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication( - OperationContext* txn, ReplicationCoordinator* replCoord) { + OperationContext* opCtx, ReplicationCoordinator* replCoord) { LockGuard lk(_threadMutex); invariant(replCoord); invariant(!_bgSync); log() << "Starting replication fetcher thread"; - _bgSync = stdx::make_unique<BackgroundSync>(this, makeSteadyStateOplogBuffer(txn)); - _bgSync->startup(txn); + _bgSync = stdx::make_unique<BackgroundSync>(this, makeSteadyStateOplogBuffer(opCtx)); + _bgSync->startup(opCtx); log() << "Starting replication applier thread"; invariant(!_applierThread); @@ -246,12 +246,12 @@ void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication( &SyncSourceFeedback::run, &_syncSourceFeedback, _taskExecutor.get(), _bgSync.get()))); } -void ReplicationCoordinatorExternalStateImpl::stopDataReplication(OperationContext* txn) { +void ReplicationCoordinatorExternalStateImpl::stopDataReplication(OperationContext* opCtx) { UniqueLock lk(_threadMutex); - _stopDataReplication_inlock(txn, &lk); + _stopDataReplication_inlock(opCtx, &lk); } -void 
ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(OperationContext* txn, +void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(OperationContext* opCtx, UniqueLock* lock) { // Make sue no other _stopDataReplication calls are in progress. _dataReplicationStopped.wait(*lock, [this]() { return !_stoppingDataReplication; }); @@ -270,7 +270,7 @@ void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(Operat if (oldBgSync) { log() << "Stopping replication fetcher thread"; - oldBgSync->shutdown(txn); + oldBgSync->shutdown(opCtx); } if (oldApplier) { @@ -279,7 +279,7 @@ void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(Operat } if (oldBgSync) { - oldBgSync->join(txn); + oldBgSync->join(opCtx); } _initialSyncRunner.cancel(); @@ -320,25 +320,25 @@ void ReplicationCoordinatorExternalStateImpl::startThreads(const ReplSettings& s _startedThreads = true; } -void ReplicationCoordinatorExternalStateImpl::startMasterSlave(OperationContext* txn) { - repl::startMasterSlave(txn); +void ReplicationCoordinatorExternalStateImpl::startMasterSlave(OperationContext* opCtx) { + repl::startMasterSlave(opCtx); } -void ReplicationCoordinatorExternalStateImpl::shutdown(OperationContext* txn) { +void ReplicationCoordinatorExternalStateImpl::shutdown(OperationContext* opCtx) { UniqueLock lk(_threadMutex); if (_startedThreads) { - _stopDataReplication_inlock(txn, &lk); + _stopDataReplication_inlock(opCtx, &lk); if (_snapshotThread) { log() << "Stopping replication snapshot thread"; _snapshotThread->shutdown(); } - if (_storageInterface->getOplogDeleteFromPoint(txn).isNull() && - loadLastOpTime(txn) == _storageInterface->getAppliedThrough(txn)) { + if (_storageInterface->getOplogDeleteFromPoint(opCtx).isNull() && + loadLastOpTime(opCtx) == _storageInterface->getAppliedThrough(opCtx)) { // Clear the appliedThrough marker to indicate we are consistent with the top of the // oplog. 
- _storageInterface->setAppliedThrough(txn, {}); + _storageInterface->setAppliedThrough(opCtx, {}); } if (_noopWriter) { @@ -361,95 +361,95 @@ OldThreadPool* ReplicationCoordinatorExternalStateImpl::getDbWorkThreadPool() co return _writerPool.get(); } -Status ReplicationCoordinatorExternalStateImpl::runRepairOnLocalDB(OperationContext* txn) { +Status ReplicationCoordinatorExternalStateImpl::runRepairOnLocalDB(OperationContext* opCtx) { try { - ScopedTransaction scopedXact(txn, MODE_X); - Lock::GlobalWrite globalWrite(txn->lockState()); + ScopedTransaction scopedXact(opCtx, MODE_X); + Lock::GlobalWrite globalWrite(opCtx->lockState()); StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine(); if (!engine->isMmapV1()) { return Status::OK(); } - txn->setReplicatedWrites(false); - Status status = repairDatabase(txn, engine, localDbName, false, false); + opCtx->setReplicatedWrites(false); + Status status = repairDatabase(opCtx, engine, localDbName, false, false); // Open database before returning - dbHolder().openDb(txn, localDbName); + dbHolder().openDb(opCtx, localDbName); } catch (const DBException& ex) { return ex.toStatus(); } return Status::OK(); } -Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(OperationContext* txn, +Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(OperationContext* opCtx, const BSONObj& config) { try { - createOplog(txn); + createOplog(opCtx); MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction scopedXact(txn, MODE_X); - Lock::GlobalWrite globalWrite(txn->lockState()); + ScopedTransaction scopedXact(opCtx, MODE_X); + Lock::GlobalWrite globalWrite(opCtx->lockState()); - WriteUnitOfWork wuow(txn); - Helpers::putSingleton(txn, configCollectionName, config); + WriteUnitOfWork wuow(opCtx); + Helpers::putSingleton(opCtx, configCollectionName, config); const auto msgObj = BSON("msg" << "initiating set"); - getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, msgObj); + 
getGlobalServiceContext()->getOpObserver()->onOpMessage(opCtx, msgObj); wuow.commit(); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "initiate oplog entry", "local.oplog.rs"); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "initiate oplog entry", "local.oplog.rs"); // This initializes the minvalid document with a null "ts" because older versions (<=3.2) // get angry if the minValid document is present but doesn't have a "ts" field. // Consider removing this once we no longer need to support downgrading to 3.2. - _storageInterface->setMinValidToAtLeast(txn, {}); + _storageInterface->setMinValidToAtLeast(opCtx, {}); - FeatureCompatibilityVersion::setIfCleanStartup(txn, _storageInterface); + FeatureCompatibilityVersion::setIfCleanStartup(opCtx, _storageInterface); } catch (const DBException& ex) { return ex.toStatus(); } return Status::OK(); } -void ReplicationCoordinatorExternalStateImpl::onDrainComplete(OperationContext* txn) { - invariant(!txn->lockState()->isLocked()); +void ReplicationCoordinatorExternalStateImpl::onDrainComplete(OperationContext* opCtx) { + invariant(!opCtx->lockState()->isLocked()); // If this is a config server node becoming a primary, ensure the balancer is ready to start. if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { // We must ensure the balancer has stopped because it may still be in the process of // stopping if this node was previously primary. - Balancer::get(txn)->waitForBalancerToStop(); + Balancer::get(opCtx)->waitForBalancerToStop(); } } -OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationContext* txn, +OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) { - invariant(txn->lockState()->isW()); + invariant(opCtx->lockState()->isW()); // Clear the appliedThrough marker so on startup we'll use the top of the oplog. This must be // done before we add anything to our oplog. 
- invariant(_storageInterface->getOplogDeleteFromPoint(txn).isNull()); - _storageInterface->setAppliedThrough(txn, {}); + invariant(_storageInterface->getOplogDeleteFromPoint(opCtx).isNull()); + _storageInterface->setAppliedThrough(opCtx, {}); if (isV1ElectionProtocol) { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction scopedXact(txn, MODE_X); + ScopedTransaction scopedXact(opCtx, MODE_X); - WriteUnitOfWork wuow(txn); - txn->getClient()->getServiceContext()->getOpObserver()->onOpMessage( - txn, + WriteUnitOfWork wuow(opCtx); + opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage( + opCtx, BSON("msg" << "new primary")); wuow.commit(); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( - txn, "logging transition to primary to oplog", "local.oplog.rs"); + opCtx, "logging transition to primary to oplog", "local.oplog.rs"); } - const auto opTimeToReturn = fassertStatusOK(28665, loadLastOpTime(txn)); + const auto opTimeToReturn = fassertStatusOK(28665, loadLastOpTime(opCtx)); - _shardingOnTransitionToPrimaryHook(txn); - _dropAllTempCollections(txn); + _shardingOnTransitionToPrimaryHook(opCtx); + _dropAllTempCollections(opCtx); serverGlobalParams.featureCompatibility.validateFeaturesAsMaster.store(true); @@ -460,28 +460,28 @@ void ReplicationCoordinatorExternalStateImpl::forwardSlaveProgress() { _syncSourceFeedback.forwardSlaveProgress(); } -OID ReplicationCoordinatorExternalStateImpl::ensureMe(OperationContext* txn) { +OID ReplicationCoordinatorExternalStateImpl::ensureMe(OperationContext* opCtx) { std::string myname = getHostName(); OID myRID; { - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock lock(txn->lockState(), meDatabaseName, MODE_X); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock lock(opCtx->lockState(), meDatabaseName, MODE_X); BSONObj me; // local.me is an identifier for a server for getLastError w:2+ // TODO: handle WriteConflictExceptions below - if (!Helpers::getSingleton(txn, meCollectionName, me) || 
!me.hasField("host") || + if (!Helpers::getSingleton(opCtx, meCollectionName, me) || !me.hasField("host") || me["host"].String() != myname) { myRID = OID::gen(); // clean out local.me - Helpers::emptyCollection(txn, meCollectionName); + Helpers::emptyCollection(opCtx, meCollectionName); // repopulate BSONObjBuilder b; b.append("_id", myRID); b.append("host", myname); - Helpers::putSingleton(txn, meCollectionName, b.done()); + Helpers::putSingleton(opCtx, meCollectionName, b.done()); } else { myRID = me["_id"].OID(); } @@ -490,11 +490,11 @@ OID ReplicationCoordinatorExternalStateImpl::ensureMe(OperationContext* txn) { } StatusWith<BSONObj> ReplicationCoordinatorExternalStateImpl::loadLocalConfigDocument( - OperationContext* txn) { + OperationContext* opCtx) { try { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { BSONObj config; - if (!Helpers::getSingleton(txn, configCollectionName, config)) { + if (!Helpers::getSingleton(opCtx, configCollectionName, config)) { return StatusWith<BSONObj>( ErrorCodes::NoMatchingDocument, str::stream() << "Did not find replica set configuration document in " @@ -502,33 +502,33 @@ StatusWith<BSONObj> ReplicationCoordinatorExternalStateImpl::loadLocalConfigDocu } return StatusWith<BSONObj>(config); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "load replica set config", configCollectionName); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "load replica set config", configCollectionName); } catch (const DBException& ex) { return StatusWith<BSONObj>(ex.toStatus()); } } -Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(OperationContext* txn, +Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(OperationContext* opCtx, const BSONObj& config) { try { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dbWriteLock(txn->lockState(), configDatabaseName, MODE_X); - Helpers::putSingleton(txn, configCollectionName, config); + ScopedTransaction transaction(opCtx, 
MODE_IX); + Lock::DBLock dbWriteLock(opCtx->lockState(), configDatabaseName, MODE_X); + Helpers::putSingleton(opCtx, configCollectionName, config); return Status::OK(); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "save replica set config", configCollectionName); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "save replica set config", configCollectionName); } catch (const DBException& ex) { return ex.toStatus(); } } StatusWith<LastVote> ReplicationCoordinatorExternalStateImpl::loadLocalLastVoteDocument( - OperationContext* txn) { + OperationContext* opCtx) { try { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { BSONObj lastVoteObj; - if (!Helpers::getSingleton(txn, lastVoteCollectionName, lastVoteObj)) { + if (!Helpers::getSingleton(opCtx, lastVoteCollectionName, lastVoteObj)) { return StatusWith<LastVote>(ErrorCodes::NoMatchingDocument, str::stream() << "Did not find replica set lastVote document in " @@ -537,41 +537,41 @@ StatusWith<LastVote> ReplicationCoordinatorExternalStateImpl::loadLocalLastVoteD return LastVote::readFromLastVote(lastVoteObj); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( - txn, "load replica set lastVote", lastVoteCollectionName); + opCtx, "load replica set lastVote", lastVoteCollectionName); } catch (const DBException& ex) { return StatusWith<LastVote>(ex.toStatus()); } } Status ReplicationCoordinatorExternalStateImpl::storeLocalLastVoteDocument( - OperationContext* txn, const LastVote& lastVote) { + OperationContext* opCtx, const LastVote& lastVote) { BSONObj lastVoteObj = lastVote.toBSON(); try { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dbWriteLock(txn->lockState(), lastVoteDatabaseName, MODE_X); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock dbWriteLock(opCtx->lockState(), lastVoteDatabaseName, MODE_X); // If there is no last vote document, we want to store one. Otherwise, we only want to // replace it if the new last vote document would have a higher term. 
We both check // the term of the current last vote document and insert the new document under the // DBLock to synchronize the two operations. BSONObj result; - bool exists = Helpers::getSingleton(txn, lastVoteCollectionName, result); + bool exists = Helpers::getSingleton(opCtx, lastVoteCollectionName, result); if (!exists) { - Helpers::putSingleton(txn, lastVoteCollectionName, lastVoteObj); + Helpers::putSingleton(opCtx, lastVoteCollectionName, lastVoteObj); } else { StatusWith<LastVote> oldLastVoteDoc = LastVote::readFromLastVote(result); if (!oldLastVoteDoc.isOK()) { return oldLastVoteDoc.getStatus(); } if (lastVote.getTerm() > oldLastVoteDoc.getValue().getTerm()) { - Helpers::putSingleton(txn, lastVoteCollectionName, lastVoteObj); + Helpers::putSingleton(opCtx, lastVoteCollectionName, lastVoteObj); } } } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( - txn, "save replica set lastVote", lastVoteCollectionName); - txn->recoveryUnit()->waitUntilDurable(); + opCtx, "save replica set lastVote", lastVoteCollectionName); + opCtx->recoveryUnit()->waitUntilDurable(); return Status::OK(); } catch (const DBException& ex) { return ex.toStatus(); @@ -583,18 +583,18 @@ void ReplicationCoordinatorExternalStateImpl::setGlobalTimestamp(ServiceContext* setNewTimestamp(ctx, newTime); } -void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationContext* txn) { - if (_storageInterface->getInitialSyncFlag(txn)) { +void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationContext* opCtx) { + if (_storageInterface->getInitialSyncFlag(opCtx)) { return; // Initial Sync will take over so no cleanup is needed. } // This initializes the minvalid document with a null "ts" because older versions (<=3.2) // get angry if the minValid document is present but doesn't have a "ts" field. // Consider removing this once we no longer need to support downgrading to 3.2. 
- _storageInterface->setMinValidToAtLeast(txn, {}); + _storageInterface->setMinValidToAtLeast(opCtx, {}); - const auto deleteFromPoint = _storageInterface->getOplogDeleteFromPoint(txn); - const auto appliedThrough = _storageInterface->getAppliedThrough(txn); + const auto deleteFromPoint = _storageInterface->getOplogDeleteFromPoint(opCtx); + const auto appliedThrough = _storageInterface->getAppliedThrough(opCtx); const bool needToDeleteEndOfOplog = !deleteFromPoint.isNull() && // This version should never have a non-null deleteFromPoint with a null appliedThrough. @@ -609,9 +609,9 @@ void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationCon !(appliedThrough.getTimestamp() >= deleteFromPoint); if (needToDeleteEndOfOplog) { log() << "Removing unapplied entries starting at: " << deleteFromPoint; - truncateOplogTo(txn, deleteFromPoint); + truncateOplogTo(opCtx, deleteFromPoint); } - _storageInterface->setOplogDeleteFromPoint(txn, {}); // clear the deleteFromPoint + _storageInterface->setOplogDeleteFromPoint(opCtx, {}); // clear the deleteFromPoint if (appliedThrough.isNull()) { // No follow-up work to do. @@ -620,7 +620,7 @@ void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationCon // Check if we have any unapplied ops in our oplog. It is important that this is done after // deleting the ragged end of the oplog. - const auto topOfOplog = fassertStatusOK(40290, loadLastOpTime(txn)); + const auto topOfOplog = fassertStatusOK(40290, loadLastOpTime(opCtx)); if (appliedThrough == topOfOplog) { return; // We've applied all the valid oplog we have. 
} else if (appliedThrough > topOfOplog) { @@ -632,7 +632,7 @@ void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationCon log() << "Replaying stored operations from " << appliedThrough << " (exclusive) to " << topOfOplog << " (inclusive)."; - DBDirectClient db(txn); + DBDirectClient db(opCtx); auto cursor = db.query(rsOplogName, QUERY("ts" << BSON("$gte" << appliedThrough.getTimestamp())), /*batchSize*/ 0, @@ -658,28 +658,29 @@ void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationCon } // Apply remaining ops one at at time, but don't log them because they are already logged. - const bool wereWritesReplicated = txn->writesAreReplicated(); - ON_BLOCK_EXIT([&] { txn->setReplicatedWrites(wereWritesReplicated); }); - txn->setReplicatedWrites(false); + const bool wereWritesReplicated = opCtx->writesAreReplicated(); + ON_BLOCK_EXIT([&] { opCtx->setReplicatedWrites(wereWritesReplicated); }); + opCtx->setReplicatedWrites(false); while (cursor->more()) { auto entry = cursor->nextSafe(); - fassertStatusOK(40294, SyncTail::syncApply(txn, entry, true)); + fassertStatusOK(40294, SyncTail::syncApply(opCtx, entry, true)); _storageInterface->setAppliedThrough( - txn, fassertStatusOK(40295, OpTime::parseFromOplogEntry(entry))); + opCtx, fassertStatusOK(40295, OpTime::parseFromOplogEntry(entry))); } } -StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::loadLastOpTime(OperationContext* txn) { +StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::loadLastOpTime( + OperationContext* opCtx) { // TODO: handle WriteConflictExceptions below try { // If we are doing an initial sync do not read from the oplog. 
- if (_storageInterface->getInitialSyncFlag(txn)) { + if (_storageInterface->getInitialSyncFlag(opCtx)) { return {ErrorCodes::InitialSyncFailure, "In the middle of an initial sync."}; } BSONObj oplogEntry; - if (!Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry)) { + if (!Helpers::getLast(opCtx, rsOplogName.c_str(), oplogEntry)) { return StatusWith<OpTime>(ErrorCodes::NoMatchingDocument, str::stream() << "Did not find any entries in " << rsOplogName); @@ -711,17 +712,17 @@ bool ReplicationCoordinatorExternalStateImpl::isSelf(const HostAndPort& host, Se } HostAndPort ReplicationCoordinatorExternalStateImpl::getClientHostAndPort( - const OperationContext* txn) { - return HostAndPort(txn->getClient()->clientAddress(true)); + const OperationContext* opCtx) { + return HostAndPort(opCtx->getClient()->clientAddress(true)); } void ReplicationCoordinatorExternalStateImpl::closeConnections() { getGlobalServiceContext()->getTransportLayer()->endAllSessions(transport::Session::kKeepOpen); } -void ReplicationCoordinatorExternalStateImpl::killAllUserOperations(OperationContext* txn) { - ServiceContext* environment = txn->getServiceContext(); - environment->killAllUserOperations(txn, ErrorCodes::InterruptedDueToReplStateChange); +void ReplicationCoordinatorExternalStateImpl::killAllUserOperations(OperationContext* opCtx) { + ServiceContext* environment = opCtx->getServiceContext(); + environment->killAllUserOperations(opCtx, ErrorCodes::InterruptedDueToReplStateChange); } void ReplicationCoordinatorExternalStateImpl::shardingOnStepDownHook() { @@ -733,8 +734,8 @@ void ReplicationCoordinatorExternalStateImpl::shardingOnStepDownHook() { } void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook( - OperationContext* txn) { - auto status = ShardingStateRecovery::recover(txn); + OperationContext* opCtx) { + auto status = ShardingStateRecovery::recover(opCtx); if (ErrorCodes::isShutdownError(status.code())) { // Note: callers of this method don't expect 
exceptions, so throw only unexpected fatal @@ -745,7 +746,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook fassertStatusOK(40107, status); if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { - status = Grid::get(txn)->catalogManager()->initializeConfigDatabaseIfNeeded(txn); + status = Grid::get(opCtx)->catalogManager()->initializeConfigDatabaseIfNeeded(opCtx); if (!status.isOK() && status != ErrorCodes::AlreadyInitialized) { if (ErrorCodes::isShutdownError(status.code())) { // Don't fassert if we're mid-shutdown, let the shutdown happen gracefully. @@ -768,8 +769,8 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook // Since we *just* wrote the cluster ID to the config.version document (via // ShardingCatalogManager::initializeConfigDatabaseIfNeeded), this should always // succeed. - status = ClusterIdentityLoader::get(txn)->loadClusterId( - txn, repl::ReadConcernLevel::kLocalReadConcern); + status = ClusterIdentityLoader::get(opCtx)->loadClusterId( + opCtx, repl::ReadConcernLevel::kLocalReadConcern); if (ErrorCodes::isShutdownError(status.code())) { // Don't fassert if we're mid-shutdown, let the shutdown happen gracefully. @@ -780,20 +781,20 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook } // Free any leftover locks from previous instantiations. - auto distLockManager = Grid::get(txn)->catalogClient(txn)->getDistLockManager(); - distLockManager->unlockAll(txn, distLockManager->getProcessID()); + auto distLockManager = Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager(); + distLockManager->unlockAll(opCtx, distLockManager->getProcessID()); // If this is a config server node becoming a primary, start the balancer - Balancer::get(txn)->initiateBalancer(txn); + Balancer::get(opCtx)->initiateBalancer(opCtx); // Generate and upsert random 20 byte key for the LogicalClock's TimeProofService. 
// TODO: SERVER-27768 - } else if (ShardingState::get(txn)->enabled()) { + } else if (ShardingState::get(opCtx)->enabled()) { const auto configsvrConnStr = - Grid::get(txn)->shardRegistry()->getConfigShard()->getConnString(); - auto status = ShardingState::get(txn)->updateShardIdentityConfigString( - txn, configsvrConnStr.toString()); + Grid::get(opCtx)->shardRegistry()->getConfigShard()->getConnString(); + auto status = ShardingState::get(opCtx)->updateShardIdentityConfigString( + opCtx, configsvrConnStr.toString()); if (!status.isOK()) { warning() << "error encountered while trying to update config connection string to " << configsvrConnStr << causedBy(status); @@ -802,7 +803,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook // There is a slight chance that some stale metadata might have been loaded before the latest // optime has been recovered, so throw out everything that we have up to now - ShardingState::get(txn)->markCollectionsNotShardedAtStepdown(); + ShardingState::get(opCtx)->markCollectionsNotShardedAtStepdown(); } void ReplicationCoordinatorExternalStateImpl::signalApplierToChooseNewSyncSource() { @@ -826,7 +827,7 @@ void ReplicationCoordinatorExternalStateImpl::startProducerIfStopped() { } } -void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationContext* txn) { +void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationContext* opCtx) { std::vector<std::string> dbNames; StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine(); storageEngine->listDatabases(&dbNames); @@ -837,12 +838,12 @@ void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationC if (*it == "local") continue; LOG(2) << "Removing temporary collections from " << *it; - Database* db = dbHolder().get(txn, *it); + Database* db = dbHolder().get(opCtx, *it); // Since we must be holding the global lock during this function, if listDatabases // returned this dbname, 
we should be able to get a reference to it - it can't have // been dropped. invariant(db); - db->clearTmpCollections(txn); + db->clearTmpCollections(opCtx); } } @@ -857,11 +858,11 @@ void ReplicationCoordinatorExternalStateImpl::updateCommittedSnapshot(SnapshotNa manager->setCommittedSnapshot(newCommitPoint); } -void ReplicationCoordinatorExternalStateImpl::createSnapshot(OperationContext* txn, +void ReplicationCoordinatorExternalStateImpl::createSnapshot(OperationContext* opCtx, SnapshotName name) { auto manager = getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager(); invariant(manager); // This should never be called if there is no SnapshotManager. - manager->createSnapshot(txn, name); + manager->createSnapshot(opCtx, name); } void ReplicationCoordinatorExternalStateImpl::forceSnapshotCreation() { @@ -882,18 +883,18 @@ double ReplicationCoordinatorExternalStateImpl::getElectionTimeoutOffsetLimitFra } bool ReplicationCoordinatorExternalStateImpl::isReadCommittedSupportedByStorageEngine( - OperationContext* txn) const { - auto storageEngine = txn->getServiceContext()->getGlobalStorageEngine(); + OperationContext* opCtx) const { + auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine(); // This should never be called if the storage engine has not been initialized. 
invariant(storageEngine); return storageEngine->getSnapshotManager(); } StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::multiApply( - OperationContext* txn, + OperationContext* opCtx, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation) { - return repl::multiApply(txn, _writerPool.get(), std::move(ops), applyOperation); + return repl::multiApply(opCtx, _writerPool.get(), std::move(ops), applyOperation); } Status ReplicationCoordinatorExternalStateImpl::multiSyncApply(MultiApplier::OperationPtrs* ops) { @@ -915,20 +916,20 @@ Status ReplicationCoordinatorExternalStateImpl::multiInitialSyncApply( } std::unique_ptr<OplogBuffer> ReplicationCoordinatorExternalStateImpl::makeInitialSyncOplogBuffer( - OperationContext* txn) const { + OperationContext* opCtx) const { if (initialSyncOplogBuffer == kCollectionOplogBufferName) { invariant(initialSyncOplogBufferPeekCacheSize >= 0); OplogBufferCollection::Options options; options.peekCacheSize = std::size_t(initialSyncOplogBufferPeekCacheSize); return stdx::make_unique<OplogBufferProxy>( - stdx::make_unique<OplogBufferCollection>(StorageInterface::get(txn), options)); + stdx::make_unique<OplogBufferCollection>(StorageInterface::get(opCtx), options)); } else { return stdx::make_unique<OplogBufferBlockingQueue>(); } } std::unique_ptr<OplogBuffer> ReplicationCoordinatorExternalStateImpl::makeSteadyStateOplogBuffer( - OperationContext* txn) const { + OperationContext* opCtx) const { return stdx::make_unique<OplogBufferBlockingQueue>(); } diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h index b0d9487eaaf..16c5adf0be6 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h +++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h @@ -64,47 +64,47 @@ public: virtual ~ReplicationCoordinatorExternalStateImpl(); virtual void startThreads(const ReplSettings& settings) override; 
virtual void startInitialSync(OnInitialSyncFinishedFn finished) override; - virtual void startSteadyStateReplication(OperationContext* txn, + virtual void startSteadyStateReplication(OperationContext* opCtx, ReplicationCoordinator* replCoord) override; - virtual void stopDataReplication(OperationContext* txn) override; - virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* txn)> run) override; + virtual void stopDataReplication(OperationContext* opCtx) override; + virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* opCtx)> run) override; - virtual bool isInitialSyncFlagSet(OperationContext* txn) override; + virtual bool isInitialSyncFlagSet(OperationContext* opCtx) override; - virtual void startMasterSlave(OperationContext* txn); - virtual void shutdown(OperationContext* txn); + virtual void startMasterSlave(OperationContext* opCtx); + virtual void shutdown(OperationContext* opCtx); virtual executor::TaskExecutor* getTaskExecutor() const override; virtual OldThreadPool* getDbWorkThreadPool() const override; - virtual Status runRepairOnLocalDB(OperationContext* txn) override; - virtual Status initializeReplSetStorage(OperationContext* txn, const BSONObj& config); - void onDrainComplete(OperationContext* txn) override; - OpTime onTransitionToPrimary(OperationContext* txn, bool isV1ElectionProtocol) override; + virtual Status runRepairOnLocalDB(OperationContext* opCtx) override; + virtual Status initializeReplSetStorage(OperationContext* opCtx, const BSONObj& config); + void onDrainComplete(OperationContext* opCtx) override; + OpTime onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) override; virtual void forwardSlaveProgress(); - virtual OID ensureMe(OperationContext* txn); + virtual OID ensureMe(OperationContext* opCtx); virtual bool isSelf(const HostAndPort& host, ServiceContext* service); - virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn); - virtual Status 
storeLocalConfigDocument(OperationContext* txn, const BSONObj& config); - virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn); - virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote); + virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* opCtx); + virtual Status storeLocalConfigDocument(OperationContext* opCtx, const BSONObj& config); + virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* opCtx); + virtual Status storeLocalLastVoteDocument(OperationContext* opCtx, const LastVote& lastVote); virtual void setGlobalTimestamp(ServiceContext* service, const Timestamp& newTime); - virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn); - virtual void cleanUpLastApplyBatch(OperationContext* txn); - virtual HostAndPort getClientHostAndPort(const OperationContext* txn); + virtual StatusWith<OpTime> loadLastOpTime(OperationContext* opCtx); + virtual void cleanUpLastApplyBatch(OperationContext* opCtx); + virtual HostAndPort getClientHostAndPort(const OperationContext* opCtx); virtual void closeConnections(); - virtual void killAllUserOperations(OperationContext* txn); + virtual void killAllUserOperations(OperationContext* opCtx); virtual void shardingOnStepDownHook(); virtual void signalApplierToChooseNewSyncSource(); virtual void stopProducer(); virtual void startProducerIfStopped(); void dropAllSnapshots() final; void updateCommittedSnapshot(SnapshotName newCommitPoint) final; - void createSnapshot(OperationContext* txn, SnapshotName name) final; + void createSnapshot(OperationContext* opCtx, SnapshotName name) final; void forceSnapshotCreation() final; virtual bool snapshotsEnabled() const; virtual void notifyOplogMetadataWaiters(); virtual double getElectionTimeoutOffsetLimitFraction() const; - virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* txn) const; - virtual StatusWith<OpTime> multiApply(OperationContext* txn, + virtual bool 
isReadCommittedSupportedByStorageEngine(OperationContext* opCtx) const; + virtual StatusWith<OpTime> multiApply(OperationContext* opCtx, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation) override; virtual Status multiSyncApply(MultiApplier::OperationPtrs* ops) override; @@ -112,9 +112,9 @@ public: const HostAndPort& source, AtomicUInt32* fetchCount) override; virtual std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer( - OperationContext* txn) const override; + OperationContext* opCtx) const override; virtual std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer( - OperationContext* txn) const override; + OperationContext* opCtx) const override; virtual bool shouldUseDataReplicatorInitialSync() const override; virtual std::size_t getOplogFetcherMaxFetcherRestarts() const override; @@ -130,7 +130,7 @@ private: /** * Stops data replication and returns with 'lock' locked. */ - void _stopDataReplication_inlock(OperationContext* txn, UniqueLock* lock); + void _stopDataReplication_inlock(OperationContext* opCtx, UniqueLock* lock); /** * Called when the instance transitions to primary in order to notify a potentially sharded host @@ -138,15 +138,15 @@ private: * * Throws on errors. */ - void _shardingOnTransitionToPrimaryHook(OperationContext* txn); + void _shardingOnTransitionToPrimaryHook(OperationContext* opCtx); /** * Drops all temporary collections on all databases except "local". * * The implementation may assume that the caller has acquired the global exclusive lock - * for "txn". + * for "opCtx". 
*/ - void _dropAllTempCollections(OperationContext* txn); + void _dropAllTempCollections(OperationContext* opCtx); // Guards starting threads and setting _startedThreads stdx::mutex _threadMutex; diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp index 274806326a6..87e0e8c6af6 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp +++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp @@ -61,7 +61,7 @@ ReplicationCoordinatorExternalStateMock::ReplicationCoordinatorExternalStateMock ReplicationCoordinatorExternalStateMock::~ReplicationCoordinatorExternalStateMock() {} void ReplicationCoordinatorExternalStateMock::runOnInitialSyncThread( - stdx::function<void(OperationContext* txn)> run) { + stdx::function<void(OperationContext* opCtx)> run) { log() << "not running initial sync during test."; } @@ -83,13 +83,13 @@ void ReplicationCoordinatorExternalStateMock::stopDataReplication(OperationConte void ReplicationCoordinatorExternalStateMock::startMasterSlave(OperationContext*) {} -Status ReplicationCoordinatorExternalStateMock::runRepairOnLocalDB(OperationContext* txn) { +Status ReplicationCoordinatorExternalStateMock::runRepairOnLocalDB(OperationContext* opCtx) { return Status::OK(); } -Status ReplicationCoordinatorExternalStateMock::initializeReplSetStorage(OperationContext* txn, +Status ReplicationCoordinatorExternalStateMock::initializeReplSetStorage(OperationContext* opCtx, const BSONObj& config) { - return storeLocalConfigDocument(txn, config); + return storeLocalConfigDocument(opCtx, config); } void ReplicationCoordinatorExternalStateMock::shutdown(OperationContext*) {} @@ -118,7 +118,7 @@ void ReplicationCoordinatorExternalStateMock::addSelf(const HostAndPort& host) { } HostAndPort ReplicationCoordinatorExternalStateMock::getClientHostAndPort( - const OperationContext* txn) { + const OperationContext* opCtx) { return 
_clientHostAndPort; } @@ -128,11 +128,11 @@ void ReplicationCoordinatorExternalStateMock::setClientHostAndPort( } StatusWith<BSONObj> ReplicationCoordinatorExternalStateMock::loadLocalConfigDocument( - OperationContext* txn) { + OperationContext* opCtx) { return _localRsConfigDocument; } -Status ReplicationCoordinatorExternalStateMock::storeLocalConfigDocument(OperationContext* txn, +Status ReplicationCoordinatorExternalStateMock::storeLocalConfigDocument(OperationContext* opCtx, const BSONObj& config) { { stdx::unique_lock<stdx::mutex> lock(_shouldHangConfigMutex); @@ -153,12 +153,12 @@ void ReplicationCoordinatorExternalStateMock::setLocalConfigDocument( } StatusWith<LastVote> ReplicationCoordinatorExternalStateMock::loadLocalLastVoteDocument( - OperationContext* txn) { + OperationContext* opCtx) { return _localRsLastVoteDocument; } Status ReplicationCoordinatorExternalStateMock::storeLocalLastVoteDocument( - OperationContext* txn, const LastVote& lastVote) { + OperationContext* opCtx, const LastVote& lastVote) { { stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex); while (_storeLocalLastVoteDocumentShouldHang) { @@ -180,9 +180,10 @@ void ReplicationCoordinatorExternalStateMock::setLocalLastVoteDocument( void ReplicationCoordinatorExternalStateMock::setGlobalTimestamp(ServiceContext* service, const Timestamp& newTime) {} -void ReplicationCoordinatorExternalStateMock::cleanUpLastApplyBatch(OperationContext* txn) {} +void ReplicationCoordinatorExternalStateMock::cleanUpLastApplyBatch(OperationContext* opCtx) {} -StatusWith<OpTime> ReplicationCoordinatorExternalStateMock::loadLastOpTime(OperationContext* txn) { +StatusWith<OpTime> ReplicationCoordinatorExternalStateMock::loadLastOpTime( + OperationContext* opCtx) { return _lastOpTime; } @@ -222,7 +223,7 @@ void ReplicationCoordinatorExternalStateMock::closeConnections() { _connectionsClosed = true; } -void ReplicationCoordinatorExternalStateMock::killAllUserOperations(OperationContext* txn) {} +void 
ReplicationCoordinatorExternalStateMock::killAllUserOperations(OperationContext* opCtx) {} void ReplicationCoordinatorExternalStateMock::shardingOnStepDownHook() {} @@ -237,7 +238,7 @@ void ReplicationCoordinatorExternalStateMock::dropAllSnapshots() {} void ReplicationCoordinatorExternalStateMock::updateCommittedSnapshot(SnapshotName newCommitPoint) { } -void ReplicationCoordinatorExternalStateMock::createSnapshot(OperationContext* txn, +void ReplicationCoordinatorExternalStateMock::createSnapshot(OperationContext* opCtx, SnapshotName name) {} void ReplicationCoordinatorExternalStateMock::forceSnapshotCreation() {} @@ -257,7 +258,7 @@ double ReplicationCoordinatorExternalStateMock::getElectionTimeoutOffsetLimitFra } bool ReplicationCoordinatorExternalStateMock::isReadCommittedSupportedByStorageEngine( - OperationContext* txn) const { + OperationContext* opCtx) const { return _isReadCommittedSupported; } @@ -276,12 +277,12 @@ Status ReplicationCoordinatorExternalStateMock::multiInitialSyncApply( } std::unique_ptr<OplogBuffer> ReplicationCoordinatorExternalStateMock::makeInitialSyncOplogBuffer( - OperationContext* txn) const { + OperationContext* opCtx) const { return stdx::make_unique<OplogBufferBlockingQueue>(); } std::unique_ptr<OplogBuffer> ReplicationCoordinatorExternalStateMock::makeSteadyStateOplogBuffer( - OperationContext* txn) const { + OperationContext* opCtx) const { return stdx::make_unique<OplogBufferBlockingQueue>(); } @@ -297,9 +298,9 @@ void ReplicationCoordinatorExternalStateMock::setIsReadCommittedEnabled(bool val _isReadCommittedSupported = val; } -void ReplicationCoordinatorExternalStateMock::onDrainComplete(OperationContext* txn) {} +void ReplicationCoordinatorExternalStateMock::onDrainComplete(OperationContext* opCtx) {} -OpTime ReplicationCoordinatorExternalStateMock::onTransitionToPrimary(OperationContext* txn, +OpTime ReplicationCoordinatorExternalStateMock::onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) { if 
(isV1ElectionProtocol) { _lastOpTime = OpTime(Timestamp(1, 0), 1); diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h index 1b575ede697..c33044a2510 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h +++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h @@ -58,46 +58,46 @@ public: virtual ~ReplicationCoordinatorExternalStateMock(); virtual void startThreads(const ReplSettings& settings) override; virtual void startInitialSync(OnInitialSyncFinishedFn finished) override; - virtual void startSteadyStateReplication(OperationContext* txn, + virtual void startSteadyStateReplication(OperationContext* opCtx, ReplicationCoordinator* replCoord) override; - virtual void stopDataReplication(OperationContext* txn) override; - virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* txn)> run) override; - virtual bool isInitialSyncFlagSet(OperationContext* txn) override; + virtual void stopDataReplication(OperationContext* opCtx) override; + virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* opCtx)> run) override; + virtual bool isInitialSyncFlagSet(OperationContext* opCtx) override; virtual void startMasterSlave(OperationContext*); - virtual void shutdown(OperationContext* txn); + virtual void shutdown(OperationContext* opCtx); virtual executor::TaskExecutor* getTaskExecutor() const override; virtual OldThreadPool* getDbWorkThreadPool() const override; - virtual Status runRepairOnLocalDB(OperationContext* txn) override; - virtual Status initializeReplSetStorage(OperationContext* txn, const BSONObj& config); - void onDrainComplete(OperationContext* txn) override; - OpTime onTransitionToPrimary(OperationContext* txn, bool isV1ElectionProtocol) override; + virtual Status runRepairOnLocalDB(OperationContext* opCtx) override; + virtual Status initializeReplSetStorage(OperationContext* opCtx, const BSONObj& 
config); + void onDrainComplete(OperationContext* opCtx) override; + OpTime onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) override; virtual void forwardSlaveProgress(); virtual OID ensureMe(OperationContext*); virtual bool isSelf(const HostAndPort& host, ServiceContext* service); - virtual HostAndPort getClientHostAndPort(const OperationContext* txn); - virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn); - virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config); - virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn); - virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote); + virtual HostAndPort getClientHostAndPort(const OperationContext* opCtx); + virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* opCtx); + virtual Status storeLocalConfigDocument(OperationContext* opCtx, const BSONObj& config); + virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* opCtx); + virtual Status storeLocalLastVoteDocument(OperationContext* opCtx, const LastVote& lastVote); virtual void setGlobalTimestamp(ServiceContext* service, const Timestamp& newTime); - virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn); - virtual void cleanUpLastApplyBatch(OperationContext* txn); + virtual StatusWith<OpTime> loadLastOpTime(OperationContext* opCtx); + virtual void cleanUpLastApplyBatch(OperationContext* opCtx); virtual void closeConnections(); - virtual void killAllUserOperations(OperationContext* txn); + virtual void killAllUserOperations(OperationContext* opCtx); virtual void shardingOnStepDownHook(); virtual void signalApplierToChooseNewSyncSource(); virtual void stopProducer(); virtual void startProducerIfStopped(); virtual void dropAllSnapshots(); virtual void updateCommittedSnapshot(SnapshotName newCommitPoint); - virtual void createSnapshot(OperationContext* txn, SnapshotName name); + virtual void 
createSnapshot(OperationContext* opCtx, SnapshotName name); virtual void forceSnapshotCreation(); virtual bool snapshotsEnabled() const; virtual void notifyOplogMetadataWaiters(); virtual double getElectionTimeoutOffsetLimitFraction() const; - virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* txn) const; - virtual StatusWith<OpTime> multiApply(OperationContext* txn, + virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* opCtx) const; + virtual StatusWith<OpTime> multiApply(OperationContext* opCtx, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation) override; virtual Status multiSyncApply(MultiApplier::OperationPtrs* ops) override; @@ -105,9 +105,9 @@ public: const HostAndPort& source, AtomicUInt32* fetchCount) override; virtual std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer( - OperationContext* txn) const override; + OperationContext* opCtx) const override; virtual std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer( - OperationContext* txn) const override; + OperationContext* opCtx) const override; virtual bool shouldUseDataReplicatorInitialSync() const override; virtual std::size_t getOplogFetcherMaxFetcherRestarts() const override; diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp index 8ad99d7e24a..a238cee74de 100644 --- a/src/mongo/db/repl/replication_coordinator_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl.cpp @@ -411,8 +411,8 @@ void ReplicationCoordinatorImpl::appendConnectionStats(executor::ConnectionPoolS _replExecutor.appendConnectionStats(stats); } -bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* txn) { - StatusWith<LastVote> lastVote = _externalState->loadLocalLastVoteDocument(txn); +bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx) { + StatusWith<LastVote> lastVote = _externalState->loadLocalLastVoteDocument(opCtx); if (!lastVote.isOK()) { 
if (lastVote.getStatus() == ErrorCodes::NoMatchingDocument) { log() << "Did not find local voted for document at startup."; @@ -426,7 +426,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* txn) { _topCoord->loadLastVote(lastVote.getValue()); } - StatusWith<BSONObj> cfg = _externalState->loadLocalConfigDocument(txn); + StatusWith<BSONObj> cfg = _externalState->loadLocalConfigDocument(opCtx); if (!cfg.isOK()) { log() << "Did not find local replica set configuration document at startup; " << cfg.getStatus(); @@ -443,8 +443,8 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* txn) { } // Read the last op from the oplog after cleaning up any partially applied batches. - _externalState->cleanUpLastApplyBatch(txn); - auto lastOpTimeStatus = _externalState->loadLastOpTime(txn); + _externalState->cleanUpLastApplyBatch(opCtx); + auto lastOpTimeStatus = _externalState->loadLastOpTime(opCtx); // Use a callback here, because _finishLoadLocalConfig calls isself() which requires // that the server's networking layer be up and running and accepting connections, which @@ -546,12 +546,12 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig( _performPostMemberStateUpdateAction(action); if (!isArbiter) { _externalState->startThreads(_settings); - invariant(cbData.txn); - _startDataReplication(cbData.txn); + invariant(cbData.opCtx); + _startDataReplication(cbData.opCtx); } } -void ReplicationCoordinatorImpl::_stopDataReplication(OperationContext* txn) { +void ReplicationCoordinatorImpl::_stopDataReplication(OperationContext* opCtx) { std::shared_ptr<DataReplicator> drCopy; { LockGuard lk(_mutex); @@ -569,19 +569,20 @@ void ReplicationCoordinatorImpl::_stopDataReplication(OperationContext* txn) { } LOG(1) << "ReplicationCoordinatorImpl::_stopDataReplication calling " "ReplCoordExtState::stopDataReplication."; - _externalState->stopDataReplication(txn); + _externalState->stopDataReplication(opCtx); } -void 
ReplicationCoordinatorImpl::_startDataReplication(OperationContext* txn, +void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx, stdx::function<void()> startCompleted) { // Check to see if we need to do an initial sync. const auto lastOpTime = getMyLastAppliedOpTime(); - const auto needsInitialSync = lastOpTime.isNull() || _externalState->isInitialSyncFlagSet(txn); + const auto needsInitialSync = + lastOpTime.isNull() || _externalState->isInitialSyncFlagSet(opCtx); if (!needsInitialSync) { stdx::lock_guard<stdx::mutex> lk(_mutex); if (!_inShutdown) { // Start steady replication, since we already have data. - _externalState->startSteadyStateReplication(txn, this); + _externalState->startSteadyStateReplication(opCtx, this); } return; } @@ -624,9 +625,9 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* txn, startCompleted(); } // Repair local db (to compact it). - auto txn = cc().makeOperationContext(); - uassertStatusOK(_externalState->runRepairOnLocalDB(txn.get())); - _externalState->startSteadyStateReplication(txn.get(), this); + auto opCtx = cc().makeOperationContext(); + uassertStatusOK(_externalState->runRepairOnLocalDB(opCtx.get())); + _externalState->startSteadyStateReplication(opCtx.get(), this); }; std::shared_ptr<DataReplicator> drCopy; @@ -644,7 +645,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* txn, } // DataReplicator::startup() must be called outside lock because it uses features (eg. // setting the initial sync flag) which depend on the ReplicationCoordinatorImpl. - uassertStatusOK(drCopy->startup(txn, numInitialSyncAttempts.load())); + uassertStatusOK(drCopy->startup(opCtx, numInitialSyncAttempts.load())); } catch (...) 
{ auto status = exceptionToStatus(); log() << "Initial Sync failed to start: " << status; @@ -655,19 +656,19 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* txn, fassertFailedWithStatusNoTrace(40354, status); } } else { - _externalState->startInitialSync([this, startCompleted](OperationContext* txn) { + _externalState->startInitialSync([this, startCompleted](OperationContext* opCtx) { stdx::lock_guard<stdx::mutex> lk(_mutex); if (!_inShutdown) { if (startCompleted) { startCompleted(); } - _externalState->startSteadyStateReplication(txn, this); + _externalState->startSteadyStateReplication(opCtx, this); } }); } } -void ReplicationCoordinatorImpl::startup(OperationContext* txn) { +void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) { if (!isReplEnabled()) { stdx::lock_guard<stdx::mutex> lk(_mutex); _setConfigState_inlock(kConfigReplicationDisabled); @@ -675,7 +676,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* txn) { } { - OID rid = _externalState->ensureMe(txn); + OID rid = _externalState->ensureMe(opCtx); stdx::lock_guard<stdx::mutex> lk(_mutex); fassert(18822, !_inShutdown); @@ -687,16 +688,16 @@ void ReplicationCoordinatorImpl::startup(OperationContext* txn) { if (!_settings.usingReplSets()) { // Must be Master/Slave invariant(_settings.isMaster() || _settings.isSlave()); - _externalState->startMasterSlave(txn); + _externalState->startMasterSlave(opCtx); return; } _replExecutor.startup(); _topCoord->setStorageEngineSupportsReadCommitted( - _externalState->isReadCommittedSupportedByStorageEngine(txn)); + _externalState->isReadCommittedSupportedByStorageEngine(opCtx)); - bool doneLoadingConfig = _startLoadLocalConfig(txn); + bool doneLoadingConfig = _startLoadLocalConfig(opCtx); if (doneLoadingConfig) { // If we're not done loading the config, then the config state will be set by // _finishLoadLocalConfig. 
@@ -706,7 +707,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* txn) { } } -void ReplicationCoordinatorImpl::shutdown(OperationContext* txn) { +void ReplicationCoordinatorImpl::shutdown(OperationContext* opCtx) { // Shutdown must: // * prevent new threads from blocking in awaitReplication // * wake up all existing threads blocking in awaitReplication @@ -759,7 +760,7 @@ void ReplicationCoordinatorImpl::shutdown(OperationContext* txn) { drCopy->join(); drCopy.reset(); } - _externalState->shutdown(txn); + _externalState->shutdown(opCtx); _replExecutor.shutdown(); _replExecutor.join(); } @@ -892,7 +893,7 @@ ReplicationCoordinator::ApplierState ReplicationCoordinatorImpl::getApplierState return _applierState; } -void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn, +void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx, long long termWhenBufferIsEmpty) { // This logic is a little complicated in order to avoid acquiring the global exclusive lock // unnecessarily. This is important because the applier may call signalDrainComplete() @@ -918,7 +919,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn, // temp collection isn't introduced on the new primary before we drop all the temp collections. // When we go to drop all temp collections, we must replicate the drops. 
- invariant(txn->writesAreReplicated()); + invariant(opCtx->writesAreReplicated()); stdx::unique_lock<stdx::mutex> lk(_mutex); if (_applierState != ApplierState::Draining) { @@ -926,7 +927,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn, } lk.unlock(); - _externalState->onDrainComplete(txn); + _externalState->onDrainComplete(opCtx); if (MONGO_FAIL_POINT(transitionToPrimaryHangBeforeTakingGlobalExclusiveLock)) { log() << "transition to primary - " @@ -943,8 +944,8 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn, } } - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite globalWriteLock(txn->lockState()); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite globalWriteLock(opCtx->lockState()); lk.lock(); // Exit drain mode when the buffer is empty in the current term and we're in Draining mode. @@ -959,7 +960,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn, _canAcceptNonLocalWrites = true; lk.unlock(); - _setFirstOpTimeOfMyTerm(_externalState->onTransitionToPrimary(txn, isV1ElectionProtocol())); + _setFirstOpTimeOfMyTerm(_externalState->onTransitionToPrimary(opCtx, isV1ElectionProtocol())); lk.lock(); // Must calculate the commit level again because firstOpTimeOfMyTerm wasn't set when we logged @@ -1232,11 +1233,11 @@ OpTime ReplicationCoordinatorImpl::getMyLastDurableOpTime() const { return _getMyLastDurableOpTime_inlock(); } -Status ReplicationCoordinatorImpl::waitUntilOpTimeForRead(OperationContext* txn, +Status ReplicationCoordinatorImpl::waitUntilOpTimeForRead(OperationContext* opCtx, const ReadConcernArgs& settings) { // We should never wait for replication if we are holding any locks, because this can // potentially block for long time while doing network activity. 
- if (txn->lockState()->isLocked()) { + if (opCtx->lockState()->isLocked()) { return {ErrorCodes::IllegalOperation, "Waiting for replication not allowed while holding a lock"}; } @@ -1291,10 +1292,10 @@ Status ReplicationCoordinatorImpl::waitUntilOpTimeForRead(OperationContext* txn, // If we are doing a majority read concern we only need to wait for a new snapshot. if (isMajorityReadConcern) { // Wait for a snapshot that meets our needs (< targetOpTime). - LOG(3) << "waitUntilOpTime: waiting for a new snapshot until " << txn->getDeadline(); + LOG(3) << "waitUntilOpTime: waiting for a new snapshot until " << opCtx->getDeadline(); auto waitStatus = - txn->waitForConditionOrInterruptNoAssert(_currentCommittedSnapshotCond, lock); + opCtx->waitForConditionOrInterruptNoAssert(_currentCommittedSnapshotCond, lock); if (!waitStatus.isOK()) { return waitStatus; } @@ -1305,12 +1306,12 @@ Status ReplicationCoordinatorImpl::waitUntilOpTimeForRead(OperationContext* txn, // We just need to wait for the opTime to catch up to what we need (not majority RC). 
stdx::condition_variable condVar; WaiterInfoGuard waitInfo( - &_opTimeWaiterList, txn->getOpID(), targetOpTime, nullptr, &condVar); + &_opTimeWaiterList, opCtx->getOpID(), targetOpTime, nullptr, &condVar); LOG(3) << "waituntilOpTime: waiting for OpTime " << waitInfo.waiter << " until " - << txn->getDeadline(); + << opCtx->getDeadline(); - auto waitStatus = txn->waitForConditionOrInterruptNoAssert(condVar, lock); + auto waitStatus = opCtx->waitForConditionOrInterruptNoAssert(condVar, lock); if (!waitStatus.isOK()) { return waitStatus; } @@ -1591,37 +1592,37 @@ bool ReplicationCoordinatorImpl::_haveTaggedNodesReachedOpTime_inlock( } ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitReplication( - OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern) { + OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern) { Timer timer; WriteConcernOptions fixedWriteConcern = populateUnsetWriteConcernOptionsSyncMode(writeConcern); stdx::unique_lock<stdx::mutex> lock(_mutex); auto status = - _awaitReplication_inlock(&lock, txn, opTime, SnapshotName::min(), fixedWriteConcern); + _awaitReplication_inlock(&lock, opCtx, opTime, SnapshotName::min(), fixedWriteConcern); return {std::move(status), duration_cast<Milliseconds>(timer.elapsed())}; } ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitReplicationOfLastOpForClient( - OperationContext* txn, const WriteConcernOptions& writeConcern) { + OperationContext* opCtx, const WriteConcernOptions& writeConcern) { Timer timer; WriteConcernOptions fixedWriteConcern = populateUnsetWriteConcernOptionsSyncMode(writeConcern); stdx::unique_lock<stdx::mutex> lock(_mutex); - const auto& clientInfo = ReplClientInfo::forClient(txn->getClient()); + const auto& clientInfo = ReplClientInfo::forClient(opCtx->getClient()); auto status = _awaitReplication_inlock( - &lock, txn, clientInfo.getLastOp(), clientInfo.getLastSnapshot(), 
fixedWriteConcern); + &lock, opCtx, clientInfo.getLastOp(), clientInfo.getLastSnapshot(), fixedWriteConcern); return {std::move(status), duration_cast<Milliseconds>(timer.elapsed())}; } Status ReplicationCoordinatorImpl::_awaitReplication_inlock( stdx::unique_lock<stdx::mutex>* lock, - OperationContext* txn, + OperationContext* opCtx, const OpTime& opTime, SnapshotName minSnapshot, const WriteConcernOptions& writeConcern) { // We should never wait for replication if we are holding any locks, because this can // potentially block for long time while doing network activity. - if (txn->lockState()->isLocked()) { + if (opCtx->lockState()->isLocked()) { return {ErrorCodes::IllegalOperation, "Waiting for replication not allowed while holding a lock"}; } @@ -1668,7 +1669,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock( return stepdownStatus; } - auto interruptStatus = txn->checkForInterruptNoAssert(); + auto interruptStatus = opCtx->checkForInterruptNoAssert(); if (!interruptStatus.isOK()) { return interruptStatus; } @@ -1681,7 +1682,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock( } } - auto clockSource = txn->getServiceContext()->getFastClockSource(); + auto clockSource = opCtx->getServiceContext()->getFastClockSource(); const auto wTimeoutDate = [&]() -> const Date_t { if (writeConcern.wDeadline != Date_t::max()) { return writeConcern.wDeadline; @@ -1696,14 +1697,14 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock( // Must hold _mutex before constructing waitInfo as it will modify _replicationWaiterList stdx::condition_variable condVar; WaiterInfoGuard waitInfo( - &_replicationWaiterList, txn->getOpID(), opTime, &writeConcern, &condVar); + &_replicationWaiterList, opCtx->getOpID(), opTime, &writeConcern, &condVar); while (!_doneWaitingForReplication_inlock(opTime, minSnapshot, writeConcern)) { if (_inShutdown) { return {ErrorCodes::ShutdownInProgress, "Replication is being shut down"}; } - auto status = 
txn->waitForConditionOrInterruptNoAssertUntil(condVar, *lock, wTimeoutDate); + auto status = opCtx->waitForConditionOrInterruptNoAssertUntil(condVar, *lock, wTimeoutDate); if (!status.isOK()) { return status.getStatus(); } @@ -1729,7 +1730,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock( return _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern); } -Status ReplicationCoordinatorImpl::stepDown(OperationContext* txn, +Status ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx, const bool force, const Milliseconds& waitTime, const Milliseconds& stepdownTime) { @@ -1745,12 +1746,12 @@ Status ReplicationCoordinatorImpl::stepDown(OperationContext* txn, return {ErrorCodes::NotMaster, "not primary so can't step down"}; } - Lock::GlobalLock globalReadLock(txn->lockState(), MODE_S, Lock::GlobalLock::EnqueueOnly()); + Lock::GlobalLock globalReadLock(opCtx->lockState(), MODE_S, Lock::GlobalLock::EnqueueOnly()); // We've requested the global shared lock which will stop new writes from coming in, // but existing writes could take a long time to finish, so kill all user operations // to help us get the global lock faster. 
- _externalState->killAllUserOperations(txn); + _externalState->killAllUserOperations(opCtx); globalReadLock.waitForLock(durationCount<Milliseconds>(stepdownTime)); @@ -1763,7 +1764,7 @@ Status ReplicationCoordinatorImpl::stepDown(OperationContext* txn, try { stdx::unique_lock<stdx::mutex> topoLock(_topoMutex); bool restartHeartbeats = true; - txn->checkForInterrupt(); + opCtx->checkForInterrupt(); while (!_tryToStepDown(waitUntil, stepDownUntil, force)) { if (restartHeartbeats) { // We send out a fresh round of heartbeats because stepping down successfully @@ -1773,7 +1774,7 @@ Status ReplicationCoordinatorImpl::stepDown(OperationContext* txn, _restartHeartbeats_inlock(); restartHeartbeats = false; } - txn->waitForConditionOrInterruptUntil( + opCtx->waitForConditionOrInterruptUntil( _stepDownWaiters, topoLock, std::min(stepDownUntil, waitUntil)); } } catch (const DBException& ex) { @@ -1864,14 +1865,14 @@ bool ReplicationCoordinatorImpl::isMasterForReportingPurposes() { return false; } -bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase(OperationContext* txn, +bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName) { // The answer isn't meaningful unless we hold the global lock. - invariant(txn->lockState()->isLocked()); - return canAcceptWritesForDatabase_UNSAFE(txn, dbName); + invariant(opCtx->lockState()->isLocked()); + return canAcceptWritesForDatabase_UNSAFE(opCtx, dbName); } -bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationContext* txn, +bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName) { // _canAcceptNonLocalWrites is always true for standalone nodes, always false for nodes // started with --slave, and adjusted based on primary+drain state in replica sets. 
@@ -1889,32 +1890,32 @@ bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationCont return !replAllDead && _settings.isMaster(); } -bool ReplicationCoordinatorImpl::canAcceptWritesFor(OperationContext* txn, +bool ReplicationCoordinatorImpl::canAcceptWritesFor(OperationContext* opCtx, const NamespaceString& ns) { - invariant(txn->lockState()->isLocked()); - return canAcceptWritesFor_UNSAFE(txn, ns); + invariant(opCtx->lockState()->isLocked()); + return canAcceptWritesFor_UNSAFE(opCtx, ns); } -bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* txn, +bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns) { if (_memberState.rollback() && ns.isOplog()) { return false; } StringData dbName = ns.db(); - return canAcceptWritesForDatabase_UNSAFE(txn, dbName); + return canAcceptWritesForDatabase_UNSAFE(opCtx, dbName); } -Status ReplicationCoordinatorImpl::checkCanServeReadsFor(OperationContext* txn, +Status ReplicationCoordinatorImpl::checkCanServeReadsFor(OperationContext* opCtx, const NamespaceString& ns, bool slaveOk) { - invariant(txn->lockState()->isLocked()); - return checkCanServeReadsFor_UNSAFE(txn, ns, slaveOk); + invariant(opCtx->lockState()->isLocked()); + return checkCanServeReadsFor_UNSAFE(opCtx, ns, slaveOk); } -Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext* txn, +Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns, bool slaveOk) { - auto client = txn->getClient(); + auto client = opCtx->getClient(); // Oplog reads are not allowed during STARTUP state, but we make an exception for internal // reads and master-slave replication. Internel reads are required for cleaning up unfinished // apply batches. Master-slave never sets the state so we make an exception for it as well. 
@@ -1928,7 +1929,7 @@ Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext if (client->isInDirectClient()) { return Status::OK(); } - if (canAcceptWritesFor_UNSAFE(txn, ns)) { + if (canAcceptWritesFor_UNSAFE(opCtx, ns)) { return Status::OK(); } if (_settings.isSlave() || _settings.isMaster()) { @@ -1948,9 +1949,9 @@ bool ReplicationCoordinatorImpl::isInPrimaryOrSecondaryState() const { return _canServeNonLocalReads.loadRelaxed(); } -bool ReplicationCoordinatorImpl::shouldRelaxIndexConstraints(OperationContext* txn, +bool ReplicationCoordinatorImpl::shouldRelaxIndexConstraints(OperationContext* opCtx, const NamespaceString& ns) { - return !canAcceptWritesFor(txn, ns); + return !canAcceptWritesFor(opCtx, ns); } OID ReplicationCoordinatorImpl::getElectionId() { @@ -1977,8 +1978,8 @@ int ReplicationCoordinatorImpl::_getMyId_inlock() const { return self.getId(); } -Status ReplicationCoordinatorImpl::resyncData(OperationContext* txn, bool waitUntilCompleted) { - _stopDataReplication(txn); +Status ReplicationCoordinatorImpl::resyncData(OperationContext* opCtx, bool waitUntilCompleted) { + _stopDataReplication(opCtx); auto finishedEvent = uassertStatusOK(_replExecutor.makeEvent()); stdx::function<void()> f; if (waitUntilCompleted) @@ -1987,7 +1988,7 @@ Status ReplicationCoordinatorImpl::resyncData(OperationContext* txn, bool waitUn stdx::unique_lock<stdx::mutex> lk(_mutex); _resetMyLastOpTimes_inlock(); lk.unlock(); // unlock before calling into replCoordExtState. 
- _startDataReplication(txn, f); + _startDataReplication(opCtx, f); if (waitUntilCompleted) { _replExecutor.waitForEvent(finishedEvent); } @@ -2212,7 +2213,7 @@ Status ReplicationCoordinatorImpl::setMaintenanceMode(bool activate) { return Status::OK(); } -Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* txn, +Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCtx, const HostAndPort& target, BSONObjBuilder* resultObj) { Status result(ErrorCodes::InternalError, "didn't set status in prepareSyncFromResponse"); @@ -2227,7 +2228,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* txn, } if (doResync) { - return resyncData(txn, false); + return resyncData(opCtx, false); } return result; @@ -2292,7 +2293,7 @@ Status ReplicationCoordinatorImpl::processHeartbeat(const ReplSetHeartbeatArgs& return result; } -Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* txn, +Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCtx, const ReplSetReconfigArgs& args, BSONObjBuilder* resultObj) { log() << "replSetReconfig admin command received from client"; @@ -2363,7 +2364,7 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* txn, } StatusWith<int> myIndex = validateConfigForReconfig( - _externalState.get(), oldConfig, newConfig, txn->getServiceContext(), args.force); + _externalState.get(), oldConfig, newConfig, opCtx->getServiceContext(), args.force); if (!myIndex.isOK()) { error() << "replSetReconfig got " << myIndex.getStatus() << " while validating " << newConfigObj; @@ -2382,7 +2383,7 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* txn, } } - status = _externalState->storeLocalConfigDocument(txn, newConfig.toBSON()); + status = _externalState->storeLocalConfigDocument(opCtx, newConfig.toBSON()); if (!status.isOK()) { error() << "replSetReconfig failed to store config document; " << status; 
return status; @@ -2465,7 +2466,7 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig( } } -Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn, +Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCtx, const BSONObj& configObj, BSONObjBuilder* resultObj) { log() << "replSetInitiate admin command received from client"; @@ -2508,7 +2509,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn, } StatusWith<int> myIndex = - validateConfigForInitiate(_externalState.get(), newConfig, txn->getServiceContext()); + validateConfigForInitiate(_externalState.get(), newConfig, opCtx->getServiceContext()); if (!myIndex.isOK()) { error() << "replSet initiate got " << myIndex.getStatus() << " while validating " << configObj; @@ -2525,7 +2526,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn, return status; } - status = _externalState->initializeReplSetStorage(txn, newConfig.toBSON()); + status = _externalState->initializeReplSetStorage(opCtx, newConfig.toBSON()); if (!status.isOK()) { error() << "replSetInitiate failed to store config document or create the oplog; " << status; @@ -2545,7 +2546,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn, // will fail validation with a "replSet initiate got ... while validating" reason. 
invariant(!newConfig.getMemberAt(myIndex.getValue()).isArbiter()); _externalState->startThreads(_settings); - _startDataReplication(txn); + _startDataReplication(opCtx); configStateGuard.Dismiss(); return Status::OK(); @@ -2949,7 +2950,7 @@ Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(const UpdatePosi return status; } -Status ReplicationCoordinatorImpl::processHandshake(OperationContext* txn, +Status ReplicationCoordinatorImpl::processHandshake(OperationContext* opCtx, const HandshakeArgs& handshake) { LOG(2) << "Received handshake " << handshake.toBSON(); @@ -2968,7 +2969,7 @@ Status ReplicationCoordinatorImpl::processHandshake(OperationContext* txn, SlaveInfo newSlaveInfo; newSlaveInfo.rid = handshake.getRid(); newSlaveInfo.memberId = -1; - newSlaveInfo.hostAndPort = _externalState->getClientHostAndPort(txn); + newSlaveInfo.hostAndPort = _externalState->getClientHostAndPort(opCtx); // Don't call _addSlaveInfo_inlock as that would wake sleepers unnecessarily. _slaveInfo.push_back(newSlaveInfo); @@ -3121,8 +3122,8 @@ void ReplicationCoordinatorImpl::blacklistSyncSource(const HostAndPort& host, Da host)); } -void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* txn) { - StatusWith<OpTime> lastOpTimeStatus = _externalState->loadLastOpTime(txn); +void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* opCtx) { + StatusWith<OpTime> lastOpTimeStatus = _externalState->loadLastOpTime(opCtx); OpTime lastOpTime; if (!lastOpTimeStatus.isOK()) { warning() << "Failed to load timestamp of most recently applied operation; " @@ -3137,7 +3138,7 @@ void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* txn _reportUpstream_inlock(std::move(lock)); // Unlocked below. 
- _externalState->setGlobalTimestamp(txn->getServiceContext(), lastOpTime.getTimestamp()); + _externalState->setGlobalTimestamp(opCtx->getServiceContext(), lastOpTime.getTimestamp()); } bool ReplicationCoordinatorImpl::shouldChangeSyncSource( @@ -3244,14 +3245,14 @@ OpTime ReplicationCoordinatorImpl::getLastCommittedOpTime() const { } Status ReplicationCoordinatorImpl::processReplSetRequestVotes( - OperationContext* txn, + OperationContext* opCtx, const ReplSetRequestVotesArgs& args, ReplSetRequestVotesResponse* response) { if (!isV1ElectionProtocol()) { return {ErrorCodes::BadValue, "not using election protocol v1"}; } - auto termStatus = updateTerm(txn, args.getTerm()); + auto termStatus = updateTerm(opCtx, args.getTerm()); if (!termStatus.isOK() && termStatus.code() != ErrorCodes::StaleTerm) return termStatus; @@ -3264,7 +3265,7 @@ Status ReplicationCoordinatorImpl::processReplSetRequestVotes( if (!args.isADryRun() && response->getVoteGranted()) { LastVote lastVote{args.getTerm(), args.getCandidateIndex()}; - Status status = _externalState->storeLocalLastVoteDocument(txn, lastVote); + Status status = _externalState->storeLocalLastVoteDocument(opCtx, lastVote); if (!status.isOK()) { error() << "replSetRequestVotes failed to store LastVote document; " << status; return status; @@ -3405,7 +3406,7 @@ EventHandle ReplicationCoordinatorImpl::updateTerm_forTest( return finishEvh; } -Status ReplicationCoordinatorImpl::updateTerm(OperationContext* txn, long long term) { +Status ReplicationCoordinatorImpl::updateTerm(OperationContext* opCtx, long long term) { // Term is only valid if we are replicating. if (getReplicationMode() != modeReplSet) { return {ErrorCodes::BadValue, "cannot supply 'term' without active replication"}; @@ -3417,7 +3418,7 @@ Status ReplicationCoordinatorImpl::updateTerm(OperationContext* txn, long long t } // Check we haven't acquired any lock, because potential stepdown needs global lock. 
- dassert(!txn->lockState()->isLocked()); + dassert(!opCtx->lockState()->isLocked()); TopologyCoordinator::UpdateTermResult updateTermResult; EventHandle finishEvh; @@ -3469,12 +3470,12 @@ EventHandle ReplicationCoordinatorImpl::_updateTerm_incallback( return EventHandle(); } -SnapshotName ReplicationCoordinatorImpl::reserveSnapshotName(OperationContext* txn) { +SnapshotName ReplicationCoordinatorImpl::reserveSnapshotName(OperationContext* opCtx) { auto reservedName = SnapshotName(_snapshotNameGenerator.addAndFetch(1)); dassert(reservedName > SnapshotName::min()); dassert(reservedName < SnapshotName::max()); - if (txn) { - ReplClientInfo::forClient(txn->getClient()).setLastSnapshot(reservedName); + if (opCtx) { + ReplClientInfo::forClient(opCtx->getClient()).setLastSnapshot(reservedName); } return reservedName; } @@ -3483,12 +3484,12 @@ void ReplicationCoordinatorImpl::forceSnapshotCreation() { _externalState->forceSnapshotCreation(); } -void ReplicationCoordinatorImpl::waitUntilSnapshotCommitted(OperationContext* txn, +void ReplicationCoordinatorImpl::waitUntilSnapshotCommitted(OperationContext* opCtx, const SnapshotName& untilSnapshot) { stdx::unique_lock<stdx::mutex> lock(_mutex); while (!_currentCommittedSnapshot || _currentCommittedSnapshot->name < untilSnapshot) { - txn->waitForConditionOrInterrupt(_currentCommittedSnapshotCond, lock); + opCtx->waitForConditionOrInterrupt(_currentCommittedSnapshotCond, lock); } } @@ -3496,11 +3497,11 @@ size_t ReplicationCoordinatorImpl::getNumUncommittedSnapshots() { return _uncommittedSnapshotsSize.load(); } -void ReplicationCoordinatorImpl::createSnapshot(OperationContext* txn, +void ReplicationCoordinatorImpl::createSnapshot(OperationContext* opCtx, OpTime timeOfSnapshot, SnapshotName name) { stdx::lock_guard<stdx::mutex> lock(_mutex); - _externalState->createSnapshot(txn, name); + _externalState->createSnapshot(opCtx, name); auto snapshotInfo = SnapshotInfo{timeOfSnapshot, name}; if (timeOfSnapshot <= 
_lastCommittedOpTime) { @@ -3588,10 +3589,10 @@ EventHandle ReplicationCoordinatorImpl::_resetElectionInfoOnProtocolVersionUpgra if (cbData.status == ErrorCodes::CallbackCanceled) { return; } - invariant(cbData.txn); + invariant(cbData.opCtx); LastVote lastVote{OpTime::kInitialTerm, -1}; - auto status = _externalState->storeLocalLastVoteDocument(cbData.txn, lastVote); + auto status = _externalState->storeLocalLastVoteDocument(cbData.opCtx, lastVote); invariant(status.isOK()); _replExecutor.signalEvent(evh); }); diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h index 5b2722eaab2..1fa82993ff6 100644 --- a/src/mongo/db/repl/replication_coordinator_impl.h +++ b/src/mongo/db/repl/replication_coordinator_impl.h @@ -101,9 +101,9 @@ public: // ================== Members of public ReplicationCoordinator API =================== - virtual void startup(OperationContext* txn) override; + virtual void startup(OperationContext* opCtx) override; - virtual void shutdown(OperationContext* txn) override; + virtual void shutdown(OperationContext* opCtx) override; virtual ReplicationExecutor* getExecutor() override { return &_replExecutor; @@ -124,34 +124,34 @@ public: virtual void clearSyncSourceBlacklist() override; virtual ReplicationCoordinator::StatusAndDuration awaitReplication( - OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern); + OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern); virtual ReplicationCoordinator::StatusAndDuration awaitReplicationOfLastOpForClient( - OperationContext* txn, const WriteConcernOptions& writeConcern); + OperationContext* opCtx, const WriteConcernOptions& writeConcern); - virtual Status stepDown(OperationContext* txn, + virtual Status stepDown(OperationContext* opCtx, bool force, const Milliseconds& waitTime, const Milliseconds& stepdownTime); virtual bool isMasterForReportingPurposes(); - virtual bool 
canAcceptWritesForDatabase(OperationContext* txn, StringData dbName); - virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* txn, StringData dbName); + virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName); + virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName); - bool canAcceptWritesFor(OperationContext* txn, const NamespaceString& ns) override; - bool canAcceptWritesFor_UNSAFE(OperationContext* txn, const NamespaceString& ns) override; + bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceString& ns) override; + bool canAcceptWritesFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns) override; virtual Status checkIfWriteConcernCanBeSatisfied(const WriteConcernOptions& writeConcern) const; - virtual Status checkCanServeReadsFor(OperationContext* txn, + virtual Status checkCanServeReadsFor(OperationContext* opCtx, const NamespaceString& ns, bool slaveOk); - virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* txn, + virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns, bool slaveOk); - virtual bool shouldRelaxIndexConstraints(OperationContext* txn, const NamespaceString& ns); + virtual bool shouldRelaxIndexConstraints(OperationContext* opCtx, const NamespaceString& ns); virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts); @@ -168,7 +168,7 @@ public: virtual OpTime getMyLastAppliedOpTime() const override; virtual OpTime getMyLastDurableOpTime() const override; - virtual Status waitUntilOpTimeForRead(OperationContext* txn, + virtual Status waitUntilOpTimeForRead(OperationContext* opCtx, const ReadConcernArgs& settings) override; virtual OID getElectionId() override; @@ -181,14 +181,14 @@ public: virtual ApplierState getApplierState() override; - virtual void signalDrainComplete(OperationContext* txn, + virtual void signalDrainComplete(OperationContext* opCtx, long long termWhenBufferIsEmpty) 
override; virtual Status waitForDrainFinish(Milliseconds timeout) override; virtual void signalUpstreamUpdater() override; - virtual Status resyncData(OperationContext* txn, bool waitUntilCompleted) override; + virtual Status resyncData(OperationContext* opCtx, bool waitUntilCompleted) override; virtual StatusWith<BSONObj> prepareReplSetUpdatePositionCommand( ReplSetUpdatePositionCommandStyle commandStyle) const override; @@ -214,7 +214,7 @@ public: virtual bool getMaintenanceMode() override; - virtual Status processReplSetSyncFrom(OperationContext* txn, + virtual Status processReplSetSyncFrom(OperationContext* opCtx, const HostAndPort& target, BSONObjBuilder* resultObj) override; @@ -223,11 +223,11 @@ public: virtual Status processHeartbeat(const ReplSetHeartbeatArgs& args, ReplSetHeartbeatResponse* response) override; - virtual Status processReplSetReconfig(OperationContext* txn, + virtual Status processReplSetReconfig(OperationContext* opCtx, const ReplSetReconfigArgs& args, BSONObjBuilder* resultObj) override; - virtual Status processReplSetInitiate(OperationContext* txn, + virtual Status processReplSetInitiate(OperationContext* opCtx, const BSONObj& configObj, BSONObjBuilder* resultObj) override; @@ -246,7 +246,8 @@ public: virtual Status processReplSetUpdatePosition(const UpdatePositionArgs& updates, long long* configVersion) override; - virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake) override; + virtual Status processHandshake(OperationContext* opCtx, + const HandshakeArgs& handshake) override; virtual bool buildsIndexes() override; @@ -265,7 +266,7 @@ public: virtual void blacklistSyncSource(const HostAndPort& host, Date_t until) override; - virtual void resetLastOpTimesFromOplog(OperationContext* txn) override; + virtual void resetLastOpTimesFromOplog(OperationContext* opCtx) override; virtual bool shouldChangeSyncSource( const HostAndPort& currentSource, @@ -274,7 +275,7 @@ public: virtual OpTime 
getLastCommittedOpTime() const override; - virtual Status processReplSetRequestVotes(OperationContext* txn, + virtual Status processReplSetRequestVotes(OperationContext* opCtx, const ReplSetRequestVotesArgs& args, ReplSetRequestVotesResponse* response) override; @@ -302,19 +303,19 @@ public: return _service; } - virtual Status updateTerm(OperationContext* txn, long long term) override; + virtual Status updateTerm(OperationContext* opCtx, long long term) override; - virtual SnapshotName reserveSnapshotName(OperationContext* txn) override; + virtual SnapshotName reserveSnapshotName(OperationContext* opCtx) override; virtual void forceSnapshotCreation() override; - virtual void createSnapshot(OperationContext* txn, + virtual void createSnapshot(OperationContext* opCtx, OpTime timeOfSnapshot, SnapshotName name) override; virtual OpTime getCurrentCommittedSnapshotOpTime() const override; - virtual void waitUntilSnapshotCommitted(OperationContext* txn, + virtual void waitUntilSnapshotCommitted(OperationContext* opCtx, const SnapshotName& untilSnapshot) override; virtual void appendConnectionStats(executor::ConnectionPoolStats* stats) const override; @@ -622,7 +623,7 @@ private: * operation timing to the caller. */ Status _awaitReplication_inlock(stdx::unique_lock<stdx::mutex>* lock, - OperationContext* txn, + OperationContext* opCtx, const OpTime& opTime, SnapshotName minSnapshot, const WriteConcernOptions& writeConcern); @@ -793,7 +794,7 @@ private: * config detected but more work is needed to set it as the local config (which will be * handled by the callback to _finishLoadLocalConfig). */ - bool _startLoadLocalConfig(OperationContext* txn); + bool _startLoadLocalConfig(OperationContext* opCtx); /** * Callback that finishes the work started in _startLoadLocalConfig and sets _rsConfigState @@ -807,13 +808,13 @@ private: /** * Start replicating data, and does an initial sync if needed first. 
*/ - void _startDataReplication(OperationContext* txn, + void _startDataReplication(OperationContext* opCtx, stdx::function<void()> startCompleted = nullptr); /** * Stops replicating data by stopping the applier, fetcher and such. */ - void _stopDataReplication(OperationContext* txn); + void _stopDataReplication(OperationContext* opCtx); /** * Finishes the work of processReplSetInitiate() while holding _topoMutex, in the event of diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp index 89325f7f424..5724fb43bf6 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp @@ -157,15 +157,15 @@ TEST_F(ReplCoordElectTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) { << getReplCoord()->getMemberState().toString(); ASSERT(getReplCoord()->getApplierState() == ReplicationCoordinator::ApplierState::Draining); - const auto txnPtr = makeOperationContext(); - auto& txn = *txnPtr; + const auto opCtxPtr = makeOperationContext(); + auto& opCtx = *opCtxPtr; // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true. 
IsMasterResponse imResponse; getReplCoord()->fillIsMasterForReplSet(&imResponse); ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString(); ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString(); - getReplCoord()->signalDrainComplete(&txn, getReplCoord()->getTerm()); + getReplCoord()->signalDrainComplete(&opCtx, getReplCoord()->getTerm()); getReplCoord()->fillIsMasterForReplSet(&imResponse); ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString(); ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString(); @@ -191,15 +191,15 @@ TEST_F(ReplCoordElectTest, ElectionSucceedsWhenNodeIsTheOnlyNode) { << getReplCoord()->getMemberState().toString(); ASSERT(getReplCoord()->getApplierState() == ReplicationCoordinator::ApplierState::Draining); - const auto txnPtr = makeOperationContext(); - auto& txn = *txnPtr; + const auto opCtxPtr = makeOperationContext(); + auto& opCtx = *opCtxPtr; // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true. 
IsMasterResponse imResponse; getReplCoord()->fillIsMasterForReplSet(&imResponse); ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString(); ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString(); - getReplCoord()->signalDrainComplete(&txn, getReplCoord()->getTerm()); + getReplCoord()->signalDrainComplete(&opCtx, getReplCoord()->getTerm()); getReplCoord()->fillIsMasterForReplSet(&imResponse); ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString(); ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString(); @@ -218,7 +218,7 @@ TEST_F(ReplCoordElectTest, ElectionSucceedsWhenAllNodesVoteYea) { << BSON("_id" << 3 << "host" << "node3:12345"))); assertStartSuccess(configObj, HostAndPort("node1", 12345)); - OperationContextNoop txn; + OperationContextNoop opCtx; getReplCoord()->setMyLastAppliedOpTime(OpTime{{100, 1}, 0}); getExternalState()->setLastOpTime(OpTime{{100, 1}, 0}); @@ -246,7 +246,7 @@ TEST_F(ReplCoordElectTest, ElectionFailsWhenOneNodeVotesNay) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY)); @@ -293,7 +293,7 @@ TEST_F(ReplCoordElectTest, VotesWithStringValuesAreNotCountedAsYeas) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY)); @@ -341,7 +341,7 @@ TEST_F(ReplCoordElectTest, ElectionsAbortWhenNodeTransitionsToRollbackState) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop 
txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY)); @@ -363,7 +363,7 @@ TEST_F(ReplCoordElectTest, ElectionsAbortWhenNodeTransitionsToRollbackState) { TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) { // start up, receive reconfig via heartbeat while at the same time, become candidate. // candidate state should be cleared. - OperationContextNoop txn; + OperationContextNoop opCtx; assertStartSuccess(BSON("_id" << "mySet" << "version" @@ -419,7 +419,7 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) { args.force = false; args.newConfigObj = config.toBSON(); ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress, - getReplCoord()->processReplSetReconfig(&txn, args, &result)); + getReplCoord()->processReplSetReconfig(&opCtx, args, &result)); logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2)); startCapturingLogMessages(); @@ -477,7 +477,7 @@ TEST_F(ReplCoordElectTest, StepsDownRemoteIfNodeHasHigherPriorityThanCurrentPrim auto replCoord = getReplCoord(); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY)); @@ -583,8 +583,8 @@ TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringFresh true}; BSONObjBuilder result; - const auto txn = makeOperationContext(); - ASSERT_OK(getReplCoord()->processReplSetReconfig(txn.get(), config, &result)); + const auto opCtx = makeOperationContext(); + ASSERT_OK(getReplCoord()->processReplSetReconfig(opCtx.get(), config, &result)); // Wait until election cancels. 
net->enterNetwork(); net->runReadyNetworkOperations(); @@ -629,8 +629,8 @@ TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringElect true}; BSONObjBuilder result; - const auto txn = makeOperationContext(); - ASSERT_OK(getReplCoord()->processReplSetReconfig(txn.get(), config, &result)); + const auto opCtx = makeOperationContext(); + ASSERT_OK(getReplCoord()->processReplSetReconfig(opCtx.get(), config, &result)); // Wait until election cancels. getNet()->enterNetwork(); getNet()->runReadyNetworkOperations(); diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp index 4484e3beead..e5d2dc214f7 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp @@ -210,9 +210,9 @@ void ReplicationCoordinatorImpl::_writeLastVoteForMyElection( if (cbData.status == ErrorCodes::CallbackCanceled) { return; } - invariant(cbData.txn); + invariant(cbData.opCtx); - Status status = _externalState->storeLocalLastVoteDocument(cbData.txn, lastVote); + Status status = _externalState->storeLocalLastVoteDocument(cbData.opCtx, lastVote); if (!status.isOK()) { error() << "failed to store LastVote document when voting for myself: " << status; return; diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp index e3660682e3c..be2370b7108 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp @@ -162,15 +162,15 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) { simulateCatchUpTimeout(); ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining); - const auto txnPtr = makeOperationContext(); - auto& txn = *txnPtr; + const auto opCtxPtr = makeOperationContext(); + auto& opCtx = *opCtxPtr; // Since we're still 
in drain mode, expect that we report ismaster: false, issecondary:true. IsMasterResponse imResponse; getReplCoord()->fillIsMasterForReplSet(&imResponse); ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString(); ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString(); - getReplCoord()->signalDrainComplete(&txn, getReplCoord()->getTerm()); + getReplCoord()->signalDrainComplete(&opCtx, getReplCoord()->getTerm()); getReplCoord()->fillIsMasterForReplSet(&imResponse); ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString(); ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString(); @@ -226,15 +226,15 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) { simulateCatchUpTimeout(); ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining); - const auto txnPtr = makeOperationContext(); - auto& txn = *txnPtr; + const auto opCtxPtr = makeOperationContext(); + auto& opCtx = *opCtxPtr; // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true. 
IsMasterResponse imResponse; getReplCoord()->fillIsMasterForReplSet(&imResponse); ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString(); ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString(); - getReplCoord()->signalDrainComplete(&txn, getReplCoord()->getTerm()); + getReplCoord()->signalDrainComplete(&opCtx, getReplCoord()->getTerm()); getReplCoord()->fillIsMasterForReplSet(&imResponse); ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString(); ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString(); @@ -255,7 +255,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) { << "protocolVersion" << 1); assertStartSuccess(configObj, HostAndPort("node1", 12345)); - OperationContextNoop txn; + OperationContextNoop opCtx; getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0)); getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0)); ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY)); @@ -296,7 +296,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) { << "protocolVersion" << 1); assertStartSuccess(configObj, HostAndPort("node1", 12345)); - OperationContextNoop txn; + OperationContextNoop opCtx; getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0)); getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0)); ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY)); @@ -332,7 +332,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun) assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); getReplCoord()->setMyLastDurableOpTime(time1); @@ -391,7 +391,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) { assertStartSuccess(configObj, HostAndPort("node1", 
12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); getReplCoord()->setMyLastDurableOpTime(time1); @@ -439,7 +439,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) { TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) { // start up, receive reconfig via heartbeat while at the same time, become candidate. // candidate state should be cleared. - OperationContextNoop txn; + OperationContextNoop opCtx; assertStartSuccess(BSON("_id" << "mySet" << "version" @@ -497,7 +497,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) { args.force = false; args.newConfigObj = config.toBSON(); ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress, - getReplCoord()->processReplSetReconfig(&txn, args, &result)); + getReplCoord()->processReplSetReconfig(&opCtx, args, &result)); logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2)); startCapturingLogMessages(); @@ -568,7 +568,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); getReplCoord()->setMyLastDurableOpTime(time1); @@ -619,7 +619,7 @@ TEST_F(ReplCoordTest, ElectionsAbortWhenNodeTransitionsToRollbackState) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); getReplCoord()->setMyLastDurableOpTime(time1); @@ -657,7 +657,7 @@ TEST_F(ReplCoordTest, 
ElectionFailsWhenVoteRequestResponseContainsANewerTerm) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); getReplCoord()->setMyLastDurableOpTime(time1); @@ -713,7 +713,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringDryRun) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); getReplCoord()->setMyLastDurableOpTime(time1); @@ -752,7 +752,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); getReplCoord()->setMyLastAppliedOpTime(time1); getReplCoord()->setMyLastDurableOpTime(time1); @@ -761,7 +761,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) { simulateEnoughHeartbeatsForAllNodesUp(); simulateSuccessfulDryRun(); // update to a future term before the election completes - getReplCoord()->updateTerm(&txn, 1000); + getReplCoord()->updateTerm(&opCtx, 1000); NetworkInterfaceMock* net = getNet(); net->enterNetwork(); @@ -942,7 +942,7 @@ TEST_F(PriorityTakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityTha auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime myOptime(Timestamp(100, 1), 0); replCoord->setMyLastAppliedOpTime(myOptime); replCoord->setMyLastDurableOpTime(myOptime); @@ -963,7 +963,7 @@ TEST_F(PriorityTakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityTha assertValidTakeoverDelay(config, now, 
priorityTakeoverTime, 0); // Also make sure that updating the term cancels the scheduled priority takeover. - ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(&txn, replCoord->getTerm() + 1)); + ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(&opCtx, replCoord->getTerm() + 1)); ASSERT_FALSE(replCoord->getPriorityTakeover_forTest()); } @@ -989,7 +989,7 @@ TEST_F(PriorityTakeoverTest, SuccessfulPriorityTakeover) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime myOptime(Timestamp(100, 1), 0); replCoord->setMyLastAppliedOpTime(myOptime); replCoord->setMyLastDurableOpTime(myOptime); @@ -1043,7 +1043,7 @@ TEST_F(PriorityTakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) { auto timeZero = getNet()->now(); auto now = getNet()->now(); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime currentOpTime(Timestamp(100, 5000), 0); OpTime behindOpTime(Timestamp(100, 3999), 0); OpTime closeEnoughOpTime(Timestamp(100, 4000), 0); @@ -1119,7 +1119,7 @@ TEST_F(PriorityTakeoverTest, DontCallForPriorityTakeoverWhenLaggedDifferentSecon auto timeZero = getNet()->now(); auto now = getNet()->now(); - OperationContextNoop txn; + OperationContextNoop opCtx; OpTime currentOpTime(Timestamp(100, 0), 0); OpTime behindOpTime(Timestamp(97, 0), 0); OpTime closeEnoughOpTime(Timestamp(98, 0), 0); @@ -1218,8 +1218,8 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) { true}; BSONObjBuilder result; - const auto txn = makeOperationContext(); - ASSERT_OK(getReplCoord()->processReplSetReconfig(txn.get(), config, &result)); + const auto opCtx = makeOperationContext(); + ASSERT_OK(getReplCoord()->processReplSetReconfig(opCtx.get(), config, &result)); // Wait until election cancels. 
net->enterNetwork(); net->runReadyNetworkOperations(); @@ -1264,8 +1264,8 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase) true}; BSONObjBuilder result; - const auto txn = makeOperationContext(); - ASSERT_OK(getReplCoord()->processReplSetReconfig(txn.get(), config, &result)); + const auto opCtx = makeOperationContext(); + ASSERT_OK(getReplCoord()->processReplSetReconfig(opCtx.get(), config, &result)); // Wait until election cancels. getNet()->enterNetwork(); getNet()->runReadyNetworkOperations(); @@ -1429,10 +1429,10 @@ TEST_F(PrimaryCatchUpTest, PrimaryDoNotNeedToCatchUp) { ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining); stopCapturingLogMessages(); ASSERT_EQUALS(1, countLogLinesContaining("My optime is most up-to-date, skipping catch-up")); - auto txn = makeOperationContext(); - getReplCoord()->signalDrainComplete(txn.get(), getReplCoord()->getTerm()); - Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test")); + auto opCtx = makeOperationContext(); + getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm()); + Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); } TEST_F(PrimaryCatchUpTest, PrimaryFreshnessScanTimeout) { @@ -1453,10 +1453,10 @@ TEST_F(PrimaryCatchUpTest, PrimaryFreshnessScanTimeout) { ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining); stopCapturingLogMessages(); ASSERT_EQUALS(1, countLogLinesContaining("Could not access any nodes within timeout")); - auto txn = makeOperationContext(); - getReplCoord()->signalDrainComplete(txn.get(), getReplCoord()->getTerm()); - Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test")); + auto opCtx = makeOperationContext(); + getReplCoord()->signalDrainComplete(opCtx.get(), 
getReplCoord()->getTerm()); + Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); } TEST_F(PrimaryCatchUpTest, PrimaryCatchUpSucceeds) { @@ -1483,10 +1483,10 @@ TEST_F(PrimaryCatchUpTest, PrimaryCatchUpSucceeds) { ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining); stopCapturingLogMessages(); ASSERT_EQUALS(1, countLogLinesContaining("Finished catch-up oplog after becoming primary.")); - auto txn = makeOperationContext(); - getReplCoord()->signalDrainComplete(txn.get(), getReplCoord()->getTerm()); - Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test")); + auto opCtx = makeOperationContext(); + getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm()); + Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); } TEST_F(PrimaryCatchUpTest, PrimaryCatchUpTimeout) { @@ -1507,10 +1507,10 @@ TEST_F(PrimaryCatchUpTest, PrimaryCatchUpTimeout) { ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining); stopCapturingLogMessages(); ASSERT_EQUALS(1, countLogLinesContaining("Cannot catch up oplog after becoming primary")); - auto txn = makeOperationContext(); - getReplCoord()->signalDrainComplete(txn.get(), getReplCoord()->getTerm()); - Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test")); + auto opCtx = makeOperationContext(); + getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm()); + Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); } TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringFreshnessScan) { @@ -1536,9 +1536,9 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringFreshnessScan) { 
ASSERT(getReplCoord()->getApplierState() == ApplierState::Running); stopCapturingLogMessages(); ASSERT_EQUALS(1, countLogLinesContaining("Stopped transition to primary")); - auto txn = makeOperationContext(); - Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1); - ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test")); + auto opCtx = makeOperationContext(); + Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1); + ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); } TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringCatchUp) { @@ -1564,15 +1564,15 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringCatchUp) { net->enterNetwork(); net->runReadyNetworkOperations(); net->exitNetwork(); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); // Simulate the applier signaling replCoord to exit drain mode. // At this point, we see the stepdown and reset the states. - getReplCoord()->signalDrainComplete(txn.get(), getReplCoord()->getTerm()); + getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm()); ASSERT(getReplCoord()->getApplierState() == ApplierState::Running); stopCapturingLogMessages(); ASSERT_EQUALS(1, countLogLinesContaining("Cannot catch up oplog after becoming primary")); - Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1); - ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test")); + Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1); + ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); } TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringDrainMode) { @@ -1618,15 +1618,15 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringDrainMode) { getNet()->scheduleResponse(noi, getNet()->now(), makeFreshnessScanResponse(OpTime())); }); ASSERT(replCoord->getApplierState() == ApplierState::Draining); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); { - Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1); - 
ASSERT_FALSE(replCoord->canAcceptWritesForDatabase(txn.get(), "test")); + Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1); + ASSERT_FALSE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test")); } - replCoord->signalDrainComplete(txn.get(), replCoord->getTerm()); - Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1); + replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm()); + Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1); ASSERT(replCoord->getApplierState() == ApplierState::Stopped); - ASSERT_TRUE(replCoord->canAcceptWritesForDatabase(txn.get(), "test")); + ASSERT_TRUE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test")); } } // namespace diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp index 602f3cb5f40..db42e12f13d 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp @@ -385,7 +385,7 @@ void ReplicationCoordinatorImpl::_stepDownFinish( LockGuard topoLock(_topoMutex); - invariant(cbData.txn); + invariant(cbData.opCtx); // TODO Add invariant that we've got global shared or global exclusive lock, when supported // by lock manager. 
stdx::unique_lock<stdx::mutex> lk(_mutex); @@ -496,7 +496,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore( "it is invalid: " << myIndex.getStatus(); } else { - Status status = _externalState->storeLocalConfigDocument(cbd.txn, newConfig.toBSON()); + Status status = _externalState->storeLocalConfigDocument(cbd.opCtx, newConfig.toBSON()); lk.lock(); if (!status.isOK()) { @@ -518,7 +518,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore( newConfig.getMemberAt(myIndex.getValue()).isArbiter(); if (!isArbiter && isFirstConfig) { _externalState->startThreads(_settings); - _startDataReplication(cbd.txn); + _startDataReplication(cbd.opCtx); } } @@ -558,7 +558,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish( invariant(!_rsConfig.isInitialized() || _rsConfig.getConfigVersion() < newConfig.getConfigVersion()); - if (_getMemberState_inlock().primary() && !cbData.txn) { + if (_getMemberState_inlock().primary() && !cbData.opCtx) { // Not having an OperationContext in the CallbackData means we definitely aren't holding // the global lock. Since we're primary and this reconfig could cause us to stepdown, // reschedule this work with the global exclusive lock so the stepdown is safe. 
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp index 69c9b6541c6..2aee49fd3e4 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp @@ -139,10 +139,10 @@ TEST_F(ReplCoordHBTest, NodeJoinsExistingReplSetWhenReceivingAConfigContainingTh noi = net->getNextReadyRequest(); assertMemberState(MemberState::RS_STARTUP2); - OperationContextNoop txn; + OperationContextNoop opCtx; ReplSetConfig storedConfig; ASSERT_OK(storedConfig.initialize( - unittest::assertGet(getExternalState()->loadLocalConfigDocument(&txn)))); + unittest::assertGet(getExternalState()->loadLocalConfigDocument(&opCtx)))); ASSERT_OK(storedConfig.validate()); ASSERT_EQUALS(3, storedConfig.getConfigVersion()); ASSERT_EQUALS(3, storedConfig.getNumMembers()); @@ -205,9 +205,9 @@ TEST_F(ReplCoordHBTest, noi = net->getNextReadyRequest(); assertMemberState(MemberState::RS_STARTUP, "2"); - OperationContextNoop txn; + OperationContextNoop opCtx; - StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&txn)); + StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&opCtx)); ASSERT_NOT_OK(loadedConfig.getStatus()) << loadedConfig.getValue(); exitNetwork(); } diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp index ccc53d0bff9..5f4101e9eb2 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp @@ -138,10 +138,10 @@ TEST_F(ReplCoordHBV1Test, noi = net->getNextReadyRequest(); assertMemberState(MemberState::RS_STARTUP2); - OperationContextNoop txn; + OperationContextNoop opCtx; ReplSetConfig storedConfig; ASSERT_OK(storedConfig.initialize( - 
unittest::assertGet(getExternalState()->loadLocalConfigDocument(&txn)))); + unittest::assertGet(getExternalState()->loadLocalConfigDocument(&opCtx)))); ASSERT_OK(storedConfig.validate()); ASSERT_EQUALS(3, storedConfig.getConfigVersion()); ASSERT_EQUALS(3, storedConfig.getNumMembers()); @@ -207,10 +207,10 @@ TEST_F(ReplCoordHBV1Test, noi = net->getNextReadyRequest(); assertMemberState(MemberState::RS_ARBITER); - OperationContextNoop txn; + OperationContextNoop opCtx; ReplSetConfig storedConfig; ASSERT_OK(storedConfig.initialize( - unittest::assertGet(getExternalState()->loadLocalConfigDocument(&txn)))); + unittest::assertGet(getExternalState()->loadLocalConfigDocument(&opCtx)))); ASSERT_OK(storedConfig.validate()); ASSERT_EQUALS(3, storedConfig.getConfigVersion()); ASSERT_EQUALS(3, storedConfig.getNumMembers()); @@ -276,9 +276,9 @@ TEST_F(ReplCoordHBV1Test, noi = net->getNextReadyRequest(); assertMemberState(MemberState::RS_STARTUP, "2"); - OperationContextNoop txn; + OperationContextNoop opCtx; - StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&txn)); + StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&opCtx)); ASSERT_NOT_OK(loadedConfig.getStatus()) << loadedConfig.getValue(); exitNetwork(); } diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp index 69c5f2c6fb9..926b4a47a0f 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp @@ -60,9 +60,9 @@ TEST_F(ReplCoordTest, NodeReturnsNotYetInitializedWhenReconfigReceivedPriorToIni BSONObjBuilder result; ReplSetReconfigArgs args; - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::NotYetInitialized, - getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + getReplCoord()->processReplSetReconfig(opCtx.get(), 
args, &result)); ASSERT_TRUE(result.obj().isEmpty()); } @@ -87,9 +87,9 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenReconfigReceivedWhileSecondary) { BSONObjBuilder result; ReplSetReconfigArgs args; args.force = false; - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::NotMaster, - getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); ASSERT_TRUE(result.obj().isEmpty()); } @@ -128,10 +128,10 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith << "node2:12345" << "arbiterOnly" << true))); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); // ErrorCodes::BadValue should be propagated from ReplSetConfig::initialize() ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, - getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); ASSERT_TRUE(result.obj().isEmpty()); } @@ -165,9 +165,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith << BSON("_id" << 2 << "host" << "node2:12345"))); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, - getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); ASSERT_TRUE(result.obj().isEmpty()); } @@ -205,9 +205,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith << "settings" << BSON("replicaSetId" << OID::gen())); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, - getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); 
ASSERT_TRUE(result.obj().isEmpty()); } @@ -242,18 +242,18 @@ TEST_F(ReplCoordTest, << BSON("_id" << 2 << "host" << "node2:12345"))); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, - getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); ASSERT_TRUE(result.obj().isEmpty()); } void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status, - OperationContext* txn) { + OperationContext* opCtx) { BSONObjBuilder garbage; *status = - replCoord->processReplSetInitiate(txn, + replCoord->processReplSetInitiate(opCtx, BSON("_id" << "mySet" << "version" @@ -268,7 +268,7 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status, - OperationContext* txn) { + OperationContext* opCtx) { BSONObjBuilder garbage; ReplSetReconfigArgs args; args.force = false; @@ -284,7 +284,7 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, << "node2:12345" << "priority" << 3))); - *status = replCoord->processReplSetReconfig(txn, args, &garbage); + *status = replCoord->processReplSetReconfig(opCtx, args, &garbage); } TEST_F(ReplCoordTest, @@ -307,8 +307,9 @@ TEST_F(ReplCoordTest, simulateSuccessfulV1Election(); Status status(ErrorCodes::InternalError, "Not Set"); - const auto txn = makeOperationContext(); - stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status, txn.get())); + const auto opCtx = makeOperationContext(); + stdx::thread reconfigThread( + stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get())); NetworkInterfaceMock* net = getNet(); getNet()->enterNetwork(); @@ -350,8 +351,9 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenSavingANewConfigFailsDuringRe Status status(ErrorCodes::InternalError, "Not Set"); 
getExternalState()->setStoreLocalConfigDocumentStatus( Status(ErrorCodes::OutOfDiskSpace, "The test set this")); - const auto txn = makeOperationContext(); - stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status, txn.get())); + const auto opCtx = makeOperationContext(); + stdx::thread reconfigThread( + stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get())); replyToReceivedHeartbeat(); reconfigThread.join(); @@ -377,9 +379,10 @@ TEST_F(ReplCoordTest, simulateSuccessfulV1Election(); Status status(ErrorCodes::InternalError, "Not Set"); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); // first reconfig - stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status, txn.get())); + stdx::thread reconfigThread( + stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get())); getNet()->enterNetwork(); getNet()->blackHole(getNet()->getNextReadyRequest()); getNet()->exitNetwork(); @@ -398,10 +401,10 @@ TEST_F(ReplCoordTest, << BSON("_id" << 2 << "host" << "node2:12345"))); ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress, - getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); ASSERT_TRUE(result.obj().isEmpty()); - shutdown(txn.get()); + shutdown(opCtx.get()); reconfigThread.join(); } @@ -415,8 +418,8 @@ TEST_F(ReplCoordTest, NodeReturnsConfigurationInProgressWhenReceivingAReconfigWh // initiate Status status(ErrorCodes::InternalError, "Not Set"); - const auto txn = makeOperationContext(); - stdx::thread initateThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status, txn.get())); + const auto opCtx = makeOperationContext(); + stdx::thread initateThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status, opCtx.get())); getNet()->enterNetwork(); getNet()->blackHole(getNet()->getNextReadyRequest()); getNet()->exitNetwork(); @@ -435,10 +438,10 @@ TEST_F(ReplCoordTest, 
NodeReturnsConfigurationInProgressWhenReceivingAReconfigWh << BSON("_id" << 2 << "host" << "node2:12345"))); ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress, - getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); ASSERT_TRUE(result.obj().isEmpty()); - shutdown(txn.get()); + shutdown(opCtx.get()); initateThread.join(); } @@ -462,8 +465,9 @@ TEST_F(ReplCoordTest, PrimaryNodeAcceptsNewConfigWhenReceivingAReconfigWithAComp simulateSuccessfulV1Election(); Status status(ErrorCodes::InternalError, "Not Set"); - const auto txn = makeOperationContext(); - stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status, txn.get())); + const auto opCtx = makeOperationContext(); + stdx::thread reconfigThread( + stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get())); NetworkInterfaceMock* net = getNet(); getNet()->enterNetwork(); @@ -541,9 +545,9 @@ TEST_F( ReplSetReconfigArgs args; args.force = false; args.newConfigObj = config.toBSON(); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress, - getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); getExternalState()->setStoreLocalConfigDocumentToHang(false); } @@ -568,8 +572,9 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi // start reconfigThread Status status(ErrorCodes::InternalError, "Not Set"); - const auto txn = makeOperationContext(); - stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status, txn.get())); + const auto opCtx = makeOperationContext(); + stdx::thread reconfigThread( + stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get())); // wait for reconfigThread to create network requests to ensure the replication coordinator // is in state kConfigReconfiguring @@ 
-609,7 +614,7 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi stopCapturingLogMessages(); ASSERT_EQUALS( 1, countLogLinesContaining("because already in the midst of a configuration process")); - shutdown(txn.get()); + shutdown(opCtx.get()); reconfigThread.join(); logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log()); } @@ -644,13 +649,13 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary << "node1:12345") << BSON("_id" << 2 << "host" << "node2:12345"))); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::NotMaster, - getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); // forced should succeed args.force = true; - ASSERT_OK(getReplCoord()->processReplSetReconfig(txn.get(), args, &result)); + ASSERT_OK(getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); getReplCoord()->processReplSetGetConfig(&result); // ensure forced reconfig results in a random larger version diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp index 3cb0d2d9e3b..ae8d5b5fe92 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp @@ -100,7 +100,7 @@ struct OpTimeWithTermOne { Timestamp timestamp; }; -void runSingleNodeElection(ServiceContext::UniqueOperationContext txn, +void runSingleNodeElection(ServiceContext::UniqueOperationContext opCtx, ReplicationCoordinatorImpl* replCoord, executor::NetworkInterfaceMock* net) { replCoord->setMyLastAppliedOpTime(OpTime(Timestamp(1, 0), 0)); @@ -116,15 +116,15 @@ void runSingleNodeElection(ServiceContext::UniqueOperationContext txn, ASSERT(replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Draining); 
ASSERT(replCoord->getMemberState().primary()) << replCoord->getMemberState().toString(); - replCoord->signalDrainComplete(txn.get(), replCoord->getTerm()); + replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm()); } /** * Helper that kills an operation, taking the necessary locks. */ -void killOperation(OperationContext* txn) { - stdx::lock_guard<Client> lkClient(*txn->getClient()); - txn->getServiceContext()->killOperation(txn); +void killOperation(OperationContext* opCtx) { + stdx::lock_guard<Client> lkClient(*opCtx->getClient()); + opCtx->getServiceContext()->killOperation(opCtx); } TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) { @@ -202,10 +202,10 @@ TEST_F(ReplCoordTest, NodeEntersStartupStateWhenStartingUpWithNoLocalConfig) { TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatedWithAnEmptyConfig) { init("mySet"); start(HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); BSONObjBuilder result; ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, - getReplCoord()->processReplSetInitiate(txn.get(), BSONObj(), &result)); + getReplCoord()->processReplSetInitiate(opCtx.get(), BSONObj(), &result)); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); } @@ -215,12 +215,12 @@ TEST_F(ReplCoordTest, start(HostAndPort("node1", 12345)); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); // Starting uninitialized, show that we can perform the initiate behavior. 
BSONObjBuilder result1; ASSERT_OK( - getReplCoord()->processReplSetInitiate(txn.get(), + getReplCoord()->processReplSetInitiate(opCtx.get(), BSON("_id" << "mySet" << "version" @@ -236,7 +236,7 @@ TEST_F(ReplCoordTest, BSONObjBuilder result2; ASSERT_EQUALS( ErrorCodes::AlreadyInitialized, - getReplCoord()->processReplSetInitiate(txn.get(), + getReplCoord()->processReplSetInitiate(opCtx.get(), BSON("_id" << "mySet" << "version" @@ -254,14 +254,14 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingViaANodeThatCannotBecomePrimary) { init("mySet"); start(HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); // Starting uninitialized, show that we can perform the initiate behavior. BSONObjBuilder result1; auto status = - getReplCoord()->processReplSetInitiate(txn.get(), + getReplCoord()->processReplSetInitiate(opCtx.get(), BSON("_id" << "mySet" << "version" @@ -283,16 +283,16 @@ TEST_F(ReplCoordTest, InitiateShouldSucceedWithAValidConfigEvenIfItHasFailedWithAnInvalidConfigPreviously) { init("mySet"); start(HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); BSONObjBuilder result; ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, - getReplCoord()->processReplSetInitiate(txn.get(), BSONObj(), &result)); + getReplCoord()->processReplSetInitiate(opCtx.get(), BSONObj(), &result)); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); // Having failed to initiate once, show that we can now initiate. 
BSONObjBuilder result1; ASSERT_OK( - getReplCoord()->processReplSetInitiate(txn.get(), + getReplCoord()->processReplSetInitiate(opCtx.get(), BSON("_id" << "mySet" << "version" @@ -309,10 +309,10 @@ TEST_F(ReplCoordTest, BSONObjBuilder result; init("mySet"); start(HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS( ErrorCodes::InvalidReplicaSetConfig, - getReplCoord()->processReplSetInitiate(txn.get(), + getReplCoord()->processReplSetInitiate(opCtx.get(), BSON("_id" << "mySet" << "version" @@ -326,9 +326,9 @@ TEST_F(ReplCoordTest, void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) { BSONObjBuilder garbage; auto client = getGlobalServiceContext()->makeClient("rsi"); - auto txn = client->makeOperationContext(); + auto opCtx = client->makeOperationContext(); *status = - replCoord->processReplSetInitiate(txn.get(), + replCoord->processReplSetInitiate(opCtx.get(), BSON("_id" << "mySet" << "version" @@ -411,13 +411,13 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithAConfigWithAMismatchedSetName) { init("mySet"); start(HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); BSONObjBuilder result1; ASSERT_EQUALS( ErrorCodes::InvalidReplicaSetConfig, - getReplCoord()->processReplSetInitiate(txn.get(), + getReplCoord()->processReplSetInitiate(opCtx.get(), BSON("_id" << "wrongSet" << "version" @@ -432,11 +432,11 @@ TEST_F(ReplCoordTest, TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithAnEmptyConfig) { init("mySet"); start(HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); BSONObjBuilder result1; - auto status = getReplCoord()->processReplSetInitiate(txn.get(), BSONObj(), 
&result1); + auto status = getReplCoord()->processReplSetInitiate(opCtx.get(), BSONObj(), &result1); ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status); ASSERT_STRING_CONTAINS(status.reason(), "Missing expected field \"_id\""); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); @@ -445,12 +445,12 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithAnEmpt TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithoutAn_idField) { init("mySet"); start(HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); BSONObjBuilder result1; auto status = getReplCoord()->processReplSetInitiate( - txn.get(), + opCtx.get(), BSON("version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "node1:12345"))), &result1); @@ -463,12 +463,12 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithAConfigVersionNotEqualToOne) { init("mySet"); start(HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); BSONObjBuilder result1; auto status = - getReplCoord()->processReplSetInitiate(txn.get(), + getReplCoord()->processReplSetInitiate(opCtx.get(), BSON("_id" << "mySet" << "version" @@ -485,13 +485,13 @@ TEST_F(ReplCoordTest, TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) { init(""); start(HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); BSONObjBuilder result1; ASSERT_EQUALS( ErrorCodes::NoReplicationEnabled, - getReplCoord()->processReplSetInitiate(txn.get(), + getReplCoord()->processReplSetInitiate(opCtx.get(), BSON("_id" << "mySet" << "version" @@ -506,7 +506,7 @@ TEST_F(ReplCoordTest, 
InitiateFailsWithoutReplSetFlag) { TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenInitiateCannotWriteConfigToDisk) { init("mySet"); start(HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s); BSONObjBuilder result1; @@ -514,7 +514,7 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenInitiateCannotWriteConfigToDi Status(ErrorCodes::OutOfDiskSpace, "The test set this")); ASSERT_EQUALS( ErrorCodes::OutOfDiskSpace, - getReplCoord()->processReplSetInitiate(txn.get(), + getReplCoord()->processReplSetInitiate(opCtx.get(), BSON("_id" << "mySet" << "version" @@ -602,7 +602,7 @@ TEST_F(ReplCoordTest, RollBackIDShouldIncreaseByOneWhenIncrementRollbackIDIsCall TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstAStandaloneNode) { init(""); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); OpTimeWithTermOne time(100, 1); @@ -613,7 +613,7 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstASta // Because we didn't set ReplSettings.replSet, it will think we're a standalone so // awaitReplication will always work. 
ReplicationCoordinator::StatusAndDuration statusAndDur = - getReplCoord()->awaitReplication(txn.get(), time, writeConcern); + getReplCoord()->awaitReplication(opCtx.get(), time, writeConcern); ASSERT_OK(statusAndDur.status); } @@ -621,7 +621,7 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstAMas ReplSettings settings; settings.setMaster(true); init(settings); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); OpTimeWithTermOne time(100, 1); @@ -631,7 +631,7 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstAMas writeConcern.wMode = WriteConcernOptions::kMajority; // w:majority always works on master/slave ReplicationCoordinator::StatusAndDuration statusAndDur = - getReplCoord()->awaitReplication(txn.get(), time, writeConcern); + getReplCoord()->awaitReplication(opCtx.get(), time, writeConcern); ASSERT_OK(statusAndDur.status); } @@ -655,7 +655,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASec << 2))), HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); OpTimeWithTermOne time(100, 1); @@ -666,7 +666,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASec // Node should fail to awaitReplication when not primary. 
ReplicationCoordinator::StatusAndDuration statusAndDur = - getReplCoord()->awaitReplication(txn.get(), time, writeConcern); + getReplCoord()->awaitReplication(opCtx.get(), time, writeConcern); ASSERT_EQUALS(ErrorCodes::PrimarySteppedDown, statusAndDur.status); } @@ -704,10 +704,10 @@ TEST_F(ReplCoordTest, NodeReturnsOkWhenRunningAwaitReplicationAgainstPrimaryWith simulateSuccessfulV1Election(); ASSERT(getReplCoord()->getMemberState().primary()); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ReplicationCoordinator::StatusAndDuration statusAndDur = - getReplCoord()->awaitReplication(txn.get(), time, writeConcern); + getReplCoord()->awaitReplication(opCtx.get(), time, writeConcern); ASSERT_OK(statusAndDur.status); } @@ -748,47 +748,47 @@ TEST_F(ReplCoordTest, writeConcern.wNumNodes = 1; writeConcern.syncMode = WriteConcernOptions::SyncMode::JOURNAL; - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); // 1 node waiting for time 1 ReplicationCoordinator::StatusAndDuration statusAndDur = - getReplCoord()->awaitReplication(txn.get(), time1, writeConcern); + getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); getReplCoord()->setMyLastAppliedOpTime(time1); getReplCoord()->setMyLastDurableOpTime(time1); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern); ASSERT_OK(statusAndDur.status); // 2 nodes waiting for time1 writeConcern.wNumNodes = 2; - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); // Applied is not durable and will not satisfy WriteConcern with SyncMode JOURNAL. 
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1)); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 1, time1)); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern); ASSERT_OK(statusAndDur.status); // 2 nodes waiting for time2 - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); getReplCoord()->setMyLastAppliedOpTime(time2); getReplCoord()->setMyLastDurableOpTime(time2); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 2, time2)); ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 2, time2)); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern); ASSERT_OK(statusAndDur.status); // 3 nodes waiting for time2 writeConcern.wNumNodes = 3; - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2)); ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 3, time2)); - statusAndDur = 
getReplCoord()->awaitReplication(txn.get(), time2, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern); ASSERT_OK(statusAndDur.status); } @@ -827,44 +827,44 @@ TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodes writeConcern.wTimeout = WriteConcernOptions::kNoWaiting; writeConcern.wNumNodes = 1; - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); // 1 node waiting for time 1 ReplicationCoordinator::StatusAndDuration statusAndDur = - getReplCoord()->awaitReplication(txn.get(), time1, writeConcern); + getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); getReplCoord()->setMyLastAppliedOpTime(time1); getReplCoord()->setMyLastDurableOpTime(time1); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern); ASSERT_OK(statusAndDur.status); // 2 nodes waiting for time1 writeConcern.wNumNodes = 2; - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1)); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern); ASSERT_OK(statusAndDur.status); // 2 nodes waiting for time2 - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); getReplCoord()->setMyLastAppliedOpTime(time2); getReplCoord()->setMyLastDurableOpTime(time2); - statusAndDur = 
getReplCoord()->awaitReplication(txn.get(), time2, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 2, time2)); ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 2, time2)); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern); ASSERT_OK(statusAndDur.status); // 3 nodes waiting for time2 writeConcern.wNumNodes = 3; - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2)); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern); ASSERT_OK(statusAndDur.status); } @@ -872,7 +872,7 @@ TEST_F(ReplCoordTest, NodeReturnsUnknownReplWriteConcernWhenAwaitReplicationReceivesAnInvalidWriteConcernMode) { auto service = stdx::make_unique<ServiceContextNoop>(); auto client = service->makeClient("test"); - auto txn = client->makeOperationContext(); + auto opCtx = client->makeOperationContext(); assertStartSuccess(BSON("_id" << "mySet" @@ -904,7 +904,7 @@ TEST_F(ReplCoordTest, invalidWriteConcern.wMode = "fakemode"; ReplicationCoordinator::StatusAndDuration statusAndDur = - getReplCoord()->awaitReplication(txn.get(), time1, invalidWriteConcern); + getReplCoord()->awaitReplication(opCtx.get(), time1, invalidWriteConcern); ASSERT_EQUALS(ErrorCodes::UnknownReplWriteConcern, statusAndDur.status); } @@ -913,7 +913,7 @@ TEST_F( NodeReturnsWriteConcernFailedUntilASufficientSetOfNodesHaveTheWriteAndTheWriteIsInACommittedSnapshot) { auto service 
= stdx::make_unique<ServiceContextNoop>(); auto client = service->makeClient("test"); - auto txn = client->makeOperationContext(); + auto opCtx = client->makeOperationContext(); assertStartSuccess( BSON("_id" @@ -988,11 +988,11 @@ TEST_F( getReplCoord()->setMyLastAppliedOpTime(time1); getReplCoord()->setMyLastDurableOpTime(time1); ReplicationCoordinator::StatusAndDuration statusAndDur = - getReplCoord()->awaitReplication(txn.get(), time1, majorityWriteConcern); + getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiDCWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiDCWriteConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiRackWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiRackWriteConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); // Majority satisfied but not either custom mode @@ -1000,56 +1000,57 @@ TEST_F( getReplCoord()->setLastDurableOptime_forTest(2, 1, time1); getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1); getReplCoord()->setLastDurableOptime_forTest(2, 2, time1); - getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1)); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, majorityWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern); ASSERT_OK(statusAndDur.status); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiDCWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiDCWriteConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); - statusAndDur = 
getReplCoord()->awaitReplication(txn.get(), time1, multiRackWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiRackWriteConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); // All modes satisfied getReplCoord()->setLastAppliedOptime_forTest(2, 3, time1); getReplCoord()->setLastDurableOptime_forTest(2, 3, time1); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, majorityWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern); ASSERT_OK(statusAndDur.status); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiDCWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiDCWriteConcern); ASSERT_OK(statusAndDur.status); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiRackWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiRackWriteConcern); ASSERT_OK(statusAndDur.status); // Majority also waits for the committed snapshot to be newer than all snapshots reserved by // this operation. Custom modes not affected by this. - while (getReplCoord()->reserveSnapshotName(txn.get()) <= SnapshotName(1)) { + while (getReplCoord()->reserveSnapshotName(opCtx.get()) <= SnapshotName(1)) { // These unittests "cheat" and use SnapshotName(1) without advancing the counter. Reserve // another name if we didn't get a high enough one. 
} auto zeroOpTimeInCurrentTerm = OpTime(Timestamp(0, 0), 1); - ReplClientInfo::forClient(txn.get()->getClient()).setLastOp(zeroOpTimeInCurrentTerm); + ReplClientInfo::forClient(opCtx.get()->getClient()).setLastOp(zeroOpTimeInCurrentTerm); statusAndDur = - getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), majorityWriteConcern); + getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), majorityWriteConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); statusAndDur = - getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), multiDCWriteConcern); + getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), multiDCWriteConcern); ASSERT_OK(statusAndDur.status); statusAndDur = - getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), multiRackWriteConcern); + getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), multiRackWriteConcern); ASSERT_OK(statusAndDur.status); // All modes satisfied - getReplCoord()->createSnapshot(txn.get(), time1, getReplCoord()->reserveSnapshotName(nullptr)); + getReplCoord()->createSnapshot( + opCtx.get(), time1, getReplCoord()->reserveSnapshotName(nullptr)); statusAndDur = - getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), majorityWriteConcern); + getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), majorityWriteConcern); ASSERT_OK(statusAndDur.status); statusAndDur = - getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), multiDCWriteConcern); + getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), multiDCWriteConcern); ASSERT_OK(statusAndDur.status); statusAndDur = - getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), multiRackWriteConcern); + getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), multiRackWriteConcern); ASSERT_OK(statusAndDur.status); // multiDC satisfied but not majority or multiRack @@ -1058,11 +1059,11 @@ TEST_F( getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2); 
getReplCoord()->setLastDurableOptime_forTest(2, 3, time2); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, majorityWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, majorityWriteConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, multiDCWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, multiDCWriteConcern); ASSERT_OK(statusAndDur.status); - statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, multiRackWriteConcern); + statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, multiRackWriteConcern); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status); } @@ -1080,12 +1081,12 @@ public: : _replCoord(replCoord), _service(service), _client(service->makeClient("replAwaiter")), - _txn(_client->makeOperationContext()), + _opCtx(_client->makeOperationContext()), _finished(false), _result(ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0))) {} OperationContext* getOperationContext() { - return _txn.get(); + return _opCtx.get(); } void setOpTime(const OpTime& ot) { @@ -1116,14 +1117,14 @@ public: private: void _awaitReplication() { - _result = _replCoord->awaitReplication(_txn.get(), _optime, _writeConcern); + _result = _replCoord->awaitReplication(_opCtx.get(), _optime, _writeConcern); _finished = true; } ReplicationCoordinatorImpl* _replCoord; ServiceContext* _service; ServiceContext::UniqueClient _client; - ServiceContext::UniqueOperationContext _txn; + ServiceContext::UniqueOperationContext _opCtx; bool _finished; OpTime _optime; WriteConcernOptions _writeConcern; @@ -1286,8 +1287,8 @@ TEST_F(ReplCoordTest, ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1)); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1)); { - auto txn = makeOperationContext(); - shutdown(txn.get()); + auto opCtx = 
makeOperationContext(); + shutdown(opCtx.get()); } ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult(); ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, statusAndDur.status); @@ -1320,7 +1321,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWrite getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermOne(100, 0)); simulateSuccessfulV1Election(); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ReplicationAwaiter awaiter(getReplCoord(), getServiceContext()); OpTimeWithTermOne time1(100, 1); @@ -1336,7 +1337,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWrite awaiter.start(); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1)); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1)); - ASSERT_OK(getReplCoord()->stepDown(txn.get(), true, Milliseconds(0), Milliseconds(1000))); + ASSERT_OK(getReplCoord()->stepDown(opCtx.get(), true, Milliseconds(0), Milliseconds(1000))); ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult(); ASSERT_EQUALS(ErrorCodes::PrimarySteppedDown, statusAndDur.status); awaiter.reset(); @@ -1391,11 +1392,11 @@ protected: static SharedClientAndOperation make(ServiceContext* serviceContext) { SharedClientAndOperation result; result.client = serviceContext->makeClient("StepDownThread"); - result.txn = result.client->makeOperationContext(); + result.opCtx = result.client->makeOperationContext(); return result; } std::shared_ptr<Client> client; - std::shared_ptr<OperationContext> txn; + std::shared_ptr<OperationContext> opCtx; }; std::pair<SharedClientAndOperation, stdx::future<boost::optional<Status>>> stepDown_nonBlocking( @@ -1405,7 +1406,7 @@ protected: [=](PromisedClientAndOperation operationPromise) -> boost::optional<Status> { auto result = SharedClientAndOperation::make(getServiceContext()); operationPromise.set_value(result); - return 
getReplCoord()->stepDown(result.txn.get(), force, waitTime, stepDownTime); + return getReplCoord()->stepDown(result.opCtx.get(), force, waitTime, stepDownTime); }); auto result = task.get_future(); PromisedClientAndOperation operationPromise; @@ -1443,9 +1444,9 @@ private: TEST_F(ReplCoordTest, NodeReturnsBadValueWhenUpdateTermIsRunAgainstANonReplNode) { init(ReplSettings()); ASSERT_TRUE(ReplicationCoordinator::modeNone == getReplCoord()->getReplicationMode()); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); - ASSERT_EQUALS(ErrorCodes::BadValue, getReplCoord()->updateTerm(txn.get(), 0).code()); + ASSERT_EQUALS(ErrorCodes::BadValue, getReplCoord()->updateTerm(opCtx.get(), 0).code()); } TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppliesAHigherTerm) { @@ -1471,31 +1472,31 @@ TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppli ASSERT_TRUE(getReplCoord()->getMemberState().secondary()); simulateSuccessfulV1Election(); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(1, getReplCoord()->getTerm()); ASSERT_TRUE(getReplCoord()->getMemberState().primary()); // lower term, no change - ASSERT_OK(getReplCoord()->updateTerm(txn.get(), 0)); + ASSERT_OK(getReplCoord()->updateTerm(opCtx.get(), 0)); ASSERT_EQUALS(1, getReplCoord()->getTerm()); ASSERT_TRUE(getReplCoord()->getMemberState().primary()); // same term, no change - ASSERT_OK(getReplCoord()->updateTerm(txn.get(), 1)); + ASSERT_OK(getReplCoord()->updateTerm(opCtx.get(), 1)); ASSERT_EQUALS(1, getReplCoord()->getTerm()); ASSERT_TRUE(getReplCoord()->getMemberState().primary()); // higher term, step down and change term executor::TaskExecutor::CallbackHandle cbHandle; - ASSERT_EQUALS(ErrorCodes::StaleTerm, getReplCoord()->updateTerm(txn.get(), 2).code()); + ASSERT_EQUALS(ErrorCodes::StaleTerm, getReplCoord()->updateTerm(opCtx.get(), 2).code()); // Term hasn't been incremented yet, as we need another 
try to update it after stepdown. ASSERT_EQUALS(1, getReplCoord()->getTerm()); ASSERT_TRUE(getReplCoord()->getMemberState().secondary()); // Now update term should actually update the term, as stepdown is complete. - ASSERT_EQUALS(ErrorCodes::StaleTerm, getReplCoord()->updateTerm(txn.get(), 2).code()); + ASSERT_EQUALS(ErrorCodes::StaleTerm, getReplCoord()->updateTerm(opCtx.get(), 2).code()); ASSERT_EQUALS(2, getReplCoord()->getTerm()); } @@ -1576,7 +1577,7 @@ TEST_F(ReplCoordTest, ConcurrentStepDownShouldNotSignalTheSameFinishEventMoreTha } TEST_F(StepDownTest, NodeReturnsNotMasterWhenAskedToStepDownAsANonPrimaryNode) { - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); OpTimeWithTermOne optime1(100, 1); // All nodes are caught up @@ -1585,7 +1586,7 @@ TEST_F(StepDownTest, NodeReturnsNotMasterWhenAskedToStepDownAsANonPrimaryNode) { ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(1, 1, optime1)); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(1, 2, optime1)); - Status status = getReplCoord()->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(0)); + Status status = getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(0)); ASSERT_EQUALS(ErrorCodes::NotMaster, status); ASSERT_TRUE(getReplCoord()->getMemberState().secondary()); } @@ -1601,12 +1602,13 @@ TEST_F(StepDownTest, simulateSuccessfulV1Election(); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); // Make sure stepDown cannot grab the global shared lock - Lock::GlobalWrite lk(txn->lockState()); + Lock::GlobalWrite lk(opCtx->lockState()); - Status status = getReplCoord()->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(1000)); + Status status = + getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000)); ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status); ASSERT_TRUE(getReplCoord()->getMemberState().primary()); } @@ -1710,10 +1712,10 @@ TEST_F( 
getNet()->runReadyNetworkOperations(); exitNetwork(); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ASSERT_TRUE(getReplCoord()->getMemberState().primary()); - auto status = getReplCoord()->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(1000)); + auto status = getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000)); ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status); ASSERT_TRUE(getReplCoord()->getMemberState().primary()); } @@ -1744,10 +1746,10 @@ TEST_F(StepDownTestFiveNode, getNet()->runReadyNetworkOperations(); exitNetwork(); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ASSERT_TRUE(getReplCoord()->getMemberState().primary()); - auto status = getReplCoord()->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(1000)); + auto status = getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000)); ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status); ASSERT_TRUE(getReplCoord()->getMemberState().primary()); } @@ -1779,10 +1781,10 @@ TEST_F( getNet()->runReadyNetworkOperations(); exitNetwork(); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); ASSERT_TRUE(getReplCoord()->getMemberState().primary()); - ASSERT_OK(getReplCoord()->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(1000))); + ASSERT_OK(getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000))); enterNetwork(); // So we can safely inspect the topology coordinator ASSERT_EQUALS(getNet()->now() + Seconds(1), getTopoCoord().getStepDownTime()); ASSERT_TRUE(getTopoCoord().getMemberState().secondary()); @@ -1802,9 +1804,9 @@ TEST_F(ReplCoordTest, NodeBecomesPrimaryAgainWhenStepDownTimeoutExpiresInASingle << "test1:1234"))), HostAndPort("test1", 1234)); runSingleNodeElection(makeOperationContext(), getReplCoord(), getNet()); - const auto txn = makeOperationContext(); + const auto opCtx = 
makeOperationContext(); - ASSERT_OK(getReplCoord()->stepDown(txn.get(), true, Milliseconds(0), Milliseconds(1000))); + ASSERT_OK(getReplCoord()->stepDown(opCtx.get(), true, Milliseconds(0), Milliseconds(1000))); getNet()->enterNetwork(); // Must do this before inspecting the topocoord Date_t stepdownUntil = getNet()->now() + Seconds(1); ASSERT_EQUALS(stepdownUntil, getTopoCoord().getStepDownTime()); @@ -1833,10 +1835,10 @@ TEST_F(StepDownTest, simulateSuccessfulV1Election(); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); // Try to stepDown but time out because no secondaries are caught up. - auto status = repl->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(1000)); + auto status = repl->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000)); ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status); ASSERT_TRUE(repl->getMemberState().primary()); @@ -1851,7 +1853,7 @@ TEST_F(StepDownTest, } getNet()->exitNetwork(); ASSERT_TRUE(repl->getMemberState().primary()); - status = repl->stepDown(txn.get(), true, Milliseconds(0), Milliseconds(1000)); + status = repl->stepDown(opCtx.get(), true, Milliseconds(0), Milliseconds(1000)); ASSERT_OK(status); ASSERT_TRUE(repl->getMemberState().secondary()); } @@ -2004,7 +2006,7 @@ TEST_F(StepDownTest, NodeReturnsInterruptedWhenInterruptedDuringStepDown) { // stepDown where the secondary actually has to catch up before the stepDown can succeed. 
auto result = stepDown_nonBlocking(false, Seconds(10), Seconds(60)); - killOperation(result.first.txn.get()); + killOperation(result.first.opCtx.get()); ASSERT_EQUALS(ErrorCodes::Interrupted, *result.second.get()); ASSERT_TRUE(repl->getMemberState().primary()); } @@ -2347,11 +2349,11 @@ TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) { ASSERT_EQUALS(ErrorCodes::NotSecondary, status); ASSERT_TRUE(getReplCoord()->getMemberState().primary()); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); // Step down from primary. - getReplCoord()->updateTerm(txn.get(), getReplCoord()->getTerm() + 1); + getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1); ASSERT_OK(getReplCoord()->waitForMemberState(MemberState::RS_SECONDARY, Seconds(1))); status = getReplCoord()->setMaintenanceMode(false); @@ -2383,11 +2385,11 @@ TEST_F(ReplCoordTest, DoNotAllowSettingMaintenanceModeWhileConductingAnElection) // TODO this election shouldn't have to happen. simulateSuccessfulV1Election(); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); // Step down from primary. - getReplCoord()->updateTerm(txn.get(), getReplCoord()->getTerm() + 1); + getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1); getReplCoord()->waitForMemberState(MemberState::RS_SECONDARY, Milliseconds(10 * 1000)); // Can't modify maintenance mode when running for election (before and after dry run). 
@@ -2514,7 +2516,7 @@ TEST_F(ReplCoordTest, NodeDoesNotIncludeItselfWhenRunningGetHostsWrittenToInMast settings.setMaster(true); init(settings); HostAndPort clientHost("node2:12345"); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); OID client = OID::gen(); @@ -2524,7 +2526,7 @@ TEST_F(ReplCoordTest, NodeDoesNotIncludeItselfWhenRunningGetHostsWrittenToInMast getExternalState()->setClientHostAndPort(clientHost); HandshakeArgs handshake; ASSERT_OK(handshake.initialize(BSON("handshake" << client))); - ASSERT_OK(getReplCoord()->processHandshake(txn.get(), handshake)); + ASSERT_OK(getReplCoord()->processHandshake(opCtx.get(), handshake)); getReplCoord()->setMyLastAppliedOpTime(time2); getReplCoord()->setMyLastDurableOpTime(time2); @@ -2675,11 +2677,11 @@ TEST_F(ReplCoordTest, IsMasterWithCommittedSnapshot) { time_t majorityWriteDate = 100; OpTime majorityOpTime = OpTime(Timestamp(majorityWriteDate, 1), 1); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); getReplCoord()->setMyLastAppliedOpTime(opTime); getReplCoord()->setMyLastDurableOpTime(opTime); - getReplCoord()->createSnapshot(txn.get(), majorityOpTime, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), majorityOpTime, SnapshotName(1)); ASSERT_EQUALS(majorityOpTime, getReplCoord()->getCurrentCommittedSnapshotOpTime()); IsMasterResponse response; @@ -2695,8 +2697,8 @@ TEST_F(ReplCoordTest, LogAMessageWhenShutDownBeforeReplicationStartUpFinished) { init(); startCapturingLogMessages(); { - auto txn = makeOperationContext(); - getReplCoord()->shutdown(txn.get()); + auto opCtx = makeOperationContext(); + getReplCoord()->shutdown(opCtx.get()); } stopCapturingLogMessages(); ASSERT_EQUALS(1, countLogLinesContaining("shutdown() called before startup() finished")); @@ -2735,11 +2737,11 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) { writeConcern.wTimeout = WriteConcernOptions::kNoWaiting; writeConcern.wNumNodes = 1; - auto 
txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); // receive updatePosition containing ourself, should not process the update for self UpdatePositionArgs args; @@ -2757,7 +2759,7 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) { ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0)); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); } TEST_F(ReplCoordTest, DoNotProcessSelfWhenOldUpdatePositionContainsInfoAboutSelf) { @@ -2794,11 +2796,11 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenOldUpdatePositionContainsInfoAboutSelf writeConcern.wTimeout = WriteConcernOptions::kNoWaiting; writeConcern.wNumNodes = 1; - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); // receive updatePosition containing ourself, should not process the update for self OldUpdatePositionArgs args; @@ -2814,7 +2816,7 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenOldUpdatePositionContainsInfoAboutSelf ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0)); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); } TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect) { @@ -2864,14 +2866,14 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect) << 
UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()))))); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); long long cfgver; ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, getReplCoord()->processReplSetUpdatePosition(args, &cfgver)); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); } TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionWhenItsConfigVersionIsIncorrect) { @@ -2920,14 +2922,14 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionWhenItsConfigVersionIsIncorre << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp))))); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); long long cfgver; ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, getReplCoord()->processReplSetUpdatePosition(args, &cfgver)); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); } TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConfig) { @@ -2977,12 +2979,12 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConf << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()))))); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::NodeNotFound, getReplCoord()->processReplSetUpdatePosition(args, 0)); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); } TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionOfMembersWhoseIdsAreNotInTheConfig) { @@ -3031,12 +3033,12 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionOfMembersWhoseIdsAreNotInTheC << 
OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp))))); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::NodeNotFound, getReplCoord()->processReplSetUpdatePosition(args, 0)); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); } TEST_F(ReplCoordTest, @@ -3095,19 +3097,19 @@ TEST_F(ReplCoordTest, << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp))))); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0)); - ASSERT_OK(getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status); + ASSERT_OK(getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); writeConcern.wNumNodes = 3; - ASSERT_OK(getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status); + ASSERT_OK(getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); } void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) { auto client = getGlobalServiceContext()->makeClient("rsr"); - auto txn = client->makeOperationContext(); + auto opCtx = client->makeOperationContext(); BSONObjBuilder garbage; ReplSetReconfigArgs args; @@ -3125,7 +3127,7 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) { << "node2:12345") << BSON("_id" << 2 << "host" << "node3:12345"))); - *status = replCoord->processReplSetReconfig(txn.get(), args, &garbage); + *status = replCoord->processReplSetReconfig(opCtx.get(), args, &garbage); } TEST_F(ReplCoordTest, AwaitReplicationShouldResolveAsNormalDuringAReconfig) { @@ -3205,7 +3207,7 @@ TEST_F(ReplCoordTest, AwaitReplicationShouldResolveAsNormalDuringAReconfig) { void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* status) { auto client = 
getGlobalServiceContext()->makeClient("rsr"); - auto txn = client->makeOperationContext(); + auto opCtx = client->makeOperationContext(); BSONObjBuilder garbage; ReplSetReconfigArgs args; @@ -3219,7 +3221,7 @@ void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* sta << "node1:12345") << BSON("_id" << 2 << "host" << "node3:12345"))); - *status = replCoord->processReplSetReconfig(txn.get(), args, &garbage); + *status = replCoord->processReplSetReconfig(opCtx.get(), args, &garbage); } TEST_F( @@ -3323,11 +3325,11 @@ TEST_F(ReplCoordTest, simulateSuccessfulV1Election(); OpTime time(Timestamp(100, 2), 1); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); getReplCoord()->setMyLastAppliedOpTime(time); getReplCoord()->setMyLastDurableOpTime(time); - getReplCoord()->createSnapshot(txn.get(), time, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), time, SnapshotName(1)); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time)); @@ -3350,7 +3352,7 @@ TEST_F(ReplCoordTest, writeConcern.syncMode = WriteConcernOptions::SyncMode::NONE; ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time, writeConcern2).status); + getReplCoord()->awaitReplication(opCtx.get(), time, writeConcern2).status); // reconfig to three nodes Status status(ErrorCodes::InternalError, "Not Set"); @@ -3413,30 +3415,30 @@ TEST_F(ReplCoordTest, majorityWriteConcern.wMode = WriteConcernOptions::kMajority; majorityWriteConcern.syncMode = WriteConcernOptions::SyncMode::JOURNAL; - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time, majorityWriteConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time, majorityWriteConcern).status); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time)); ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 1, time)); 
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time, majorityWriteConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time, majorityWriteConcern).status); // this member does not vote and as a result should not count towards write concern ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 3, time)); ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 3, time)); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time, majorityWriteConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time, majorityWriteConcern).status); ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 2, time)); ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 2, time)); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, - getReplCoord()->awaitReplication(txn.get(), time, majorityWriteConcern).status); + getReplCoord()->awaitReplication(opCtx.get(), time, majorityWriteConcern).status); - getReplCoord()->createSnapshot(txn.get(), time, SnapshotName(1)); - ASSERT_OK(getReplCoord()->awaitReplication(txn.get(), time, majorityWriteConcern).status); + getReplCoord()->createSnapshot(opCtx.get(), time, SnapshotName(1)); + ASSERT_OK(getReplCoord()->awaitReplication(opCtx.get(), time, majorityWriteConcern).status); } TEST_F(ReplCoordTest, @@ -3527,12 +3529,13 @@ TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDurin getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermOne(10, 0)); getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermOne(10, 0)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); - shutdown(txn.get()); + shutdown(opCtx.get()); auto status = getReplCoord()->waitUntilOpTimeForRead( - txn.get(), ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern)); + opCtx.get(), + ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern)); ASSERT_EQ(status, 
ErrorCodes::ShutdownInProgress); } @@ -3551,11 +3554,12 @@ TEST_F(ReplCoordTest, NodeReturnsInterruptedWhenWaitingUntilAnOpTimeIsInterrupte getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermOne(10, 0)); getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermOne(10, 0)); - const auto txn = makeOperationContext(); - killOperation(txn.get()); + const auto opCtx = makeOperationContext(); + killOperation(opCtx.get()); auto status = getReplCoord()->waitUntilOpTimeForRead( - txn.get(), ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern)); + opCtx.get(), + ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern)); ASSERT_EQ(status, ErrorCodes::Interrupted); } @@ -3571,9 +3575,9 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesNoOpTi << 0))), HostAndPort("node1", 12345)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); - ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead(txn.get(), ReadConcernArgs())); + ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead(opCtx.get(), ReadConcernArgs())); } TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTimePriorToOurLast) { @@ -3591,10 +3595,11 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermOne(100, 0)); getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermOne(100, 0)); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead( - txn.get(), ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern))); + opCtx.get(), + ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern))); } TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTimeEqualToOurLast) { @@ -3614,20 +3619,21 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi 
getReplCoord()->setMyLastAppliedOpTime(time); getReplCoord()->setMyLastDurableOpTime(time); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead( - txn.get(), ReadConcernArgs(time, ReadConcernLevel::kLocalReadConcern))); + opCtx.get(), ReadConcernArgs(time, ReadConcernLevel::kLocalReadConcern))); } TEST_F(ReplCoordTest, NodeReturnsNotAReplicaSetWhenWaitUntilOpTimeIsRunWithoutMajorityReadConcernEnabled) { init(ReplSettings()); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); auto status = getReplCoord()->waitUntilOpTimeForRead( - txn.get(), ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern)); + opCtx.get(), + ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern)); ASSERT_EQ(status, ErrorCodes::NotAReplicaSet); } @@ -3636,10 +3642,10 @@ TEST_F(ReplCoordTest, NodeReturnsNotAReplicaSetWhenWaitUntilOpTimeIsRunAgainstAS settings.setMajorityReadConcernEnabled(true); init(settings); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); auto status = getReplCoord()->waitUntilOpTimeForRead( - txn.get(), + opCtx.get(), ReadConcernArgs(OpTime(Timestamp(50, 0), 0), ReadConcernLevel::kMajorityReadConcern)); ASSERT_EQ(status, ErrorCodes::NotAReplicaSet); } @@ -3662,11 +3668,11 @@ TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) { getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(10, 0), 0)); getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(10, 0), 0)); - auto txn = makeOperationContext(); - shutdown(txn.get()); + auto opCtx = makeOperationContext(); + shutdown(opCtx.get()); auto status = getReplCoord()->waitUntilOpTimeForRead( - txn.get(), + opCtx.get(), ReadConcernArgs(OpTime(Timestamp(50, 0), 0), ReadConcernLevel::kMajorityReadConcern)); ASSERT_EQUALS(status, ErrorCodes::ShutdownInProgress); } @@ -3683,13 +3689,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) { << 0))), 
HostAndPort("node1", 12345)); runSingleNodeElection(makeOperationContext(), getReplCoord(), getNet()); - const auto txn = makeOperationContext(); + const auto opCtx = makeOperationContext(); getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(10, 0), 0)); getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(10, 0), 0)); - killOperation(txn.get()); + killOperation(opCtx.get()); auto status = getReplCoord()->waitUntilOpTimeForRead( - txn.get(), + opCtx.get(), ReadConcernArgs(OpTime(Timestamp(50, 0), 0), ReadConcernLevel::kMajorityReadConcern)); ASSERT_EQUALS(status, ErrorCodes::Interrupted); } @@ -3707,13 +3713,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) { HostAndPort("node1", 12345)); runSingleNodeElection(makeOperationContext(), getReplCoord(), getNet()); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 1)); getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 1)); - getReplCoord()->createSnapshot(txn.get(), OpTime(Timestamp(100, 0), 1), SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), OpTime(Timestamp(100, 0), 1), SnapshotName(1)); ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead( - txn.get(), + opCtx.get(), ReadConcernArgs(OpTime(Timestamp(50, 0), 1), ReadConcernLevel::kMajorityReadConcern))); } @@ -3729,15 +3735,15 @@ TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) { << 0))), HostAndPort("node1", 12345)); runSingleNodeElection(makeOperationContext(), getReplCoord(), getNet()); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); OpTime time(Timestamp(100, 0), 1); getReplCoord()->setMyLastAppliedOpTime(time); getReplCoord()->setMyLastDurableOpTime(time); - getReplCoord()->createSnapshot(txn.get(), time, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), time, SnapshotName(1)); ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead( - txn.get(), ReadConcernArgs(time, 
ReadConcernLevel::kMajorityReadConcern))); + opCtx.get(), ReadConcernArgs(time, ReadConcernLevel::kMajorityReadConcern))); } TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) { @@ -3762,10 +3768,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) { getReplCoord()->createSnapshot(nullptr, committedOpTime, SnapshotName(1)); }); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead( - txn.get(), + opCtx.get(), ReadConcernArgs(OpTime(Timestamp(100, 0), 1), ReadConcernLevel::kMajorityReadConcern))); } @@ -3793,10 +3799,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) { getReplCoord()->createSnapshot(nullptr, opTimeToWait, SnapshotName(1)); }); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead( - txn.get(), ReadConcernArgs(opTimeToWait, ReadConcernLevel::kMajorityReadConcern))); + opCtx.get(), ReadConcernArgs(opTimeToWait, ReadConcernLevel::kMajorityReadConcern))); pseudoLogOp.get(); } @@ -3880,13 +3886,13 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeIsNewer HostAndPort("node1", 12345)); getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY); ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime()); - auto txn = makeOperationContext(); - getReplCoord()->updateTerm(txn.get(), 1); + auto opCtx = makeOperationContext(); + getReplCoord()->updateTerm(opCtx.get(), 1); ASSERT_EQUALS(1, getReplCoord()->getTerm()); OpTime time(Timestamp(10, 0), 1); OpTime oldTime(Timestamp(9, 0), 1); - getReplCoord()->createSnapshot(txn.get(), time, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), time, SnapshotName(1)); // higher OpTime, should change getReplCoord()->advanceCommitPoint(time); @@ -3922,8 +3928,8 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr << 1), HostAndPort("node1", 12345)); 
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime()); - auto txn = makeOperationContext(); - getReplCoord()->updateTerm(txn.get(), 1); + auto opCtx = makeOperationContext(); + getReplCoord()->updateTerm(opCtx.get(), 1); ASSERT_EQUALS(1, getReplCoord()->getTerm()); // higher term, should change @@ -4001,8 +4007,8 @@ TEST_F(ReplCoordTest, << 1), HostAndPort("node1", 12345)); ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime()); - auto txn = makeOperationContext(); - getReplCoord()->updateTerm(txn.get(), 1); + auto opCtx = makeOperationContext(); + getReplCoord()->updateTerm(opCtx.get(), 1); ASSERT_EQUALS(1, getReplCoord()->getTerm()); auto replCoord = getReplCoord(); @@ -4129,8 +4135,8 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter) << 1), HostAndPort("node1", 12345)); ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime()); - auto txn = makeOperationContext(); - getReplCoord()->updateTerm(txn.get(), 1); + auto opCtx = makeOperationContext(); + getReplCoord()->updateTerm(opCtx.get(), 1); ASSERT_EQUALS(1, getReplCoord()->getTerm()); auto replCoord = getReplCoord(); @@ -4555,11 +4561,11 @@ TEST_F(ReplCoordTest, AdvanceCommittedSnapshotToMostRecentSnapshotPriorToOpTimeW OpTime time4(Timestamp(100, 4), 1); OpTime time5(Timestamp(100, 5), 1); OpTime time6(Timestamp(100, 6), 1); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); - getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1)); - getReplCoord()->createSnapshot(txn.get(), time2, SnapshotName(2)); - getReplCoord()->createSnapshot(txn.get(), time5, SnapshotName(3)); + getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), time2, SnapshotName(2)); + getReplCoord()->createSnapshot(opCtx.get(), time5, SnapshotName(3)); // ensure current snapshot follows price is right rules (closest but not greater 
than) getReplCoord()->setMyLastAppliedOpTime(time3); @@ -4589,11 +4595,11 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAnOpTimeIsNewerThanOurLat OpTime time4(Timestamp(100, 4), 1); OpTime time5(Timestamp(100, 5), 1); OpTime time6(Timestamp(100, 6), 1); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); - getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1)); - getReplCoord()->createSnapshot(txn.get(), time2, SnapshotName(2)); - getReplCoord()->createSnapshot(txn.get(), time5, SnapshotName(3)); + getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), time2, SnapshotName(2)); + getReplCoord()->createSnapshot(opCtx.get(), time5, SnapshotName(3)); // ensure current snapshot will not advance beyond existing snapshots getReplCoord()->setMyLastAppliedOpTime(time6); @@ -4621,18 +4627,18 @@ TEST_F(ReplCoordTest, OpTime time4(Timestamp(100, 4), 1); OpTime time5(Timestamp(100, 5), 1); OpTime time6(Timestamp(100, 6), 1); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); - getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1)); - getReplCoord()->createSnapshot(txn.get(), time2, SnapshotName(2)); - getReplCoord()->createSnapshot(txn.get(), time5, SnapshotName(3)); + getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), time2, SnapshotName(2)); + getReplCoord()->createSnapshot(opCtx.get(), time5, SnapshotName(3)); getReplCoord()->setMyLastAppliedOpTime(time6); getReplCoord()->setMyLastDurableOpTime(time6); ASSERT_EQUALS(time5, getReplCoord()->getCurrentCommittedSnapshotOpTime()); // ensure current snapshot updates on new snapshot if we are that far - getReplCoord()->createSnapshot(txn.get(), time6, SnapshotName(4)); + getReplCoord()->createSnapshot(opCtx.get(), time6, SnapshotName(4)); ASSERT_EQUALS(time6, getReplCoord()->getCurrentCommittedSnapshotOpTime()); } @@ -4655,11 
+4661,11 @@ TEST_F(ReplCoordTest, ZeroCommittedSnapshotWhenAllSnapshotsAreDropped) { OpTime time4(Timestamp(100, 4), 1); OpTime time5(Timestamp(100, 5), 1); OpTime time6(Timestamp(100, 6), 1); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); - getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1)); - getReplCoord()->createSnapshot(txn.get(), time2, SnapshotName(2)); - getReplCoord()->createSnapshot(txn.get(), time5, SnapshotName(3)); + getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), time2, SnapshotName(2)); + getReplCoord()->createSnapshot(opCtx.get(), time5, SnapshotName(3)); // ensure dropping all snapshots should reset the current committed snapshot getReplCoord()->dropAllSnapshots(); @@ -4681,9 +4687,9 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAppliedOpTimeChanges) { OpTime time1(Timestamp(100, 1), 1); OpTime time2(Timestamp(100, 2), 1); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); - getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1)); getReplCoord()->setMyLastAppliedOpTime(time1); ASSERT_EQUALS(OpTime(), getReplCoord()->getCurrentCommittedSnapshotOpTime()); @@ -4842,13 +4848,13 @@ TEST_F(ReplCoordTest, NewStyleUpdatePositionCmdHasMetadata) { OpTime optime(Timestamp(100, 2), 0); getReplCoord()->setMyLastAppliedOpTime(optime); getReplCoord()->setMyLastDurableOpTime(optime); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); // Set last committed optime via metadata. 
rpc::ReplSetMetadata syncSourceMetadata(optime.getTerm(), optime, optime, 1, OID(), -1, 1); getReplCoord()->processReplSetMetadata(syncSourceMetadata); getReplCoord()->advanceCommitPoint(optime); - getReplCoord()->createSnapshot(txn.get(), optime, SnapshotName(1)); + getReplCoord()->createSnapshot(opCtx.get(), optime, SnapshotName(1)); BSONObj cmd = unittest::assertGet(getReplCoord()->prepareReplSetUpdatePositionCommand( ReplicationCoordinator::ReplSetUpdatePositionCommandStyle::kNewStyle)); @@ -5070,8 +5076,8 @@ TEST_F(ReplCoordTest, WaitForDrainFinish) { ASSERT_EQUALS(ErrorCodes::BadValue, replCoord->waitForDrainFinish(Milliseconds(-1))); - const auto txn = makeOperationContext(); - replCoord->signalDrainComplete(txn.get(), replCoord->getTerm()); + const auto opCtx = makeOperationContext(); + replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm()); ASSERT_OK(replCoord->waitForDrainFinish(timeout)); // Zero timeout is fine. @@ -5346,7 +5352,7 @@ TEST_F(ReplCoordTest, NodeStoresElectionVotes) { getReplCoord()->setMyLastDurableOpTime(time); simulateSuccessfulV1Election(); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ReplSetRequestVotesArgs args; ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName" @@ -5363,11 +5369,11 @@ TEST_F(ReplCoordTest, NodeStoresElectionVotes) { << time.asOpTime().toBSON()))); ReplSetRequestVotesResponse response; - ASSERT_OK(getReplCoord()->processReplSetRequestVotes(txn.get(), args, &response)); + ASSERT_OK(getReplCoord()->processReplSetRequestVotes(opCtx.get(), args, &response)); ASSERT_EQUALS("", response.getReason()); ASSERT_TRUE(response.getVoteGranted()); - auto lastVote = getExternalState()->loadLocalLastVoteDocument(txn.get()); + auto lastVote = getExternalState()->loadLocalLastVoteDocument(opCtx.get()); ASSERT_OK(lastVote.getStatus()); // This is not a dry-run election so the last vote should include the new term and candidate. 
@@ -5400,7 +5406,7 @@ TEST_F(ReplCoordTest, NodeDoesNotStoreDryRunVotes) { getReplCoord()->setMyLastDurableOpTime(time); simulateSuccessfulV1Election(); - auto txn = makeOperationContext(); + auto opCtx = makeOperationContext(); ReplSetRequestVotesArgs args; ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName" @@ -5417,11 +5423,11 @@ TEST_F(ReplCoordTest, NodeDoesNotStoreDryRunVotes) { << time.asOpTime().toBSON()))); ReplSetRequestVotesResponse response; - ASSERT_OK(getReplCoord()->processReplSetRequestVotes(txn.get(), args, &response)); + ASSERT_OK(getReplCoord()->processReplSetRequestVotes(opCtx.get(), args, &response)); ASSERT_EQUALS("", response.getReason()); ASSERT_TRUE(response.getVoteGranted()); - auto lastVote = getExternalState()->loadLocalLastVoteDocument(txn.get()); + auto lastVote = getExternalState()->loadLocalLastVoteDocument(opCtx.get()); ASSERT_OK(lastVote.getStatus()); // This is a dry-run election so the last vote should not be updated with the new term and diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp index e72083ef012..27d62c0af1e 100644 --- a/src/mongo/db/repl/replication_coordinator_mock.cpp +++ b/src/mongo/db/repl/replication_coordinator_mock.cpp @@ -50,7 +50,7 @@ ReplicationCoordinatorMock::ReplicationCoordinatorMock(ServiceContext* service, ReplicationCoordinatorMock::~ReplicationCoordinatorMock() {} -void ReplicationCoordinatorMock::startup(OperationContext* txn) { +void ReplicationCoordinatorMock::startup(OperationContext* opCtx) { // TODO } @@ -97,18 +97,18 @@ Seconds ReplicationCoordinatorMock::getSlaveDelaySecs() const { void ReplicationCoordinatorMock::clearSyncSourceBlacklist() {} ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorMock::awaitReplication( - OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern) { + OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern) { 
// TODO return StatusAndDuration(Status::OK(), Milliseconds(0)); } ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorMock::awaitReplicationOfLastOpForClient( - OperationContext* txn, const WriteConcernOptions& writeConcern) { + OperationContext* opCtx, const WriteConcernOptions& writeConcern) { return StatusAndDuration(Status::OK(), Milliseconds(0)); } -Status ReplicationCoordinatorMock::stepDown(OperationContext* txn, +Status ReplicationCoordinatorMock::stepDown(OperationContext* opCtx, bool force, const Milliseconds& waitTime, const Milliseconds& stepdownTime) { @@ -120,7 +120,7 @@ bool ReplicationCoordinatorMock::isMasterForReportingPurposes() { return true; } -bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(OperationContext* txn, +bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName) { // Return true if we allow writes explicitly even when not in primary state, as in sharding // unit tests, so that the op observers can fire but the tests don't have to set all the states @@ -131,38 +131,38 @@ bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(OperationContext* tx return dbName == "local" || _memberState.primary() || _settings.isMaster(); } -bool ReplicationCoordinatorMock::canAcceptWritesForDatabase_UNSAFE(OperationContext* txn, +bool ReplicationCoordinatorMock::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName) { - return canAcceptWritesForDatabase(txn, dbName); + return canAcceptWritesForDatabase(opCtx, dbName); } -bool ReplicationCoordinatorMock::canAcceptWritesFor(OperationContext* txn, +bool ReplicationCoordinatorMock::canAcceptWritesFor(OperationContext* opCtx, const NamespaceString& ns) { // TODO - return canAcceptWritesForDatabase(txn, ns.db()); + return canAcceptWritesForDatabase(opCtx, ns.db()); } -bool ReplicationCoordinatorMock::canAcceptWritesFor_UNSAFE(OperationContext* txn, +bool 
ReplicationCoordinatorMock::canAcceptWritesFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns) { - return canAcceptWritesFor(txn, ns); + return canAcceptWritesFor(opCtx, ns); } -Status ReplicationCoordinatorMock::checkCanServeReadsFor(OperationContext* txn, +Status ReplicationCoordinatorMock::checkCanServeReadsFor(OperationContext* opCtx, const NamespaceString& ns, bool slaveOk) { // TODO return Status::OK(); } -Status ReplicationCoordinatorMock::checkCanServeReadsFor_UNSAFE(OperationContext* txn, +Status ReplicationCoordinatorMock::checkCanServeReadsFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns, bool slaveOk) { - return checkCanServeReadsFor(txn, ns, slaveOk); + return checkCanServeReadsFor(opCtx, ns, slaveOk); } -bool ReplicationCoordinatorMock::shouldRelaxIndexConstraints(OperationContext* txn, +bool ReplicationCoordinatorMock::shouldRelaxIndexConstraints(OperationContext* opCtx, const NamespaceString& ns) { - return !canAcceptWritesFor(txn, ns); + return !canAcceptWritesFor(opCtx, ns); } Status ReplicationCoordinatorMock::setLastOptimeForSlave(const OID& rid, const Timestamp& ts) { @@ -205,7 +205,7 @@ OpTime ReplicationCoordinatorMock::getMyLastDurableOpTime() const { return _myLastDurableOpTime; } -Status ReplicationCoordinatorMock::waitUntilOpTimeForRead(OperationContext* txn, +Status ReplicationCoordinatorMock::waitUntilOpTimeForRead(OperationContext* opCtx, const ReadConcernArgs& settings) { return Status::OK(); } @@ -242,7 +242,7 @@ Status ReplicationCoordinatorMock::waitForDrainFinish(Milliseconds timeout) { void ReplicationCoordinatorMock::signalUpstreamUpdater() {} -Status ReplicationCoordinatorMock::resyncData(OperationContext* txn, bool waitUntilCompleted) { +Status ReplicationCoordinatorMock::resyncData(OperationContext* opCtx, bool waitUntilCompleted) { return Status::OK(); } @@ -297,7 +297,7 @@ bool ReplicationCoordinatorMock::getMaintenanceMode() { return false; } -Status 
ReplicationCoordinatorMock::processReplSetSyncFrom(OperationContext* txn, +Status ReplicationCoordinatorMock::processReplSetSyncFrom(OperationContext* opCtx, const HostAndPort& target, BSONObjBuilder* resultObj) { // TODO @@ -314,13 +314,13 @@ Status ReplicationCoordinatorMock::processHeartbeat(const ReplSetHeartbeatArgs& return Status::OK(); } -Status ReplicationCoordinatorMock::processReplSetReconfig(OperationContext* txn, +Status ReplicationCoordinatorMock::processReplSetReconfig(OperationContext* opCtx, const ReplSetReconfigArgs& args, BSONObjBuilder* resultObj) { return Status::OK(); } -Status ReplicationCoordinatorMock::processReplSetInitiate(OperationContext* txn, +Status ReplicationCoordinatorMock::processReplSetInitiate(OperationContext* opCtx, const BSONObj& configObj, BSONObjBuilder* resultObj) { return Status::OK(); @@ -355,7 +355,7 @@ Status ReplicationCoordinatorMock::processReplSetUpdatePosition(const UpdatePosi return Status::OK(); } -Status ReplicationCoordinatorMock::processHandshake(OperationContext* txn, +Status ReplicationCoordinatorMock::processHandshake(OperationContext* opCtx, const HandshakeArgs& handshake) { return Status::OK(); } @@ -394,7 +394,7 @@ HostAndPort ReplicationCoordinatorMock::chooseNewSyncSource(const OpTime& lastOp void ReplicationCoordinatorMock::blacklistSyncSource(const HostAndPort& host, Date_t until) {} -void ReplicationCoordinatorMock::resetLastOpTimesFromOplog(OperationContext* txn) { +void ReplicationCoordinatorMock::resetLastOpTimesFromOplog(OperationContext* opCtx) { invariant(false); } @@ -410,7 +410,7 @@ OpTime ReplicationCoordinatorMock::getLastCommittedOpTime() const { } Status ReplicationCoordinatorMock::processReplSetRequestVotes( - OperationContext* txn, + OperationContext* opCtx, const ReplSetRequestVotesArgs& args, ReplSetRequestVotesResponse* response) { return Status::OK(); @@ -439,17 +439,17 @@ long long ReplicationCoordinatorMock::getTerm() { return OpTime::kInitialTerm; } -Status 
ReplicationCoordinatorMock::updateTerm(OperationContext* txn, long long term) { +Status ReplicationCoordinatorMock::updateTerm(OperationContext* opCtx, long long term) { return Status::OK(); } -SnapshotName ReplicationCoordinatorMock::reserveSnapshotName(OperationContext* txn) { +SnapshotName ReplicationCoordinatorMock::reserveSnapshotName(OperationContext* opCtx) { return SnapshotName(_snapshotNameGenerator.addAndFetch(1)); } void ReplicationCoordinatorMock::forceSnapshotCreation() {} -void ReplicationCoordinatorMock::createSnapshot(OperationContext* txn, +void ReplicationCoordinatorMock::createSnapshot(OperationContext* opCtx, OpTime timeOfSnapshot, SnapshotName name){}; @@ -459,7 +459,7 @@ OpTime ReplicationCoordinatorMock::getCurrentCommittedSnapshotOpTime() const { return OpTime(); } -void ReplicationCoordinatorMock::waitUntilSnapshotCommitted(OperationContext* txn, +void ReplicationCoordinatorMock::waitUntilSnapshotCommitted(OperationContext* opCtx, const SnapshotName& untilSnapshot) {} size_t ReplicationCoordinatorMock::getNumUncommittedSnapshots() { diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h index 4b3fd99d3ce..0e9109b1665 100644 --- a/src/mongo/db/repl/replication_coordinator_mock.h +++ b/src/mongo/db/repl/replication_coordinator_mock.h @@ -53,9 +53,9 @@ public: ReplicationCoordinatorMock(ServiceContext* service, const ReplSettings& settings); virtual ~ReplicationCoordinatorMock(); - virtual void startup(OperationContext* txn); + virtual void startup(OperationContext* opCtx); - virtual void shutdown(OperationContext* txn); + virtual void shutdown(OperationContext* opCtx); virtual ReplicationExecutor* getExecutor() override { return nullptr; @@ -78,36 +78,36 @@ public: virtual void clearSyncSourceBlacklist(); virtual ReplicationCoordinator::StatusAndDuration awaitReplication( - OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern); + OperationContext* opCtx, 
const OpTime& opTime, const WriteConcernOptions& writeConcern); virtual ReplicationCoordinator::StatusAndDuration awaitReplicationOfLastOpForClient( - OperationContext* txn, const WriteConcernOptions& writeConcern); + OperationContext* opCtx, const WriteConcernOptions& writeConcern); - virtual Status stepDown(OperationContext* txn, + virtual Status stepDown(OperationContext* opCtx, bool force, const Milliseconds& waitTime, const Milliseconds& stepdownTime); virtual bool isMasterForReportingPurposes(); - virtual bool canAcceptWritesForDatabase(OperationContext* txn, StringData dbName); + virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName); - virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* txn, StringData dbName); + virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName); - bool canAcceptWritesFor(OperationContext* txn, const NamespaceString& ns) override; + bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceString& ns) override; - bool canAcceptWritesFor_UNSAFE(OperationContext* txn, const NamespaceString& ns) override; + bool canAcceptWritesFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns) override; virtual Status checkIfWriteConcernCanBeSatisfied(const WriteConcernOptions& writeConcern) const; - virtual Status checkCanServeReadsFor(OperationContext* txn, + virtual Status checkCanServeReadsFor(OperationContext* opCtx, const NamespaceString& ns, bool slaveOk); - virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* txn, + virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns, bool slaveOk); - virtual bool shouldRelaxIndexConstraints(OperationContext* txn, const NamespaceString& ns); + virtual bool shouldRelaxIndexConstraints(OperationContext* opCtx, const NamespaceString& ns); virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts); @@ -124,7 +124,7 @@ public: virtual OpTime 
getMyLastAppliedOpTime() const; virtual OpTime getMyLastDurableOpTime() const; - virtual Status waitUntilOpTimeForRead(OperationContext* txn, + virtual Status waitUntilOpTimeForRead(OperationContext* opCtx, const ReadConcernArgs& settings) override; virtual OID getElectionId(); @@ -143,7 +143,7 @@ public: virtual void signalUpstreamUpdater(); - virtual Status resyncData(OperationContext* txn, bool waitUntilCompleted) override; + virtual Status resyncData(OperationContext* opCtx, bool waitUntilCompleted) override; virtual StatusWith<BSONObj> prepareReplSetUpdatePositionCommand( ReplSetUpdatePositionCommandStyle commandStyle) const override; @@ -170,7 +170,7 @@ public: virtual bool getMaintenanceMode(); - virtual Status processReplSetSyncFrom(OperationContext* txn, + virtual Status processReplSetSyncFrom(OperationContext* opCtx, const HostAndPort& target, BSONObjBuilder* resultObj); @@ -179,11 +179,11 @@ public: virtual Status processHeartbeat(const ReplSetHeartbeatArgs& args, ReplSetHeartbeatResponse* response); - virtual Status processReplSetReconfig(OperationContext* txn, + virtual Status processReplSetReconfig(OperationContext* opCtx, const ReplSetReconfigArgs& args, BSONObjBuilder* resultObj); - virtual Status processReplSetInitiate(OperationContext* txn, + virtual Status processReplSetInitiate(OperationContext* opCtx, const BSONObj& configObj, BSONObjBuilder* resultObj); @@ -200,7 +200,7 @@ public: virtual Status processReplSetUpdatePosition(const UpdatePositionArgs& updates, long long* configVersion); - virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake); + virtual Status processHandshake(OperationContext* opCtx, const HandshakeArgs& handshake); virtual bool buildsIndexes(); @@ -216,7 +216,7 @@ public: virtual void blacklistSyncSource(const HostAndPort& host, Date_t until); - virtual void resetLastOpTimesFromOplog(OperationContext* txn); + virtual void resetLastOpTimesFromOplog(OperationContext* opCtx); virtual bool 
shouldChangeSyncSource(const HostAndPort& currentSource, const rpc::ReplSetMetadata& replMetadata, @@ -224,7 +224,7 @@ public: virtual OpTime getLastCommittedOpTime() const; - virtual Status processReplSetRequestVotes(OperationContext* txn, + virtual Status processReplSetRequestVotes(OperationContext* opCtx, const ReplSetRequestVotesArgs& args, ReplSetRequestVotesResponse* response); @@ -243,13 +243,13 @@ public: virtual long long getTerm(); - virtual Status updateTerm(OperationContext* txn, long long term); + virtual Status updateTerm(OperationContext* opCtx, long long term); - virtual SnapshotName reserveSnapshotName(OperationContext* txn); + virtual SnapshotName reserveSnapshotName(OperationContext* opCtx); virtual void forceSnapshotCreation() override; - virtual void createSnapshot(OperationContext* txn, + virtual void createSnapshot(OperationContext* opCtx, OpTime timeOfSnapshot, SnapshotName name) override; @@ -257,7 +257,7 @@ public: virtual OpTime getCurrentCommittedSnapshotOpTime() const override; - virtual void waitUntilSnapshotCommitted(OperationContext* txn, + virtual void waitUntilSnapshotCommitted(OperationContext* opCtx, const SnapshotName& untilSnapshot) override; virtual size_t getNumUncommittedSnapshots() override; diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp index 43dff9b4c06..945455d4512 100644 --- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp +++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp @@ -89,8 +89,8 @@ void ReplCoordTest::tearDown() { _externalState->setStoreLocalConfigDocumentToHang(false); } if (_callShutdown) { - auto txn = makeOperationContext(); - shutdown(txn.get()); + auto opCtx = makeOperationContext(); + shutdown(opCtx.get()); } } @@ -165,8 +165,8 @@ void ReplCoordTest::start() { init(); } - const auto txn = makeOperationContext(); - _repl->startup(txn.get()); + const auto opCtx = makeOperationContext(); + 
_repl->startup(opCtx.get()); _repl->waitForStartUpComplete_forTest(); _callShutdown = true; } @@ -362,8 +362,8 @@ void ReplCoordTest::simulateSuccessfulV1ElectionAt(Date_t electionTime) { ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString(); ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString(); { - auto txn = makeOperationContext(); - replCoord->signalDrainComplete(txn.get(), replCoord->getTerm()); + auto opCtx = makeOperationContext(); + replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm()); } ASSERT(replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Stopped); replCoord->fillIsMasterForReplSet(&imResponse); @@ -425,8 +425,8 @@ void ReplCoordTest::simulateSuccessfulElection() { ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString(); ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString(); { - auto txn = makeOperationContext(); - replCoord->signalDrainComplete(txn.get(), replCoord->getTerm()); + auto opCtx = makeOperationContext(); + replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm()); } replCoord->fillIsMasterForReplSet(&imResponse); ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString(); @@ -435,10 +435,10 @@ void ReplCoordTest::simulateSuccessfulElection() { ASSERT(replCoord->getMemberState().primary()) << replCoord->getMemberState().toString(); } -void ReplCoordTest::shutdown(OperationContext* txn) { +void ReplCoordTest::shutdown(OperationContext* opCtx) { invariant(_callShutdown); _net->exitNetwork(); - _repl->shutdown(txn); + _repl->shutdown(opCtx); _callShutdown = false; } diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.h b/src/mongo/db/repl/replication_coordinator_test_fixture.h index b9b05e46172..5ffab24a8ef 100644 --- a/src/mongo/db/repl/replication_coordinator_test_fixture.h +++ b/src/mongo/db/repl/replication_coordinator_test_fixture.h @@ -233,7 +233,7 @@ protected: /** * Shuts down the objects under test. 
*/ - void shutdown(OperationContext* txn); + void shutdown(OperationContext* opCtx); /** * Receive the heartbeat request from replication coordinator and reply with a response. diff --git a/src/mongo/db/repl/replication_executor.cpp b/src/mongo/db/repl/replication_executor.cpp index f4070ac5d9e..3f9f0c5836b 100644 --- a/src/mongo/db/repl/replication_executor.cpp +++ b/src/mongo/db/repl/replication_executor.cpp @@ -404,8 +404,8 @@ StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleDBW handle.getValue(), &_dbWorkInProgressQueue, nullptr); - auto task = [doOp](OperationContext* txn, const Status& status) { - makeNoExcept(stdx::bind(doOp, txn, status))(); + auto task = [doOp](OperationContext* opCtx, const Status& status) { + makeNoExcept(stdx::bind(doOp, opCtx, status))(); return TaskRunner::NextAction::kDisposeOperationContext; }; if (mode == MODE_NONE && nss.ns().empty()) { @@ -418,7 +418,7 @@ StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleDBW return handle; } -void ReplicationExecutor::_doOperation(OperationContext* txn, +void ReplicationExecutor::_doOperation(OperationContext* opCtx, const Status& taskRunnerStatus, const CallbackHandle& cbHandle, WorkQueue* workQueue, @@ -442,7 +442,7 @@ void ReplicationExecutor::_doOperation(OperationContext* txn, (callback->_isCanceled || !taskRunnerStatus.isOK() ? 
Status(ErrorCodes::CallbackCanceled, "Callback canceled") : Status::OK()), - txn)); + opCtx)); } lk.lock(); signalEvent_inlock(callback->_finishedEvent); @@ -461,8 +461,8 @@ ReplicationExecutor::scheduleWorkWithGlobalExclusiveLock(const CallbackFn& work) &_exclusiveLockInProgressQueue, &_terribleExLockSyncMutex); _dblockExclusiveLockTaskRunner.schedule(DatabaseTask::makeGlobalExclusiveLockTask( - [doOp](OperationContext* txn, const Status& status) { - makeNoExcept(stdx::bind(doOp, txn, status))(); + [doOp](OperationContext* opCtx, const Status& status) { + makeNoExcept(stdx::bind(doOp, opCtx, status))(); return TaskRunner::NextAction::kDisposeOperationContext; })); } diff --git a/src/mongo/db/repl/replication_executor.h b/src/mongo/db/repl/replication_executor.h index 26f8e522317..c5e20a8ac68 100644 --- a/src/mongo/db/repl/replication_executor.h +++ b/src/mongo/db/repl/replication_executor.h @@ -280,7 +280,7 @@ private: * Executes the callback referenced by "cbHandle", and moves the underlying * WorkQueue::iterator from "workQueue" into the _freeQueue. * - * "txn" is a pointer to the OperationContext. + * "opCtx" is a pointer to the OperationContext. * * "status" is the callback status from the task runner. Only possible values are * Status::OK and ErrorCodes::CallbackCanceled (when task runner is canceled). @@ -288,7 +288,7 @@ private: * If "terribleExLockSyncMutex" is not null, serializes execution of "cbHandle" with the * execution of other callbacks. 
*/ - void _doOperation(OperationContext* txn, + void _doOperation(OperationContext* opCtx, const Status& taskRunnerStatus, const CallbackHandle& cbHandle, WorkQueue* workQueue, diff --git a/src/mongo/db/repl/replication_executor_test.cpp b/src/mongo/db/repl/replication_executor_test.cpp index e630a4a2e30..831259951b4 100644 --- a/src/mongo/db/repl/replication_executor_test.cpp +++ b/src/mongo/db/repl/replication_executor_test.cpp @@ -72,12 +72,12 @@ TEST_F(ReplicationExecutorTest, ScheduleDBWorkAndExclusiveWorkConcurrently) { NamespaceString nss("mydb", "mycoll"); ReplicationExecutor& executor = getReplExecutor(); Status status1 = getDetectableErrorStatus(); - OperationContext* txn = nullptr; + OperationContext* opCtx = nullptr; using CallbackData = ReplicationExecutor::CallbackArgs; ASSERT_OK(executor .scheduleDBWork([&](const CallbackData& cbData) { status1 = cbData.status; - txn = cbData.txn; + opCtx = cbData.opCtx; barrier.countDownAndWait(); if (cbData.status != ErrorCodes::CallbackCanceled) cbData.executor->shutdown(); @@ -90,23 +90,23 @@ TEST_F(ReplicationExecutorTest, ScheduleDBWorkAndExclusiveWorkConcurrently) { executor.startup(); executor.join(); ASSERT_OK(status1); - ASSERT(txn); + ASSERT(opCtx); } TEST_F(ReplicationExecutorTest, ScheduleDBWorkWithCollectionLock) { NamespaceString nss("mydb", "mycoll"); ReplicationExecutor& executor = getReplExecutor(); Status status1 = getDetectableErrorStatus(); - OperationContext* txn = nullptr; + OperationContext* opCtx = nullptr; bool collectionIsLocked = false; using CallbackData = ReplicationExecutor::CallbackArgs; ASSERT_OK(executor .scheduleDBWork( [&](const CallbackData& cbData) { status1 = cbData.status; - txn = cbData.txn; - collectionIsLocked = txn - ? txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X) + opCtx = cbData.opCtx; + collectionIsLocked = opCtx + ? 
opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X) : false; if (cbData.status != ErrorCodes::CallbackCanceled) cbData.executor->shutdown(); @@ -117,21 +117,21 @@ TEST_F(ReplicationExecutorTest, ScheduleDBWorkWithCollectionLock) { executor.startup(); executor.join(); ASSERT_OK(status1); - ASSERT(txn); + ASSERT(opCtx); ASSERT_TRUE(collectionIsLocked); } TEST_F(ReplicationExecutorTest, ScheduleExclusiveLockOperation) { ReplicationExecutor& executor = getReplExecutor(); Status status1 = getDetectableErrorStatus(); - OperationContext* txn = nullptr; + OperationContext* opCtx = nullptr; bool lockIsW = false; using CallbackData = ReplicationExecutor::CallbackArgs; ASSERT_OK(executor .scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) { status1 = cbData.status; - txn = cbData.txn; - lockIsW = txn ? txn->lockState()->isW() : false; + opCtx = cbData.opCtx; + lockIsW = opCtx ? opCtx->lockState()->isW() : false; if (cbData.status != ErrorCodes::CallbackCanceled) cbData.executor->shutdown(); }) @@ -139,7 +139,7 @@ TEST_F(ReplicationExecutorTest, ScheduleExclusiveLockOperation) { executor.startup(); executor.join(); ASSERT_OK(status1); - ASSERT(txn); + ASSERT(opCtx); ASSERT_TRUE(lockIsW); } diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp index c75c7b38880..e0a49f5849f 100644 --- a/src/mongo/db/repl/replication_info.cpp +++ b/src/mongo/db/repl/replication_info.cpp @@ -66,7 +66,7 @@ using std::stringstream; namespace repl { -void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int level) { +void appendReplicationInfo(OperationContext* opCtx, BSONObjBuilder& result, int level) { ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator(); if (replCoord->getSettings().usingReplSets()) { IsMasterResponse isMasterResponse; @@ -95,9 +95,9 @@ void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int le list<BSONObj> src; { const NamespaceString 
localSources{"local.sources"}; - AutoGetCollectionForRead ctx(txn, localSources); + AutoGetCollectionForRead ctx(opCtx, localSources); unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan( - txn, localSources.ns(), ctx.getCollection(), PlanExecutor::YIELD_MANUAL)); + opCtx, localSources.ns(), ctx.getCollection(), PlanExecutor::YIELD_MANUAL)); BSONObj obj; PlanExecutor::ExecState state; while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { @@ -124,7 +124,7 @@ void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int le } if (level > 1) { - wassert(!txn->lockState()->isLocked()); + wassert(!opCtx->lockState()->isLocked()); // note: there is no so-style timeout on this connection; perhaps we should have // one. ScopedDbConnection conn(s["host"].valuestr()); @@ -159,7 +159,7 @@ public: return true; } - BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const { + BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const { if (!getGlobalReplicationCoordinator()->isReplEnabled()) { return BSONObj(); } @@ -167,7 +167,7 @@ public: int level = configElement.numberInt(); BSONObjBuilder result; - appendReplicationInfo(txn, result, level); + appendReplicationInfo(opCtx, result, level); getGlobalReplicationCoordinator()->processReplSetGetRBID(&result); return result.obj(); @@ -182,7 +182,7 @@ public: return false; } - BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const { + BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const { ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator(); if (!replCoord->isReplEnabled()) { return BSONObj(); @@ -199,7 +199,7 @@ public: BSONObj o; uassert(17347, "Problem reading earliest entry from oplog", - Helpers::getSingleton(txn, oplogNS.c_str(), o)); + Helpers::getSingleton(opCtx, oplogNS.c_str(), o)); result.append("earliestOptime", o["ts"].timestamp()); 
return result.obj(); } @@ -225,7 +225,7 @@ public: const BSONObj& cmdObj, std::vector<Privilege>* out) {} // No auth required CmdIsMaster() : Command("isMaster", true, "ismaster") {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string&, BSONObj& cmdObj, int, @@ -235,20 +235,20 @@ public: authenticated. */ if (cmdObj["forShell"].trueValue()) { - LastError::get(txn->getClient()).disable(); + LastError::get(opCtx->getClient()).disable(); } // Tag connections to avoid closing them on stepdown. auto hangUpElement = cmdObj["hangUpOnStepDown"]; if (!hangUpElement.eoo() && !hangUpElement.trueValue()) { - auto session = txn->getClient()->session(); + auto session = opCtx->getClient()->session(); if (session) { session->replaceTags(session->getTags() | executor::NetworkInterface::kMessagingPortKeepOpen); } } - auto& clientMetadataIsMasterState = ClientMetadataIsMasterState::get(txn->getClient()); + auto& clientMetadataIsMasterState = ClientMetadataIsMasterState::get(opCtx->getClient()); bool seenIsMaster = clientMetadataIsMasterState.hasSeenIsMaster(); if (!seenIsMaster) { clientMetadataIsMasterState.setSeenIsMaster(); @@ -271,13 +271,13 @@ public: invariant(swParseClientMetadata.getValue()); - swParseClientMetadata.getValue().get().logClientMetadata(txn->getClient()); + swParseClientMetadata.getValue().get().logClientMetadata(opCtx->getClient()); clientMetadataIsMasterState.setClientMetadata( - txn->getClient(), std::move(swParseClientMetadata.getValue())); + opCtx->getClient(), std::move(swParseClientMetadata.getValue())); } - appendReplicationInfo(txn, result, 0); + appendReplicationInfo(opCtx, result, 0); if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { // If we have feature compatibility version 3.4, use a config server mode that 3.2 @@ -302,10 +302,10 @@ public: "automationServiceDescriptor", static_cast<ServerParameter*>(nullptr)); if (parameter) - parameter->append(txn, result, 
"automationServiceDescriptor"); + parameter->append(opCtx, result, "automationServiceDescriptor"); - if (txn->getClient()->session()) { - MessageCompressorManager::forSession(txn->getClient()->session()) + if (opCtx->getClient()->session()) { + MessageCompressorManager::forSession(opCtx->getClient()->session()) .serverNegotiate(cmdObj, &result); } diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp index e45022440a3..848700d215a 100644 --- a/src/mongo/db/repl/resync.cpp +++ b/src/mongo/db/repl/resync.cpp @@ -72,7 +72,7 @@ public: } CmdResync() : Command(kResyncFieldName) {} - virtual bool run(OperationContext* txn, + virtual bool run(OperationContext* opCtx, const string& dbname, BSONObj& cmdObj, int, @@ -100,16 +100,16 @@ public: return appendCommandStatus( result, Status(ErrorCodes::NotSecondary, "primaries cannot resync")); } - uassertStatusOKWithLocation(replCoord->resyncData(txn, waitForResync), "resync", 0); + uassertStatusOKWithLocation(replCoord->resyncData(opCtx, waitForResync), "resync", 0); return true; } // Master/Slave resync. 
- ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite globalWriteLock(txn->lockState()); + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite globalWriteLock(opCtx->lockState()); // below this comment pertains only to master/slave replication if (cmdObj.getBoolField("force")) { - if (!waitForSyncToFinish(txn, errmsg)) + if (!waitForSyncToFinish(opCtx, errmsg)) return false; replAllDead = "resync forced"; } @@ -118,16 +118,16 @@ public: errmsg = "not dead, no need to resync"; return false; } - if (!waitForSyncToFinish(txn, errmsg)) + if (!waitForSyncToFinish(opCtx, errmsg)) return false; - ReplSource::forceResyncDead(txn, "client"); + ReplSource::forceResyncDead(opCtx, "client"); result.append("info", "triggered resync for all sources"); return true; } - bool waitForSyncToFinish(OperationContext* txn, string& errmsg) const { + bool waitForSyncToFinish(OperationContext* opCtx, string& errmsg) const { // Wait for slave thread to finish syncing, so sources will be be // reloaded with new saved state on next pass. Timer t; @@ -135,7 +135,7 @@ public: if (syncing.load() == 0 || t.millis() > 30000) break; { - Lock::TempRelease t(txn->lockState()); + Lock::TempRelease t(opCtx->lockState()); relinquishSyncingSome.store(1); sleepmillis(1); } diff --git a/src/mongo/db/repl/rollback_source.h b/src/mongo/db/repl/rollback_source.h index 3e8d6f55578..4e068d336ce 100644 --- a/src/mongo/db/repl/rollback_source.h +++ b/src/mongo/db/repl/rollback_source.h @@ -76,7 +76,7 @@ public: /** * Clones a single collection from the sync source. 
*/ - virtual void copyCollectionFromRemote(OperationContext* txn, + virtual void copyCollectionFromRemote(OperationContext* opCtx, const NamespaceString& nss) const = 0; /** diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp index 27513953dca..226edcc0a63 100644 --- a/src/mongo/db/repl/rollback_source_impl.cpp +++ b/src/mongo/db/repl/rollback_source_impl.cpp @@ -68,7 +68,7 @@ BSONObj RollbackSourceImpl::findOne(const NamespaceString& nss, const BSONObj& f return _getConnection()->findOne(nss.toString(), filter, NULL, QueryOption_SlaveOk).getOwned(); } -void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* txn, +void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* opCtx, const NamespaceString& nss) const { std::string errmsg; std::unique_ptr<DBClientConnection> tmpConn(new DBClientConnection()); @@ -82,7 +82,7 @@ void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* txn, uassert(15909, str::stream() << "replSet rollback error resyncing collection " << nss.ns() << ' ' << errmsg, - cloner.copyCollection(txn, nss.ns(), BSONObj(), errmsg, true)); + cloner.copyCollection(opCtx, nss.ns(), BSONObj(), errmsg, true)); } StatusWith<BSONObj> RollbackSourceImpl::getCollectionInfo(const NamespaceString& nss) const { diff --git a/src/mongo/db/repl/rollback_source_impl.h b/src/mongo/db/repl/rollback_source_impl.h index fe4b7a8aad1..55f9237949b 100644 --- a/src/mongo/db/repl/rollback_source_impl.h +++ b/src/mongo/db/repl/rollback_source_impl.h @@ -64,7 +64,8 @@ public: BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const override; - void copyCollectionFromRemote(OperationContext* txn, const NamespaceString& nss) const override; + void copyCollectionFromRemote(OperationContext* opCtx, + const NamespaceString& nss) const override; StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const override; diff --git a/src/mongo/db/repl/rs_initialsync.cpp 
b/src/mongo/db/repl/rs_initialsync.cpp index 073afcc73ec..a6a5cf78baf 100644 --- a/src/mongo/db/repl/rs_initialsync.cpp +++ b/src/mongo/db/repl/rs_initialsync.cpp @@ -84,16 +84,16 @@ MONGO_EXPORT_SERVER_PARAMETER(num3Dot2InitialSyncAttempts, int, 10); * Also resets the bgsync thread so that it reconnects its sync source after the oplog has been * truncated. */ -void truncateAndResetOplog(OperationContext* txn, +void truncateAndResetOplog(OperationContext* opCtx, ReplicationCoordinator* replCoord, BackgroundSync* bgsync) { // Add field to minvalid document to tell us to restart initial sync if we crash - StorageInterface::get(txn)->setInitialSyncFlag(txn); + StorageInterface::get(opCtx)->setInitialSyncFlag(opCtx); - AutoGetDb autoDb(txn, "local", MODE_X); + AutoGetDb autoDb(opCtx, "local", MODE_X); massert(28585, "no local database found", autoDb.getDb()); - invariant(txn->lockState()->isCollectionLockedForMode(rsOplogName, MODE_X)); + invariant(opCtx->lockState()->isCollectionLockedForMode(rsOplogName, MODE_X)); // Note: the following order is important. 
// The bgsync thread uses an empty optime as a sentinel to know to wait // for initial sync; thus, we must @@ -104,7 +104,7 @@ void truncateAndResetOplog(OperationContext* txn, replCoord->resetMyLastOpTimes(); bgsync->stop(true); bgsync->startProducerIfStopped(); - bgsync->clearBuffer(txn); + bgsync->clearBuffer(opCtx); replCoord->clearSyncSourceBlacklist(); @@ -112,15 +112,15 @@ void truncateAndResetOplog(OperationContext* txn, Collection* collection = autoDb.getDb()->getCollection(rsOplogName); fassert(28565, collection); MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - WriteUnitOfWork wunit(txn); - Status status = collection->truncate(txn); + WriteUnitOfWork wunit(opCtx); + Status status = collection->truncate(opCtx); fassert(28564, status); wunit.commit(); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "truncate", collection->ns().ns()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "truncate", collection->ns().ns()); } -bool _initialSyncClone(OperationContext* txn, +bool _initialSyncClone(OperationContext* opCtx, Cloner& cloner, const std::string& host, const std::string& db, @@ -144,10 +144,10 @@ bool _initialSyncClone(OperationContext* txn, options.createCollections = false; // Make database stable - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dbWrite(txn->lockState(), db, MODE_X); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock dbWrite(opCtx->lockState(), db, MODE_X); - Status status = cloner.copyDb(txn, db, host, options, nullptr, collections); + Status status = cloner.copyDb(opCtx, db, host, options, nullptr, collections); if (!status.isOK()) { log() << "initial sync: error while " << (dataPass ? "cloning " : "indexing ") << db << ". 
" << redact(status); @@ -155,7 +155,7 @@ bool _initialSyncClone(OperationContext* txn, } if (dataPass && (db == "admin")) { - fassertNoTrace(28619, checkAdminDatabase(txn, dbHolder().get(txn, db))); + fassertNoTrace(28619, checkAdminDatabase(opCtx, dbHolder().get(opCtx, db))); } return true; } @@ -167,7 +167,7 @@ bool _initialSyncClone(OperationContext* txn, * @param r the oplog reader. * @return if applying the oplog succeeded. */ -bool _initialSyncApplyOplog(OperationContext* txn, +bool _initialSyncApplyOplog(OperationContext* opCtx, repl::InitialSync* syncer, OplogReader* r, BackgroundSync* bgsync) { @@ -178,7 +178,7 @@ bool _initialSyncApplyOplog(OperationContext* txn, if (MONGO_FAIL_POINT(failInitSyncWithBufferedEntriesLeft)) { log() << "adding fake oplog entry to buffer."; bgsync->pushTestOpToBuffer( - txn, + opCtx, BSON("ts" << startOpTime.getTimestamp() << "t" << startOpTime.getTerm() << "v" << 1 << "op" << "n")); @@ -222,7 +222,7 @@ bool _initialSyncApplyOplog(OperationContext* txn, // apply till stopOpTime try { LOG(2) << "Applying oplog entries from " << startOpTime << " until " << stopOpTime; - syncer->oplogApplication(txn, stopOpTime); + syncer->oplogApplication(opCtx, stopOpTime); if (globalInShutdownDeprecated()) { return false; @@ -262,15 +262,15 @@ bool _initialSyncApplyOplog(OperationContext* txn, * ErrorCode::InitialSyncOplogSourceMissing if the node fails to find an sync source, Status::OK * if everything worked, and ErrorCode::InitialSyncFailure for all other error cases. 
*/ -Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) { +Status _initialSync(OperationContext* opCtx, BackgroundSync* bgsync) { log() << "initial sync pending"; - txn->setReplicatedWrites(false); - DisableDocumentValidation validationDisabler(txn); + opCtx->setReplicatedWrites(false); + DisableDocumentValidation validationDisabler(opCtx); ReplicationCoordinator* replCoord(getGlobalReplicationCoordinator()); // reset state for initial sync - truncateAndResetOplog(txn, replCoord, bgsync); + truncateAndResetOplog(opCtx, replCoord, bgsync); OplogReader r; @@ -278,7 +278,7 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) { while (r.getHost().empty()) { // We must prime the sync source selector so that it considers all candidates regardless // of oplog position, by passing in null OpTime as the last op fetched time. - r.connectToSyncSource(txn, OpTime(), OpTime(), replCoord); + r.connectToSyncSource(opCtx, OpTime(), OpTime(), replCoord); if (r.getHost().empty()) { std::string msg = @@ -306,7 +306,7 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) { } log() << "initial sync drop all databases"; - dropAllDatabasesExceptLocal(txn); + dropAllDatabasesExceptLocal(opCtx); if (MONGO_FAIL_POINT(initialSyncHangBeforeCopyingDatabases)) { log() << "initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled. 
" @@ -360,17 +360,17 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) { createCollectionParams.push_back(params); } - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dbWrite(txn->lockState(), db, MODE_X); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock dbWrite(opCtx->lockState(), db, MODE_X); - auto createStatus = cloner.createCollectionsForDb(txn, createCollectionParams, db); + auto createStatus = cloner.createCollectionsForDb(opCtx, createCollectionParams, db); if (!createStatus.isOK()) { return createStatus; } collectionsPerDb.emplace(db, std::move(collections)); } for (auto&& dbCollsPair : collectionsPerDb) { - if (!_initialSyncClone(txn, + if (!_initialSyncClone(opCtx, cloner, r.conn()->getServerAddress(), dbCollsPair.first, @@ -385,15 +385,15 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) { // prime oplog, but don't need to actually apply the op as the cloned data already reflects it. fassertStatusOK( 40142, - StorageInterface::get(txn)->insertDocument(txn, NamespaceString(rsOplogName), lastOp)); + StorageInterface::get(opCtx)->insertDocument(opCtx, NamespaceString(rsOplogName), lastOp)); OpTime lastOptime = OplogEntry(lastOp).getOpTime(); - ReplClientInfo::forClient(txn->getClient()).setLastOp(lastOptime); + ReplClientInfo::forClient(opCtx->getClient()).setLastOp(lastOptime); replCoord->setMyLastAppliedOpTime(lastOptime); setNewTimestamp(replCoord->getServiceContext(), lastOptime.getTimestamp()); std::string msg = "oplog sync 1 of 3"; log() << msg; - if (!_initialSyncApplyOplog(txn, &init, &r, bgsync)) { + if (!_initialSyncApplyOplog(opCtx, &init, &r, bgsync)) { return Status(ErrorCodes::InitialSyncFailure, str::stream() << "initial sync failed: " << msg); } @@ -404,7 +404,7 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) { // TODO: replace with "tail" instance below, since we don't need to retry/reclone missing docs. 
msg = "oplog sync 2 of 3"; log() << msg; - if (!_initialSyncApplyOplog(txn, &init, &r, bgsync)) { + if (!_initialSyncApplyOplog(opCtx, &init, &r, bgsync)) { return Status(ErrorCodes::InitialSyncFailure, str::stream() << "initial sync failed: " << msg); } @@ -413,7 +413,7 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) { msg = "initial sync building indexes"; log() << msg; for (auto&& dbCollsPair : collectionsPerDb) { - if (!_initialSyncClone(txn, + if (!_initialSyncClone(opCtx, cloner, r.conn()->getServerAddress(), dbCollsPair.first, @@ -431,14 +431,14 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) { log() << msg; InitialSync tail(bgsync, multiSyncApply); // Use the non-initial sync apply code - if (!_initialSyncApplyOplog(txn, &tail, &r, bgsync)) { + if (!_initialSyncApplyOplog(opCtx, &tail, &r, bgsync)) { return Status(ErrorCodes::InitialSyncFailure, str::stream() << "initial sync failed: " << msg); } // --------- - Status status = getGlobalAuthorizationManager()->initialize(txn); + Status status = getGlobalAuthorizationManager()->initialize(opCtx); if (!status.isOK()) { warning() << "Failed to reinitialize auth data after initial sync. " << status; return status; @@ -448,7 +448,7 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) { // Initial sync is now complete. // Clear the initial sync flag -- cannot be done under a db lock, or recursive. - StorageInterface::get(txn)->clearInitialSyncFlag(txn); + StorageInterface::get(opCtx)->clearInitialSyncFlag(opCtx); // Clear maint. mode. while (replCoord->getMaintenanceMode()) { @@ -463,20 +463,20 @@ stdx::mutex _initialSyncMutex; const auto kInitialSyncRetrySleepDuration = Seconds{5}; } // namespace -Status checkAdminDatabase(OperationContext* txn, Database* adminDb) { - // Assumes txn holds MODE_X or MODE_S lock on "admin" database. 
+Status checkAdminDatabase(OperationContext* opCtx, Database* adminDb) { + // Assumes opCtx holds MODE_X or MODE_S lock on "admin" database. if (!adminDb) { return Status::OK(); } Collection* const usersCollection = adminDb->getCollection(AuthorizationManager::usersCollectionNamespace); const bool hasUsers = - usersCollection && !Helpers::findOne(txn, usersCollection, BSONObj(), false).isNull(); + usersCollection && !Helpers::findOne(opCtx, usersCollection, BSONObj(), false).isNull(); Collection* const adminVersionCollection = adminDb->getCollection(AuthorizationManager::versionCollectionNamespace); BSONObj authSchemaVersionDocument; if (!adminVersionCollection || - !Helpers::findOne(txn, + !Helpers::findOne(opCtx, adminVersionCollection, AuthorizationManager::versionDocumentQuery, authSchemaVersionDocument)) { @@ -518,7 +518,7 @@ Status checkAdminDatabase(OperationContext* txn, Database* adminDb) { return Status::OK(); } -void syncDoInitialSync(OperationContext* txn, +void syncDoInitialSync(OperationContext* opCtx, ReplicationCoordinatorExternalState* replicationCoordinatorExternalState) { stdx::unique_lock<stdx::mutex> lk(_initialSyncMutex, stdx::defer_lock); if (!lk.try_lock()) { @@ -530,21 +530,21 @@ void syncDoInitialSync(OperationContext* txn, log() << "Starting replication fetcher thread for initial sync"; bgsync = stdx::make_unique<BackgroundSync>( replicationCoordinatorExternalState, - replicationCoordinatorExternalState->makeInitialSyncOplogBuffer(txn)); - bgsync->startup(txn); - createOplog(txn); + replicationCoordinatorExternalState->makeInitialSyncOplogBuffer(opCtx)); + bgsync->startup(opCtx); + createOplog(opCtx); } - ON_BLOCK_EXIT([txn, &bgsync]() { + ON_BLOCK_EXIT([opCtx, &bgsync]() { log() << "Stopping replication fetcher thread for initial sync"; - bgsync->shutdown(txn); - bgsync->join(txn); + bgsync->shutdown(opCtx); + bgsync->join(opCtx); }); int failedAttempts = 0; while (failedAttempts < num3Dot2InitialSyncAttempts.load()) { try { // leave 
loop when successful - Status status = _initialSync(txn, bgsync.get()); + Status status = _initialSync(opCtx, bgsync.get()); if (status.isOK()) { break; } else { diff --git a/src/mongo/db/repl/rs_initialsync.h b/src/mongo/db/repl/rs_initialsync.h index a7206ac5c2e..d621eb17954 100644 --- a/src/mongo/db/repl/rs_initialsync.h +++ b/src/mongo/db/repl/rs_initialsync.h @@ -41,13 +41,13 @@ class ReplicationCoordinatorExternalState; * Begins an initial sync of a node. This drops all data, chooses a sync source, * and runs the cloner from that sync source. The node's state is not changed. */ -void syncDoInitialSync(OperationContext* txn, +void syncDoInitialSync(OperationContext* opCtx, ReplicationCoordinatorExternalState* replicationCoordinatorExternalState); /** * Checks that the "admin" database contains a supported version of the auth data schema. */ -Status checkAdminDatabase(OperationContext* txn, Database* adminDb); +Status checkAdminDatabase(OperationContext* opCtx, Database* adminDb); } // namespace repl } // namespace mongo diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp index dc5894cafc8..dbfb3a51284 100644 --- a/src/mongo/db/repl/rs_rollback.cpp +++ b/src/mongo/db/repl/rs_rollback.cpp @@ -335,7 +335,7 @@ namespace { * information from the upstream node. If any information is fetched from the upstream node after we * have written locally, the function must be called again. */ -void checkRbidAndUpdateMinValid(OperationContext* txn, +void checkRbidAndUpdateMinValid(OperationContext* opCtx, const int rbid, const RollbackSource& rollbackSource) { // It is important that the steps are performed in order to avoid racing with upstream rollbacks @@ -357,8 +357,8 @@ void checkRbidAndUpdateMinValid(OperationContext* txn, // online until we get to that point in freshness. 
OpTime minValid = fassertStatusOK(28774, OpTime::parseFromOplogEntry(newMinValidDoc)); log() << "Setting minvalid to " << minValid; - StorageInterface::get(txn)->setAppliedThrough(txn, {}); // Use top of oplog. - StorageInterface::get(txn)->setMinValid(txn, minValid); + StorageInterface::get(opCtx)->setAppliedThrough(opCtx, {}); // Use top of oplog. + StorageInterface::get(opCtx)->setMinValid(opCtx, minValid); if (MONGO_FAIL_POINT(rollbackHangThenFailAfterWritingMinValid)) { // This log output is used in js tests so please leave it. @@ -373,7 +373,7 @@ void checkRbidAndUpdateMinValid(OperationContext* txn, } } -void syncFixUp(OperationContext* txn, +void syncFixUp(OperationContext* opCtx, const FixUpInfo& fixUpInfo, const RollbackSource& rollbackSource, ReplicationCoordinator* replCoord) { @@ -415,7 +415,7 @@ void syncFixUp(OperationContext* txn, } log() << "rollback 3.5"; - checkRbidAndUpdateMinValid(txn, fixUpInfo.rbid, rollbackSource); + checkRbidAndUpdateMinValid(opCtx, fixUpInfo.rbid, rollbackSource); // update them log() << "rollback 4 n:" << goodVersions.size(); @@ -435,25 +435,25 @@ void syncFixUp(OperationContext* txn, { - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X); - Database* db = dbHolder().openDb(txn, nss.db().toString()); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X); + Database* db = dbHolder().openDb(opCtx, nss.db().toString()); invariant(db); - WriteUnitOfWork wunit(txn); - fassertStatusOK(40359, db->dropCollectionEvenIfSystem(txn, nss)); + WriteUnitOfWork wunit(opCtx); + fassertStatusOK(40359, db->dropCollectionEvenIfSystem(opCtx, nss)); wunit.commit(); } - rollbackSource.copyCollectionFromRemote(txn, nss); + rollbackSource.copyCollectionFromRemote(opCtx, nss); } for (const string& ns : fixUpInfo.collectionsToResyncMetadata) { log() << "rollback 4.1.2 coll metadata resync " << ns; const NamespaceString nss(ns); - 
ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X); - auto db = dbHolder().openDb(txn, nss.db().toString()); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X); + auto db = dbHolder().openDb(opCtx, nss.db().toString()); invariant(db); auto collection = db->getCollection(ns); invariant(collection); @@ -490,23 +490,23 @@ void syncFixUp(OperationContext* txn, // Use default options. } - WriteUnitOfWork wuow(txn); - if (options.flagsSet || cce->getCollectionOptions(txn).flagsSet) { - cce->updateFlags(txn, options.flags); + WriteUnitOfWork wuow(opCtx); + if (options.flagsSet || cce->getCollectionOptions(opCtx).flagsSet) { + cce->updateFlags(opCtx, options.flags); } - auto status = collection->setValidator(txn, options.validator); + auto status = collection->setValidator(opCtx, options.validator); if (!status.isOK()) { throw RSFatalException(str::stream() << "Failed to set validator: " << status.toString()); } - status = collection->setValidationAction(txn, options.validationAction); + status = collection->setValidationAction(opCtx, options.validationAction); if (!status.isOK()) { throw RSFatalException(str::stream() << "Failed to set validationAction: " << status.toString()); } - status = collection->setValidationLevel(txn, options.validationLevel); + status = collection->setValidationLevel(opCtx, options.validationLevel); if (!status.isOK()) { throw RSFatalException(str::stream() << "Failed to set validationLevel: " << status.toString()); @@ -518,7 +518,7 @@ void syncFixUp(OperationContext* txn, // we did more reading from primary, so check it again for a rollback (which would mess // us up), and make minValid newer. 
log() << "rollback 4.2"; - checkRbidAndUpdateMinValid(txn, fixUpInfo.rbid, rollbackSource); + checkRbidAndUpdateMinValid(opCtx, fixUpInfo.rbid, rollbackSource); } log() << "rollback 4.6"; @@ -530,16 +530,16 @@ void syncFixUp(OperationContext* txn, invariant(!fixUpInfo.indexesToDrop.count(*it)); - ScopedTransaction transaction(txn, MODE_IX); + ScopedTransaction transaction(opCtx, MODE_IX); const NamespaceString nss(*it); - Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X); - Database* db = dbHolder().get(txn, nsToDatabaseSubstring(*it)); + Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X); + Database* db = dbHolder().get(opCtx, nsToDatabaseSubstring(*it)); if (db) { Helpers::RemoveSaver removeSaver("rollback", "", *it); // perform a collection scan and write all documents in the collection to disk std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan( - txn, *it, db->getCollection(*it), PlanExecutor::YIELD_AUTO)); + opCtx, *it, db->getCollection(*it), PlanExecutor::YIELD_AUTO)); BSONObj curObj; PlanExecutor::ExecState execState; while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, NULL))) { @@ -564,8 +564,8 @@ void syncFixUp(OperationContext* txn, throw RSFatalException(); } - WriteUnitOfWork wunit(txn); - fassertStatusOK(40360, db->dropCollectionEvenIfSystem(txn, nss)); + WriteUnitOfWork wunit(opCtx); + fassertStatusOK(40360, db->dropCollectionEvenIfSystem(opCtx, nss)); wunit.commit(); } } @@ -576,9 +576,9 @@ void syncFixUp(OperationContext* txn, const string& indexName = it->second; log() << "rollback drop index: collection: " << nss.toString() << ". 
index: " << indexName; - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X); - auto db = dbHolder().get(txn, nss.db()); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X); + auto db = dbHolder().get(opCtx, nss.db()); if (!db) { continue; } @@ -592,14 +592,14 @@ void syncFixUp(OperationContext* txn, } bool includeUnfinishedIndexes = false; auto indexDescriptor = - indexCatalog->findIndexByName(txn, indexName, includeUnfinishedIndexes); + indexCatalog->findIndexByName(opCtx, indexName, includeUnfinishedIndexes); if (!indexDescriptor) { warning() << "rollback failed to drop index " << indexName << " in " << nss.toString() << ": index not found"; continue; } - WriteUnitOfWork wunit(txn); - auto status = indexCatalog->dropIndex(txn, indexDescriptor); + WriteUnitOfWork wunit(opCtx); + auto status = indexCatalog->dropIndex(opCtx, indexDescriptor); if (!status.isOK()) { severe() << "rollback failed to drop index " << indexName << " in " << nss.toString() << ": " << status; @@ -637,9 +637,9 @@ void syncFixUp(OperationContext* txn, // TODO: Lots of overhead in context. This can be faster. const NamespaceString docNss(doc.ns); - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock docDbLock(txn->lockState(), docNss.db(), MODE_X); - OldClientContext ctx(txn, doc.ns); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock docDbLock(opCtx->lockState(), docNss.db(), MODE_X); + OldClientContext ctx(opCtx, doc.ns); Collection* collection = ctx.db()->getCollection(doc.ns); @@ -651,7 +651,7 @@ void syncFixUp(OperationContext* txn, // createCollection command and regardless, the document no longer exists. 
if (collection && removeSaver) { BSONObj obj; - bool found = Helpers::findOne(txn, collection, pattern, obj, false); + bool found = Helpers::findOne(opCtx, collection, pattern, obj, false); if (found) { auto status = removeSaver->goingToDelete(obj); if (!status.isOK()) { @@ -680,9 +680,9 @@ void syncFixUp(OperationContext* txn, // TODO: IIRC cappedTruncateAfter does not handle completely // empty. // this will crazy slow if no _id index. - const auto clock = txn->getServiceContext()->getFastClockSource(); + const auto clock = opCtx->getServiceContext()->getFastClockSource(); const auto findOneStart = clock->now(); - RecordId loc = Helpers::findOne(txn, collection, pattern, false); + RecordId loc = Helpers::findOne(opCtx, collection, pattern, false); if (clock->now() - findOneStart > Milliseconds(200)) warning() << "roll back slow no _id index for " << doc.ns << " perhaps?"; @@ -690,17 +690,17 @@ void syncFixUp(OperationContext* txn, // RecordId loc = Helpers::findById(nsd, pattern); if (!loc.isNull()) { try { - collection->cappedTruncateAfter(txn, loc, true); + collection->cappedTruncateAfter(opCtx, loc, true); } catch (const DBException& e) { if (e.getCode() == 13415) { // hack: need to just make cappedTruncate do this... 
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - WriteUnitOfWork wunit(txn); - uassertStatusOK(collection->truncate(txn)); + WriteUnitOfWork wunit(opCtx); + uassertStatusOK(collection->truncate(opCtx)); wunit.commit(); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( - txn, "truncate", collection->ns().ns()); + opCtx, "truncate", collection->ns().ns()); } else { throw e; } @@ -717,7 +717,7 @@ void syncFixUp(OperationContext* txn, << ": " << redact(e); } } else { - deleteObjects(txn, + deleteObjects(opCtx, collection, doc.ns, pattern, @@ -740,7 +740,7 @@ void syncFixUp(OperationContext* txn, UpdateLifecycleImpl updateLifecycle(requestNs); request.setLifecycle(&updateLifecycle); - update(txn, ctx.db(), request); + update(opCtx, ctx.db(), request); } } catch (const DBException& e) { log() << "exception in rollback ns:" << doc.ns << ' ' << pattern.toString() << ' ' @@ -757,10 +757,10 @@ void syncFixUp(OperationContext* txn, LOG(2) << "rollback truncate oplog after " << fixUpInfo.commonPoint.toString(); { const NamespaceString oplogNss(rsOplogName); - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock oplogDbLock(txn->lockState(), oplogNss.db(), MODE_IX); - Lock::CollectionLock oplogCollectionLoc(txn->lockState(), oplogNss.ns(), MODE_X); - OldClientContext ctx(txn, rsOplogName); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock oplogDbLock(opCtx->lockState(), oplogNss.db(), MODE_IX); + Lock::CollectionLock oplogCollectionLoc(opCtx->lockState(), oplogNss.ns(), MODE_X); + OldClientContext ctx(opCtx, rsOplogName); Collection* oplogCollection = ctx.db()->getCollection(rsOplogName); if (!oplogCollection) { fassertFailedWithStatusNoTrace(13423, @@ -768,10 +768,10 @@ void syncFixUp(OperationContext* txn, str::stream() << "Can't find " << rsOplogName)); } // TODO: fatal error if this throws? 
- oplogCollection->cappedTruncateAfter(txn, fixUpInfo.commonPointOurDiskloc, false); + oplogCollection->cappedTruncateAfter(opCtx, fixUpInfo.commonPointOurDiskloc, false); } - Status status = getGlobalAuthorizationManager()->initialize(txn); + Status status = getGlobalAuthorizationManager()->initialize(opCtx); if (!status.isOK()) { severe() << "Failed to reinitialize auth data after rollback: " << status; fassertFailedNoTrace(40366); @@ -779,16 +779,16 @@ void syncFixUp(OperationContext* txn, // Reload the lastAppliedOpTime and lastDurableOpTime value in the replcoord and the // lastAppliedHash value in bgsync to reflect our new last op. - replCoord->resetLastOpTimesFromOplog(txn); + replCoord->resetLastOpTimesFromOplog(opCtx); log() << "rollback done"; } -Status _syncRollback(OperationContext* txn, +Status _syncRollback(OperationContext* opCtx, const OplogInterface& localOplog, const RollbackSource& rollbackSource, boost::optional<int> requiredRBID, ReplicationCoordinator* replCoord) { - invariant(!txn->lockState()->isLocked()); + invariant(!opCtx->lockState()->isLocked()); FixUpInfo how; log() << "rollback 1"; @@ -833,7 +833,7 @@ Status _syncRollback(OperationContext* txn, log() << "rollback 3 fixup"; try { ON_BLOCK_EXIT([&] { replCoord->incrementRollbackID(); }); - syncFixUp(txn, how, rollbackSource, replCoord); + syncFixUp(opCtx, how, rollbackSource, replCoord); } catch (const RSFatalException& e) { return Status(ErrorCodes::UnrecoverableRollbackError, e.what(), 18753); } @@ -853,19 +853,19 @@ Status _syncRollback(OperationContext* txn, } // namespace -Status syncRollback(OperationContext* txn, +Status syncRollback(OperationContext* opCtx, const OplogInterface& localOplog, const RollbackSource& rollbackSource, boost::optional<int> requiredRBID, ReplicationCoordinator* replCoord) { - invariant(txn); + invariant(opCtx); invariant(replCoord); log() << "beginning rollback" << rsLog; - DisableDocumentValidation validationDisabler(txn); - UnreplicatedWritesBlock 
replicationDisabler(txn); - Status status = _syncRollback(txn, localOplog, rollbackSource, requiredRBID, replCoord); + DisableDocumentValidation validationDisabler(opCtx); + UnreplicatedWritesBlock replicationDisabler(opCtx); + Status status = _syncRollback(opCtx, localOplog, rollbackSource, requiredRBID, replCoord); log() << "rollback finished" << rsLog; return status; diff --git a/src/mongo/db/repl/rs_rollback.h b/src/mongo/db/repl/rs_rollback.h index 8ee7dd04367..58dd4a27d4f 100644 --- a/src/mongo/db/repl/rs_rollback.h +++ b/src/mongo/db/repl/rs_rollback.h @@ -59,7 +59,7 @@ class RollbackSource; * This function runs a command on the sync source to detect if the sync source rolls back * while our rollback is in progress. * - * @param txn Used to read and write from this node's databases + * @param opCtx Used to read and write from this node's databases * @param localOplog reads the oplog on this server. * @param rollbackSource interface for sync source: * provides oplog; and @@ -73,7 +73,7 @@ class RollbackSource; * fatally. All other errors should be considered recoverable regardless of whether reported as a * status or exception. 
*/ -Status syncRollback(OperationContext* txn, +Status syncRollback(OperationContext* opCtx, const OplogInterface& localOplog, const RollbackSource& rollbackSource, boost::optional<int> requiredRBID, diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp index 988fe98face..22e7a677fea 100644 --- a/src/mongo/db/repl/rs_rollback_test.cpp +++ b/src/mongo/db/repl/rs_rollback_test.cpp @@ -79,7 +79,7 @@ class ReplicationCoordinatorRollbackMock : public ReplicationCoordinatorMock { public: ReplicationCoordinatorRollbackMock(ServiceContext* service) : ReplicationCoordinatorMock(service, createReplSettings()) {} - void resetLastOpTimesFromOplog(OperationContext* txn) override {} + void resetLastOpTimesFromOplog(OperationContext* opCtx) override {} }; @@ -90,7 +90,8 @@ public: const OplogInterface& getOplog() const override; BSONObj getLastOperation() const override; BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const override; - void copyCollectionFromRemote(OperationContext* txn, const NamespaceString& nss) const override; + void copyCollectionFromRemote(OperationContext* opCtx, + const NamespaceString& nss) const override; StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const override; private: @@ -119,7 +120,7 @@ BSONObj RollbackSourceMock::findOne(const NamespaceString& nss, const BSONObj& f return BSONObj(); } -void RollbackSourceMock::copyCollectionFromRemote(OperationContext* txn, +void RollbackSourceMock::copyCollectionFromRemote(OperationContext* opCtx, const NamespaceString& nss) const {} StatusWith<BSONObj> RollbackSourceMock::getCollectionInfo(const NamespaceString& nss) const { @@ -128,7 +129,7 @@ StatusWith<BSONObj> RollbackSourceMock::getCollectionInfo(const NamespaceString& class RSRollbackTest : public ServiceContextMongoDTest { protected: - ServiceContext::UniqueOperationContext _txn; + ServiceContext::UniqueOperationContext _opCtx; // Owned by service context 
ReplicationCoordinator* _coordinator; @@ -140,8 +141,8 @@ private: void RSRollbackTest::setUp() { ServiceContextMongoDTest::setUp(); - _txn = cc().makeOperationContext(); - _coordinator = new ReplicationCoordinatorRollbackMock(_txn->getServiceContext()); + _opCtx = cc().makeOperationContext(); + _coordinator = new ReplicationCoordinatorRollbackMock(_opCtx->getServiceContext()); auto serviceContext = getServiceContext(); ReplicationCoordinator::set(serviceContext, @@ -149,22 +150,22 @@ void RSRollbackTest::setUp() { StorageInterface::set(serviceContext, stdx::make_unique<StorageInterfaceMock>()); setOplogCollectionName(); - repl::StorageInterface::get(_txn.get())->setAppliedThrough(_txn.get(), OpTime{}); - repl::StorageInterface::get(_txn.get())->setMinValid(_txn.get(), OpTime{}); + repl::StorageInterface::get(_opCtx.get())->setAppliedThrough(_opCtx.get(), OpTime{}); + repl::StorageInterface::get(_opCtx.get())->setMinValid(_opCtx.get(), OpTime{}); } void RSRollbackTest::tearDown() { - _txn.reset(); + _opCtx.reset(); ServiceContextMongoDTest::tearDown(); setGlobalReplicationCoordinator(nullptr); } TEST_F(RSRollbackTest, InconsistentMinValid) { - repl::StorageInterface::get(_txn.get()) - ->setAppliedThrough(_txn.get(), OpTime(Timestamp(Seconds(0), 0), 0)); - repl::StorageInterface::get(_txn.get()) - ->setMinValid(_txn.get(), OpTime(Timestamp(Seconds(1), 0), 0)); - auto status = syncRollback(_txn.get(), + repl::StorageInterface::get(_opCtx.get()) + ->setAppliedThrough(_opCtx.get(), OpTime(Timestamp(Seconds(0), 0), 0)); + repl::StorageInterface::get(_opCtx.get()) + ->setMinValid(_opCtx.get(), OpTime(Timestamp(Seconds(1), 0), 0)); + auto status = syncRollback(_opCtx.get(), OplogInterfaceMock(kEmptyMockOperations), RollbackSourceMock(std::unique_ptr<OplogInterface>( new OplogInterfaceMock(kEmptyMockOperations))), @@ -180,7 +181,7 @@ TEST_F(RSRollbackTest, OplogStartMissing) { std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId()); 
ASSERT_EQUALS( ErrorCodes::OplogStartMissing, - syncRollback(_txn.get(), + syncRollback(_opCtx.get(), OplogInterfaceMock(kEmptyMockOperations), RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({ operation, @@ -194,7 +195,7 @@ TEST_F(RSRollbackTest, NoRemoteOpLog) { OpTime ts(Timestamp(Seconds(1), 0), 0); auto operation = std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId()); - auto status = syncRollback(_txn.get(), + auto status = syncRollback(_opCtx.get(), OplogInterfaceMock({operation}), RollbackSourceMock(std::unique_ptr<OplogInterface>( new OplogInterfaceMock(kEmptyMockOperations))), @@ -216,7 +217,7 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdThrows) { uassert(ErrorCodes::UnknownError, "getRollbackId() failed", false); } }; - ASSERT_THROWS_CODE(syncRollback(_txn.get(), + ASSERT_THROWS_CODE(syncRollback(_opCtx.get(), OplogInterfaceMock({operation}), RollbackSourceLocal(std::unique_ptr<OplogInterface>( new OplogInterfaceMock(kEmptyMockOperations))), @@ -239,7 +240,7 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdDiffersFromRequiredRBID) { } }; - ASSERT_THROWS_CODE(syncRollback(_txn.get(), + ASSERT_THROWS_CODE(syncRollback(_opCtx.get(), OplogInterfaceMock({operation}), RollbackSourceLocal(std::unique_ptr<OplogInterface>( new OplogInterfaceMock(kEmptyMockOperations))), @@ -250,12 +251,12 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdDiffersFromRequiredRBID) { } TEST_F(RSRollbackTest, BothOplogsAtCommonPoint) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); OpTime ts(Timestamp(Seconds(1), 0), 1); auto operation = std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId(1)); ASSERT_OK( - syncRollback(_txn.get(), + syncRollback(_opCtx.get(), OplogInterfaceMock({operation}), RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({ operation, @@ -268,24 +269,24 @@ TEST_F(RSRollbackTest, BothOplogsAtCommonPoint) { * Create test collection. * Returns collection. 
*/ -Collection* _createCollection(OperationContext* txn, +Collection* _createCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) { - Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X); - mongo::WriteUnitOfWork wuow(txn); - auto db = dbHolder().openDb(txn, nss.db()); + Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X); + mongo::WriteUnitOfWork wuow(opCtx); + auto db = dbHolder().openDb(opCtx, nss.db()); ASSERT_TRUE(db); - db->dropCollection(txn, nss.ns()); - auto coll = db->createCollection(txn, nss.ns(), options); + db->dropCollection(opCtx, nss.ns()); + auto coll = db->createCollection(opCtx, nss.ns(), options); ASSERT_TRUE(coll); wuow.commit(); return coll; } -Collection* _createCollection(OperationContext* txn, +Collection* _createCollection(OperationContext* opCtx, const std::string& nss, const CollectionOptions& options) { - return _createCollection(txn, NamespaceString(nss), options); + return _createCollection(opCtx, NamespaceString(nss), options); } /** @@ -293,7 +294,7 @@ Collection* _createCollection(OperationContext* txn, * Returns number of records in collection after rolling back delete operation. * If collection does not exist after rolling back, returns -1. 
*/ -int _testRollbackDelete(OperationContext* txn, +int _testRollbackDelete(OperationContext* opCtx, ReplicationCoordinator* coordinator, const BSONObj& documentAtSource) { auto commonOperation = @@ -325,54 +326,54 @@ int _testRollbackDelete(OperationContext* txn, std::unique_ptr<OplogInterface>(new OplogInterfaceMock({ commonOperation, }))); - ASSERT_OK(syncRollback(txn, + ASSERT_OK(syncRollback(opCtx, OplogInterfaceMock({deleteOperation, commonOperation}), rollbackSource, {}, coordinator)); ASSERT_TRUE(rollbackSource.called); - Lock::DBLock dbLock(txn->lockState(), "test", MODE_S); - Lock::CollectionLock collLock(txn->lockState(), "test.t", MODE_S); - auto db = dbHolder().get(txn, "test"); + Lock::DBLock dbLock(opCtx->lockState(), "test", MODE_S); + Lock::CollectionLock collLock(opCtx->lockState(), "test.t", MODE_S); + auto db = dbHolder().get(opCtx, "test"); ASSERT_TRUE(db); auto collection = db->getCollection("test.t"); if (!collection) { return -1; } - return collection->getRecordStore()->numRecords(txn); + return collection->getRecordStore()->numRecords(opCtx); } TEST_F(RSRollbackTest, RollbackDeleteNoDocumentAtSourceCollectionDoesNotExist) { - createOplog(_txn.get()); - ASSERT_EQUALS(-1, _testRollbackDelete(_txn.get(), _coordinator, BSONObj())); + createOplog(_opCtx.get()); + ASSERT_EQUALS(-1, _testRollbackDelete(_opCtx.get(), _coordinator, BSONObj())); } TEST_F(RSRollbackTest, RollbackDeleteNoDocumentAtSourceCollectionExistsNonCapped) { - createOplog(_txn.get()); - _createCollection(_txn.get(), "test.t", CollectionOptions()); - _testRollbackDelete(_txn.get(), _coordinator, BSONObj()); - ASSERT_EQUALS(0, _testRollbackDelete(_txn.get(), _coordinator, BSONObj())); + createOplog(_opCtx.get()); + _createCollection(_opCtx.get(), "test.t", CollectionOptions()); + _testRollbackDelete(_opCtx.get(), _coordinator, BSONObj()); + ASSERT_EQUALS(0, _testRollbackDelete(_opCtx.get(), _coordinator, BSONObj())); } TEST_F(RSRollbackTest, 
RollbackDeleteNoDocumentAtSourceCollectionExistsCapped) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); CollectionOptions options; options.capped = true; - _createCollection(_txn.get(), "test.t", options); - ASSERT_EQUALS(0, _testRollbackDelete(_txn.get(), _coordinator, BSONObj())); + _createCollection(_opCtx.get(), "test.t", options); + ASSERT_EQUALS(0, _testRollbackDelete(_opCtx.get(), _coordinator, BSONObj())); } TEST_F(RSRollbackTest, RollbackDeleteRestoreDocument) { - createOplog(_txn.get()); - _createCollection(_txn.get(), "test.t", CollectionOptions()); + createOplog(_opCtx.get()); + _createCollection(_opCtx.get(), "test.t", CollectionOptions()); BSONObj doc = BSON("_id" << 0 << "a" << 1); - _testRollbackDelete(_txn.get(), _coordinator, doc); - ASSERT_EQUALS(1, _testRollbackDelete(_txn.get(), _coordinator, doc)); + _testRollbackDelete(_opCtx.get(), _coordinator, doc); + ASSERT_EQUALS(1, _testRollbackDelete(_opCtx.get(), _coordinator, doc)); } TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); auto insertDocumentOperation = @@ -400,7 +401,7 @@ TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) { commonOperation, }))); startCapturingLogMessages(); - auto status = syncRollback(_txn.get(), + auto status = syncRollback(_opCtx.get(), OplogInterfaceMock({insertDocumentOperation, commonOperation}), rollbackSource, {}, @@ -413,8 +414,8 @@ TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) { } TEST_F(RSRollbackTest, RollbackCreateIndexCommand) { - createOplog(_txn.get()); - auto collection = _createCollection(_txn.get(), "test.t", CollectionOptions()); + createOplog(_opCtx.get()); + auto collection = _createCollection(_opCtx.get(), "test.t", CollectionOptions()); auto indexSpec = BSON("ns" << "test.t" << "key" @@ -424,15 +425,15 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) { 
<< "v" << static_cast<int>(kIndexVersion)); { - Lock::DBLock dbLock(_txn->lockState(), "test", MODE_X); - MultiIndexBlock indexer(_txn.get(), collection); + Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_X); + MultiIndexBlock indexer(_opCtx.get(), collection); ASSERT_OK(indexer.init(indexSpec).getStatus()); - WriteUnitOfWork wunit(_txn.get()); + WriteUnitOfWork wunit(_opCtx.get()); indexer.commit(); wunit.commit(); auto indexCatalog = collection->getIndexCatalog(); ASSERT(indexCatalog); - ASSERT_EQUALS(2, indexCatalog->numIndexesReady(_txn.get())); + ASSERT_EQUALS(2, indexCatalog->numIndexesReady(_opCtx.get())); } auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); @@ -448,7 +449,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) { public: RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog) : RollbackSourceMock(std::move(oplog)), called(false) {} - void copyCollectionFromRemote(OperationContext* txn, + void copyCollectionFromRemote(OperationContext* opCtx, const NamespaceString& nss) const override { called = true; } @@ -464,7 +465,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) { // This can happen when an index is re-created with different options. startCapturingLogMessages(); ASSERT_OK(syncRollback( - _txn.get(), + _opCtx.get(), OplogInterfaceMock({insertDocumentOperation, insertDocumentOperation, commonOperation}), rollbackSource, {}, @@ -474,16 +475,16 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) { countLogLinesContaining("rollback drop index: collection: test.t. 
index: a_1")); ASSERT_FALSE(rollbackSource.called); { - Lock::DBLock dbLock(_txn->lockState(), "test", MODE_S); + Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S); auto indexCatalog = collection->getIndexCatalog(); ASSERT(indexCatalog); - ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_txn.get())); + ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get())); } } TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) { - createOplog(_txn.get()); - auto collection = _createCollection(_txn.get(), "test.t", CollectionOptions()); + createOplog(_opCtx.get()); + auto collection = _createCollection(_opCtx.get(), "test.t", CollectionOptions()); auto indexSpec = BSON("ns" << "test.t" << "key" @@ -492,10 +493,10 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) { << "a_1"); // Skip index creation to trigger warning during rollback. { - Lock::DBLock dbLock(_txn->lockState(), "test", MODE_S); + Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S); auto indexCatalog = collection->getIndexCatalog(); ASSERT(indexCatalog); - ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_txn.get())); + ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get())); } auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); @@ -511,7 +512,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) { public: RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog) : RollbackSourceMock(std::move(oplog)), called(false) {} - void copyCollectionFromRemote(OperationContext* txn, + void copyCollectionFromRemote(OperationContext* opCtx, const NamespaceString& nss) const override { called = true; } @@ -524,7 +525,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) { commonOperation, }))); startCapturingLogMessages(); - ASSERT_OK(syncRollback(_txn.get(), + ASSERT_OK(syncRollback(_opCtx.get(), OplogInterfaceMock({insertDocumentOperation, commonOperation}), rollbackSource, {}, 
@@ -535,15 +536,15 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) { ASSERT_EQUALS(1, countLogLinesContaining("rollback failed to drop index a_1 in test.t")); ASSERT_FALSE(rollbackSource.called); { - Lock::DBLock dbLock(_txn->lockState(), "test", MODE_S); + Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S); auto indexCatalog = collection->getIndexCatalog(); ASSERT(indexCatalog); - ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_txn.get())); + ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get())); } } TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingNamespace) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); auto insertDocumentOperation = @@ -559,7 +560,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingNamespace) { public: RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog) : RollbackSourceMock(std::move(oplog)), called(false) {} - void copyCollectionFromRemote(OperationContext* txn, + void copyCollectionFromRemote(OperationContext* opCtx, const NamespaceString& nss) const override { called = true; } @@ -572,7 +573,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingNamespace) { commonOperation, }))); startCapturingLogMessages(); - auto status = syncRollback(_txn.get(), + auto status = syncRollback(_opCtx.get(), OplogInterfaceMock({insertDocumentOperation, commonOperation}), rollbackSource, {}, @@ -586,7 +587,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingNamespace) { } TEST_F(RSRollbackTest, RollbackCreateIndexCommandInvalidNamespace) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); auto insertDocumentOperation = @@ -606,7 +607,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandInvalidNamespace) { public: 
RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog) : RollbackSourceMock(std::move(oplog)), called(false) {} - void copyCollectionFromRemote(OperationContext* txn, + void copyCollectionFromRemote(OperationContext* opCtx, const NamespaceString& nss) const override { called = true; } @@ -619,7 +620,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandInvalidNamespace) { commonOperation, }))); startCapturingLogMessages(); - auto status = syncRollback(_txn.get(), + auto status = syncRollback(_opCtx.get(), OplogInterfaceMock({insertDocumentOperation, commonOperation}), rollbackSource, {}, @@ -633,7 +634,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandInvalidNamespace) { } TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); auto insertDocumentOperation = @@ -651,7 +652,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) { public: RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog) : RollbackSourceMock(std::move(oplog)), called(false) {} - void copyCollectionFromRemote(OperationContext* txn, + void copyCollectionFromRemote(OperationContext* opCtx, const NamespaceString& nss) const override { called = true; } @@ -664,7 +665,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) { commonOperation, }))); startCapturingLogMessages(); - auto status = syncRollback(_txn.get(), + auto status = syncRollback(_opCtx.get(), OplogInterfaceMock({insertDocumentOperation, commonOperation}), rollbackSource, {}, @@ -677,7 +678,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) { } TEST_F(RSRollbackTest, RollbackUnknownCommand) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); auto unknownCommandOperation = @@ -690,15 +691,15 
@@ TEST_F(RSRollbackTest, RollbackUnknownCommand) { << "t")), RecordId(2)); { - Lock::DBLock dbLock(_txn->lockState(), "test", MODE_X); - mongo::WriteUnitOfWork wuow(_txn.get()); - auto db = dbHolder().openDb(_txn.get(), "test"); + Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_X); + mongo::WriteUnitOfWork wuow(_opCtx.get()); + auto db = dbHolder().openDb(_opCtx.get(), "test"); ASSERT_TRUE(db); - ASSERT_TRUE(db->getOrCreateCollection(_txn.get(), "test.t")); + ASSERT_TRUE(db->getOrCreateCollection(_opCtx.get(), "test.t")); wuow.commit(); } auto status = - syncRollback(_txn.get(), + syncRollback(_opCtx.get(), OplogInterfaceMock({unknownCommandOperation, commonOperation}), RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({ commonOperation, @@ -710,7 +711,7 @@ TEST_F(RSRollbackTest, RollbackUnknownCommand) { } TEST_F(RSRollbackTest, RollbackDropCollectionCommand) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); auto dropCollectionOperation = @@ -726,7 +727,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) { public: RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog) : RollbackSourceMock(std::move(oplog)), called(false) {} - void copyCollectionFromRemote(OperationContext* txn, + void copyCollectionFromRemote(OperationContext* opCtx, const NamespaceString& nss) const override { called = true; } @@ -735,8 +736,8 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) { RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({ commonOperation, }))); - _createCollection(_txn.get(), "test.t", CollectionOptions()); - ASSERT_OK(syncRollback(_txn.get(), + _createCollection(_opCtx.get(), "test.t", CollectionOptions()); + ASSERT_OK(syncRollback(_opCtx.get(), OplogInterfaceMock({dropCollectionOperation, commonOperation}), rollbackSource, {}, @@ -745,7 +746,7 @@ TEST_F(RSRollbackTest, 
RollbackDropCollectionCommand) { } TEST_F(RSRollbackTest, RollbackDropCollectionCommandFailsIfRBIDChangesWhileSyncingCollection) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); auto dropCollectionOperation = @@ -763,7 +764,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommandFailsIfRBIDChangesWhileSynci int getRollbackId() const override { return copyCollectionCalled ? 1 : 0; } - void copyCollectionFromRemote(OperationContext* txn, + void copyCollectionFromRemote(OperationContext* opCtx, const NamespaceString& nss) const override { copyCollectionCalled = true; } @@ -773,8 +774,8 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommandFailsIfRBIDChangesWhileSynci commonOperation, }))); - _createCollection(_txn.get(), "test.t", CollectionOptions()); - ASSERT_THROWS_CODE(syncRollback(_txn.get(), + _createCollection(_opCtx.get(), "test.t", CollectionOptions()); + ASSERT_THROWS_CODE(syncRollback(_opCtx.get(), OplogInterfaceMock({dropCollectionOperation, commonOperation}), rollbackSource, {0}, @@ -815,22 +816,22 @@ OpTime getOpTimeFromOplogEntry(const BSONObj& entry) { } TEST_F(RSRollbackTest, RollbackApplyOpsCommand) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); { - AutoGetOrCreateDb autoDb(_txn.get(), "test", MODE_X); - mongo::WriteUnitOfWork wuow(_txn.get()); + AutoGetOrCreateDb autoDb(_opCtx.get(), "test", MODE_X); + mongo::WriteUnitOfWork wuow(_opCtx.get()); auto coll = autoDb.getDb()->getCollection("test.t"); if (!coll) { - coll = autoDb.getDb()->createCollection(_txn.get(), "test.t"); + coll = autoDb.getDb()->createCollection(_opCtx.get(), "test.t"); } ASSERT(coll); OpDebug* const nullOpDebug = nullptr; ASSERT_OK( - coll->insertDocument(_txn.get(), BSON("_id" << 1 << "v" << 2), nullOpDebug, false)); + coll->insertDocument(_opCtx.get(), BSON("_id" << 1 << "v" << 2), nullOpDebug, false)); ASSERT_OK( - coll->insertDocument(_txn.get(), 
BSON("_id" << 2 << "v" << 4), nullOpDebug, false)); - ASSERT_OK(coll->insertDocument(_txn.get(), BSON("_id" << 4), nullOpDebug, false)); + coll->insertDocument(_opCtx.get(), BSON("_id" << 2 << "v" << 4), nullOpDebug, false)); + ASSERT_OK(coll->insertDocument(_opCtx.get(), BSON("_id" << 4), nullOpDebug, false)); wuow.commit(); } const auto commonOperation = @@ -897,8 +898,8 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) { mutable std::multiset<int> searchedIds; } rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({commonOperation}))); - _createCollection(_txn.get(), "test.t", CollectionOptions()); - ASSERT_OK(syncRollback(_txn.get(), + _createCollection(_opCtx.get(), "test.t", CollectionOptions()); + ASSERT_OK(syncRollback(_opCtx.get(), OplogInterfaceMock({applyOpsOperation, commonOperation}), rollbackSource, {}, @@ -909,20 +910,20 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) { ASSERT_EQUALS(1U, rollbackSource.searchedIds.count(3)); ASSERT_EQUALS(1U, rollbackSource.searchedIds.count(4)); - AutoGetCollectionForRead acr(_txn.get(), NamespaceString("test.t")); + AutoGetCollectionForRead acr(_opCtx.get(), NamespaceString("test.t")); BSONObj result; - ASSERT(Helpers::findOne(_txn.get(), acr.getCollection(), BSON("_id" << 1), result)); + ASSERT(Helpers::findOne(_opCtx.get(), acr.getCollection(), BSON("_id" << 1), result)); ASSERT_EQUALS(1, result["v"].numberInt()) << result; - ASSERT(Helpers::findOne(_txn.get(), acr.getCollection(), BSON("_id" << 2), result)); + ASSERT(Helpers::findOne(_opCtx.get(), acr.getCollection(), BSON("_id" << 2), result)); ASSERT_EQUALS(3, result["v"].numberInt()) << result; - ASSERT(Helpers::findOne(_txn.get(), acr.getCollection(), BSON("_id" << 3), result)); + ASSERT(Helpers::findOne(_opCtx.get(), acr.getCollection(), BSON("_id" << 3), result)); ASSERT_EQUALS(5, result["v"].numberInt()) << result; - ASSERT_FALSE(Helpers::findOne(_txn.get(), acr.getCollection(), BSON("_id" << 4), result)) + 
ASSERT_FALSE(Helpers::findOne(_opCtx.get(), acr.getCollection(), BSON("_id" << 4), result)) << result; } TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); auto createCollectionOperation = @@ -937,22 +938,22 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) { RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({ commonOperation, }))); - _createCollection(_txn.get(), "test.t", CollectionOptions()); - ASSERT_OK(syncRollback(_txn.get(), + _createCollection(_opCtx.get(), "test.t", CollectionOptions()); + ASSERT_OK(syncRollback(_opCtx.get(), OplogInterfaceMock({createCollectionOperation, commonOperation}), rollbackSource, {}, _coordinator)); { - Lock::DBLock dbLock(_txn->lockState(), "test", MODE_S); - auto db = dbHolder().get(_txn.get(), "test"); + Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S); + auto db = dbHolder().get(_opCtx.get(), "test"); ASSERT_TRUE(db); ASSERT_FALSE(db->getCollection("test.t")); } } TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); auto collectionModificationOperation = @@ -979,9 +980,9 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) { RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({ commonOperation, }))); - _createCollection(_txn.get(), "test.t", CollectionOptions()); + _createCollection(_opCtx.get(), "test.t", CollectionOptions()); startCapturingLogMessages(); - ASSERT_OK(syncRollback(_txn.get(), + ASSERT_OK(syncRollback(_opCtx.get(), OplogInterfaceMock({collectionModificationOperation, commonOperation}), rollbackSource, {}, @@ -995,7 +996,7 @@ TEST_F(RSRollbackTest, 
RollbackCollectionModificationCommand) { } TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOptions) { - createOplog(_txn.get()); + createOplog(_opCtx.get()); auto commonOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1)); auto collectionModificationOperation = @@ -1020,9 +1021,9 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOpt RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({ commonOperation, }))); - _createCollection(_txn.get(), "test.t", CollectionOptions()); + _createCollection(_opCtx.get(), "test.t", CollectionOptions()); auto status = - syncRollback(_txn.get(), + syncRollback(_opCtx.get(), OplogInterfaceMock({collectionModificationOperation, commonOperation}), rollbackSource, {}, diff --git a/src/mongo/db/repl/storage_interface.cpp b/src/mongo/db/repl/storage_interface.cpp index b9a1c25df45..5f9deb71a85 100644 --- a/src/mongo/db/repl/storage_interface.cpp +++ b/src/mongo/db/repl/storage_interface.cpp @@ -51,8 +51,8 @@ StorageInterface* StorageInterface::get(ServiceContext& service) { return getStorageInterface(service).get(); } -StorageInterface* StorageInterface::get(OperationContext* txn) { - return get(txn->getClient()->getServiceContext()); +StorageInterface* StorageInterface::get(OperationContext* opCtx) { + return get(opCtx->getClient()->getServiceContext()); } diff --git a/src/mongo/db/repl/storage_interface.h b/src/mongo/db/repl/storage_interface.h index ae4f6f7eabf..2e87f23537d 100644 --- a/src/mongo/db/repl/storage_interface.h +++ b/src/mongo/db/repl/storage_interface.h @@ -91,7 +91,7 @@ public: // Operation Context binding. 
static StorageInterface* get(ServiceContext* service); static StorageInterface* get(ServiceContext& service); - static StorageInterface* get(OperationContext* txn); + static StorageInterface* get(OperationContext* opCtx); static void set(ServiceContext* service, std::unique_ptr<StorageInterface> storageInterface); // Constructor and Destructor. @@ -105,7 +105,7 @@ public: /** * Returns true if initial sync was started but has not not completed. */ - virtual bool getInitialSyncFlag(OperationContext* txn) const = 0; + virtual bool getInitialSyncFlag(OperationContext* opCtx) const = 0; /** * Sets the the initial sync flag to record that initial sync has not completed. @@ -113,7 +113,7 @@ public: * This operation is durable and waits for durable writes (which will block on *journaling/checkpointing). */ - virtual void setInitialSyncFlag(OperationContext* txn) = 0; + virtual void setInitialSyncFlag(OperationContext* opCtx) = 0; /** * Clears the the initial sync flag to record that initial sync has completed. @@ -121,34 +121,34 @@ public: * This operation is durable and waits for durable writes (which will block on *journaling/checkpointing). */ - virtual void clearInitialSyncFlag(OperationContext* txn) = 0; + virtual void clearInitialSyncFlag(OperationContext* opCtx) = 0; /** * The minValid value is the earliest (minimum) Timestamp that must be applied in order to * consider the dataset consistent. */ - virtual void setMinValid(OperationContext* txn, const OpTime& minValid) = 0; - virtual OpTime getMinValid(OperationContext* txn) const = 0; + virtual void setMinValid(OperationContext* opCtx, const OpTime& minValid) = 0; + virtual OpTime getMinValid(OperationContext* opCtx) const = 0; /** * Sets minValid only if it is not already higher than endOpTime. * Warning, this compares the term and timestamp independently. Do not use if the current * minValid could be from the other fork of a rollback. 
*/ - virtual void setMinValidToAtLeast(OperationContext* txn, const OpTime& endOpTime) = 0; + virtual void setMinValidToAtLeast(OperationContext* opCtx, const OpTime& endOpTime) = 0; /** * On startup all oplog entries with a value >= the oplog delete from point should be deleted. * If null, no documents should be deleted. */ - virtual void setOplogDeleteFromPoint(OperationContext* txn, const Timestamp& timestamp) = 0; - virtual Timestamp getOplogDeleteFromPoint(OperationContext* txn) = 0; + virtual void setOplogDeleteFromPoint(OperationContext* opCtx, const Timestamp& timestamp) = 0; + virtual Timestamp getOplogDeleteFromPoint(OperationContext* opCtx) = 0; /** * The applied through point is a persistent record of where we've applied through. If null, the * applied through point is the top of the oplog. */ - virtual void setAppliedThrough(OperationContext* txn, const OpTime& optime) = 0; + virtual void setAppliedThrough(OperationContext* opCtx, const OpTime& optime) = 0; /** * You should probably be calling ReplicationCoordinator::getLastAppliedOpTime() instead. @@ -156,7 +156,7 @@ public: * This reads the value from storage which isn't always updated when the ReplicationCoordinator * is. */ - virtual OpTime getAppliedThrough(OperationContext* txn) = 0; + virtual OpTime getAppliedThrough(OperationContext* opCtx) = 0; // Collection creation and population for initial sync. @@ -177,7 +177,7 @@ public: * NOTE: If the collection doesn't exist, it will not be created, and instead * an error is returned. */ - virtual Status insertDocument(OperationContext* txn, + virtual Status insertDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) = 0; @@ -185,14 +185,14 @@ public: * Inserts the given documents into the collection. * It is an error to call this function with an empty set of documents. 
*/ - virtual Status insertDocuments(OperationContext* txn, + virtual Status insertDocuments(OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs) = 0; /** * Creates the initial oplog, errors if it exists. */ - virtual Status createOplog(OperationContext* txn, const NamespaceString& nss) = 0; + virtual Status createOplog(OperationContext* opCtx, const NamespaceString& nss) = 0; /** * Returns the configured maximum size of the oplog. @@ -200,30 +200,30 @@ public: * Implementations are allowed to be "fuzzy" and delete documents when the actual size is * slightly above or below this, so callers should not rely on its exact value. */ - virtual StatusWith<size_t> getOplogMaxSize(OperationContext* txn, + virtual StatusWith<size_t> getOplogMaxSize(OperationContext* opCtx, const NamespaceString& nss) = 0; /** * Creates a collection. */ - virtual Status createCollection(OperationContext* txn, + virtual Status createCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) = 0; /** * Drops a collection, like the oplog. */ - virtual Status dropCollection(OperationContext* txn, const NamespaceString& nss) = 0; + virtual Status dropCollection(OperationContext* opCtx, const NamespaceString& nss) = 0; /** * Drops all databases except "local". */ - virtual Status dropReplicatedDatabases(OperationContext* txn) = 0; + virtual Status dropReplicatedDatabases(OperationContext* opCtx) = 0; /** * Validates that the admin database is valid during initial sync. 
*/ - virtual Status isAdminDbValid(OperationContext* txn) = 0; + virtual Status isAdminDbValid(OperationContext* opCtx) = 0; /** * Finds at most "limit" documents returned by a collection or index scan on the collection in @@ -242,7 +242,7 @@ public: kForward = 1, kBackward = -1, }; - virtual StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* txn, + virtual StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, @@ -257,7 +257,7 @@ public: * will be kept open once this function returns. * If "indexName" is null, a collection scan is used to locate the document. */ - virtual StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* txn, + virtual StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp index ce03ab1629c..ff5aa7d4260 100644 --- a/src/mongo/db/repl/storage_interface_impl.cpp +++ b/src/mongo/db/repl/storage_interface_impl.cpp @@ -102,67 +102,67 @@ NamespaceString StorageInterfaceImpl::getMinValidNss() const { return _minValidNss; } -BSONObj StorageInterfaceImpl::getMinValidDocument(OperationContext* txn) const { +BSONObj StorageInterfaceImpl::getMinValidDocument(OperationContext* opCtx) const { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_IS); - Lock::DBLock dblk(txn->lockState(), _minValidNss.db(), MODE_IS); - Lock::CollectionLock lk(txn->lockState(), _minValidNss.ns(), MODE_IS); + ScopedTransaction transaction(opCtx, MODE_IS); + Lock::DBLock dblk(opCtx->lockState(), _minValidNss.db(), MODE_IS); + Lock::CollectionLock lk(opCtx->lockState(), _minValidNss.ns(), MODE_IS); BSONObj doc; - bool found = Helpers::getSingleton(txn, _minValidNss.ns().c_str(), doc); + bool found 
= Helpers::getSingleton(opCtx, _minValidNss.ns().c_str(), doc); invariant(found || doc.isEmpty()); return doc; } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( - txn, "StorageInterfaceImpl::getMinValidDocument", _minValidNss.ns()); + opCtx, "StorageInterfaceImpl::getMinValidDocument", _minValidNss.ns()); MONGO_UNREACHABLE; } -void StorageInterfaceImpl::updateMinValidDocument(OperationContext* txn, +void StorageInterfaceImpl::updateMinValidDocument(OperationContext* opCtx, const BSONObj& updateSpec) { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_IX); + ScopedTransaction transaction(opCtx, MODE_IX); // For now this needs to be MODE_X because it sometimes creates the collection. - Lock::DBLock dblk(txn->lockState(), _minValidNss.db(), MODE_X); - Helpers::putSingleton(txn, _minValidNss.ns().c_str(), updateSpec); + Lock::DBLock dblk(opCtx->lockState(), _minValidNss.db(), MODE_X); + Helpers::putSingleton(opCtx, _minValidNss.ns().c_str(), updateSpec); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( - txn, "StorageInterfaceImpl::updateMinValidDocument", _minValidNss.ns()); + opCtx, "StorageInterfaceImpl::updateMinValidDocument", _minValidNss.ns()); } -bool StorageInterfaceImpl::getInitialSyncFlag(OperationContext* txn) const { - const BSONObj doc = getMinValidDocument(txn); +bool StorageInterfaceImpl::getInitialSyncFlag(OperationContext* opCtx) const { + const BSONObj doc = getMinValidDocument(opCtx); const auto flag = doc[kInitialSyncFlagFieldName].trueValue(); LOG(3) << "returning initial sync flag value of " << flag; return flag; } -void StorageInterfaceImpl::setInitialSyncFlag(OperationContext* txn) { +void StorageInterfaceImpl::setInitialSyncFlag(OperationContext* opCtx) { LOG(3) << "setting initial sync flag"; - updateMinValidDocument(txn, BSON("$set" << kInitialSyncFlag)); - txn->recoveryUnit()->waitUntilDurable(); + updateMinValidDocument(opCtx, BSON("$set" << kInitialSyncFlag)); + opCtx->recoveryUnit()->waitUntilDurable(); } -void 
StorageInterfaceImpl::clearInitialSyncFlag(OperationContext* txn) { +void StorageInterfaceImpl::clearInitialSyncFlag(OperationContext* opCtx) { LOG(3) << "clearing initial sync flag"; - auto replCoord = repl::ReplicationCoordinator::get(txn); + auto replCoord = repl::ReplicationCoordinator::get(opCtx); OpTime time = replCoord->getMyLastAppliedOpTime(); updateMinValidDocument( - txn, + opCtx, BSON("$unset" << kInitialSyncFlag << "$set" << BSON("ts" << time.getTimestamp() << "t" << time.getTerm() << kBeginFieldName << time.toBSON()))); if (getGlobalServiceContext()->getGlobalStorageEngine()->isDurable()) { - txn->recoveryUnit()->waitUntilDurable(); + opCtx->recoveryUnit()->waitUntilDurable(); replCoord->setMyLastDurableOpTime(time); } } -OpTime StorageInterfaceImpl::getMinValid(OperationContext* txn) const { - const BSONObj doc = getMinValidDocument(txn); +OpTime StorageInterfaceImpl::getMinValid(OperationContext* opCtx) const { + const BSONObj doc = getMinValidDocument(opCtx); const auto opTimeStatus = OpTime::parseFromOplogEntry(doc); // If any of the keys (fields) are missing from the minvalid document, we return // a null OpTime. 
@@ -182,28 +182,29 @@ OpTime StorageInterfaceImpl::getMinValid(OperationContext* txn) const { return minValid; } -void StorageInterfaceImpl::setMinValid(OperationContext* txn, const OpTime& minValid) { +void StorageInterfaceImpl::setMinValid(OperationContext* opCtx, const OpTime& minValid) { LOG(3) << "setting minvalid to exactly: " << minValid.toString() << "(" << minValid.toBSON() << ")"; updateMinValidDocument( - txn, BSON("$set" << BSON("ts" << minValid.getTimestamp() << "t" << minValid.getTerm()))); + opCtx, BSON("$set" << BSON("ts" << minValid.getTimestamp() << "t" << minValid.getTerm()))); } -void StorageInterfaceImpl::setMinValidToAtLeast(OperationContext* txn, const OpTime& minValid) { +void StorageInterfaceImpl::setMinValidToAtLeast(OperationContext* opCtx, const OpTime& minValid) { LOG(3) << "setting minvalid to at least: " << minValid.toString() << "(" << minValid.toBSON() << ")"; updateMinValidDocument( - txn, BSON("$max" << BSON("ts" << minValid.getTimestamp() << "t" << minValid.getTerm()))); + opCtx, BSON("$max" << BSON("ts" << minValid.getTimestamp() << "t" << minValid.getTerm()))); } -void StorageInterfaceImpl::setOplogDeleteFromPoint(OperationContext* txn, +void StorageInterfaceImpl::setOplogDeleteFromPoint(OperationContext* opCtx, const Timestamp& timestamp) { LOG(3) << "setting oplog delete from point to: " << timestamp.toStringPretty(); - updateMinValidDocument(txn, BSON("$set" << BSON(kOplogDeleteFromPointFieldName << timestamp))); + updateMinValidDocument(opCtx, + BSON("$set" << BSON(kOplogDeleteFromPointFieldName << timestamp))); } -Timestamp StorageInterfaceImpl::getOplogDeleteFromPoint(OperationContext* txn) { - const BSONObj doc = getMinValidDocument(txn); +Timestamp StorageInterfaceImpl::getOplogDeleteFromPoint(OperationContext* opCtx) { + const BSONObj doc = getMinValidDocument(opCtx); Timestamp out = {}; if (auto field = doc[kOplogDeleteFromPointFieldName]) { out = field.timestamp(); @@ -213,17 +214,17 @@ Timestamp 
StorageInterfaceImpl::getOplogDeleteFromPoint(OperationContext* txn) { return out; } -void StorageInterfaceImpl::setAppliedThrough(OperationContext* txn, const OpTime& optime) { +void StorageInterfaceImpl::setAppliedThrough(OperationContext* opCtx, const OpTime& optime) { LOG(3) << "setting appliedThrough to: " << optime.toString() << "(" << optime.toBSON() << ")"; if (optime.isNull()) { - updateMinValidDocument(txn, BSON("$unset" << BSON(kBeginFieldName << 1))); + updateMinValidDocument(opCtx, BSON("$unset" << BSON(kBeginFieldName << 1))); } else { - updateMinValidDocument(txn, BSON("$set" << BSON(kBeginFieldName << optime.toBSON()))); + updateMinValidDocument(opCtx, BSON("$set" << BSON(kBeginFieldName << optime.toBSON()))); } } -OpTime StorageInterfaceImpl::getAppliedThrough(OperationContext* txn) { - const BSONObj doc = getMinValidDocument(txn); +OpTime StorageInterfaceImpl::getAppliedThrough(OperationContext* opCtx) { + const BSONObj doc = getMinValidDocument(opCtx); const auto opTimeStatus = OpTime::parseFromOplogEntry(doc.getObjectField(kBeginFieldName)); if (!opTimeStatus.isOK()) { // Return null OpTime on any parse failure, including if "begin" is missing. @@ -253,18 +254,18 @@ StorageInterfaceImpl::createCollectionForBulkLoading( std::unique_ptr<CollectionBulkLoader> loaderToReturn; Collection* collection; - auto status = runner->runSynchronousTask([&](OperationContext* txn) -> Status { + auto status = runner->runSynchronousTask([&](OperationContext* opCtx) -> Status { // We are not replicating nor validating writes under this OperationContext*. // The OperationContext* is used for all writes to the (newly) cloned collection. - txn->setReplicatedWrites(false); - documentValidationDisabled(txn) = true; + opCtx->setReplicatedWrites(false); + documentValidationDisabled(opCtx) = true; // Retry if WCE. MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { // Get locks and create the collection. 
- ScopedTransaction transaction(txn, MODE_IX); - auto db = stdx::make_unique<AutoGetOrCreateDb>(txn, nss.db(), MODE_IX); - auto coll = stdx::make_unique<AutoGetCollection>(txn, nss, MODE_X); + ScopedTransaction transaction(opCtx, MODE_IX); + auto db = stdx::make_unique<AutoGetOrCreateDb>(opCtx, nss.db(), MODE_IX); + auto coll = stdx::make_unique<AutoGetCollection>(opCtx, nss, MODE_X); collection = coll->getCollection(); if (collection) { @@ -272,14 +273,14 @@ StorageInterfaceImpl::createCollectionForBulkLoading( } // Create the collection. - WriteUnitOfWork wunit(txn); - collection = db->getDb()->createCollection(txn, nss.ns(), options, false); + WriteUnitOfWork wunit(opCtx); + collection = db->getDb()->createCollection(opCtx, nss.ns(), options, false); invariant(collection); wunit.commit(); - coll = stdx::make_unique<AutoGetCollection>(txn, nss, MODE_IX); + coll = stdx::make_unique<AutoGetCollection>(opCtx, nss, MODE_IX); // Move locks into loader, so it now controls their lifetime. - auto loader = stdx::make_unique<CollectionBulkLoaderImpl>(txn, + auto loader = stdx::make_unique<CollectionBulkLoaderImpl>(opCtx, collection, idIndexSpec, std::move(threadPool), @@ -291,7 +292,7 @@ StorageInterfaceImpl::createCollectionForBulkLoading( loaderToReturn = std::move(loader); return Status::OK(); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "beginCollectionClone", nss.ns()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "beginCollectionClone", nss.ns()); MONGO_UNREACHABLE; }); @@ -308,20 +309,20 @@ StorageInterfaceImpl::createCollectionForBulkLoading( } -Status StorageInterfaceImpl::insertDocument(OperationContext* txn, +Status StorageInterfaceImpl::insertDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) { - return insertDocuments(txn, nss, {doc}); + return insertDocuments(opCtx, nss, {doc}); } namespace { -Status insertDocumentsSingleBatch(OperationContext* txn, +Status insertDocumentsSingleBatch(OperationContext* opCtx, const 
NamespaceString& nss, std::vector<BSONObj>::const_iterator begin, std::vector<BSONObj>::const_iterator end) { - ScopedTransaction transaction(txn, MODE_IX); - AutoGetCollection autoColl(txn, nss, MODE_IX); + ScopedTransaction transaction(opCtx, MODE_IX); + AutoGetCollection autoColl(opCtx, nss, MODE_IX); auto collection = autoColl.getCollection(); if (!collection) { return {ErrorCodes::NamespaceNotFound, @@ -329,9 +330,9 @@ Status insertDocumentsSingleBatch(OperationContext* txn, << nss.ns()}; } - WriteUnitOfWork wunit(txn); + WriteUnitOfWork wunit(opCtx); OpDebug* const nullOpDebug = nullptr; - auto status = collection->insertDocuments(txn, begin, end, nullOpDebug, false); + auto status = collection->insertDocuments(opCtx, begin, end, nullOpDebug, false); if (!status.isOK()) { return status; } @@ -342,12 +343,12 @@ Status insertDocumentsSingleBatch(OperationContext* txn, } // namespace -Status StorageInterfaceImpl::insertDocuments(OperationContext* txn, +Status StorageInterfaceImpl::insertDocuments(OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs) { if (docs.size() > 1U) { try { - if (insertDocumentsSingleBatch(txn, nss, docs.cbegin(), docs.cend()).isOK()) { + if (insertDocumentsSingleBatch(opCtx, nss, docs.cbegin(), docs.cend()).isOK()) { return Status::OK(); } } catch (...) { @@ -359,83 +360,84 @@ Status StorageInterfaceImpl::insertDocuments(OperationContext* txn, // Try to insert the batch one-at-a-time because the batch failed all-at-once inserting. 
for (auto it = docs.cbegin(); it != docs.cend(); ++it) { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - auto status = insertDocumentsSingleBatch(txn, nss, it, it + 1); + auto status = insertDocumentsSingleBatch(opCtx, nss, it, it + 1); if (!status.isOK()) { return status; } } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "StorageInterfaceImpl::insertDocuments", nss.ns()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END( + opCtx, "StorageInterfaceImpl::insertDocuments", nss.ns()); } return Status::OK(); } -Status StorageInterfaceImpl::dropReplicatedDatabases(OperationContext* txn) { - dropAllDatabasesExceptLocal(txn); +Status StorageInterfaceImpl::dropReplicatedDatabases(OperationContext* opCtx) { + dropAllDatabasesExceptLocal(opCtx); return Status::OK(); } -Status StorageInterfaceImpl::createOplog(OperationContext* txn, const NamespaceString& nss) { - mongo::repl::createOplog(txn, nss.ns(), true); +Status StorageInterfaceImpl::createOplog(OperationContext* opCtx, const NamespaceString& nss) { + mongo::repl::createOplog(opCtx, nss.ns(), true); return Status::OK(); } -StatusWith<size_t> StorageInterfaceImpl::getOplogMaxSize(OperationContext* txn, +StatusWith<size_t> StorageInterfaceImpl::getOplogMaxSize(OperationContext* opCtx, const NamespaceString& nss) { - AutoGetCollectionForRead collection(txn, nss); + AutoGetCollectionForRead collection(opCtx, nss); if (!collection.getCollection()) { return {ErrorCodes::NamespaceNotFound, str::stream() << "Your oplog doesn't exist: " << nss.ns()}; } - const auto options = collection.getCollection()->getCatalogEntry()->getCollectionOptions(txn); + const auto options = collection.getCollection()->getCatalogEntry()->getCollectionOptions(opCtx); if (!options.capped) return {ErrorCodes::BadValue, str::stream() << nss.ns() << " isn't capped"}; return options.cappedSize; } -Status StorageInterfaceImpl::createCollection(OperationContext* txn, +Status StorageInterfaceImpl::createCollection(OperationContext* opCtx, const NamespaceString& nss, const 
CollectionOptions& options) { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_IX); - AutoGetOrCreateDb databaseWriteGuard(txn, nss.db(), MODE_X); + ScopedTransaction transaction(opCtx, MODE_IX); + AutoGetOrCreateDb databaseWriteGuard(opCtx, nss.db(), MODE_X); auto db = databaseWriteGuard.getDb(); invariant(db); if (db->getCollection(nss)) { return {ErrorCodes::NamespaceExists, str::stream() << "Collection " << nss.ns() << " already exists."}; } - WriteUnitOfWork wuow(txn); + WriteUnitOfWork wuow(opCtx); try { - auto coll = db->createCollection(txn, nss.ns(), options); + auto coll = db->createCollection(opCtx, nss.ns(), options); invariant(coll); } catch (const UserException& ex) { return ex.toStatus(); } wuow.commit(); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "StorageInterfaceImpl::createCollection", nss.ns()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "StorageInterfaceImpl::createCollection", nss.ns()); return Status::OK(); } -Status StorageInterfaceImpl::dropCollection(OperationContext* txn, const NamespaceString& nss) { +Status StorageInterfaceImpl::dropCollection(OperationContext* opCtx, const NamespaceString& nss) { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_IX); - AutoGetDb autoDB(txn, nss.db(), MODE_X); + ScopedTransaction transaction(opCtx, MODE_IX); + AutoGetDb autoDB(opCtx, nss.db(), MODE_X); if (!autoDB.getDb()) { // Database does not exist - nothing to do. 
return Status::OK(); } - WriteUnitOfWork wunit(txn); - const auto status = autoDB.getDb()->dropCollection(txn, nss.ns()); + WriteUnitOfWork wunit(opCtx); + const auto status = autoDB.getDb()->dropCollection(opCtx, nss.ns()); if (status.isOK()) { wunit.commit(); } return status; } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "StorageInterfaceImpl::dropCollection", nss.ns()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "StorageInterfaceImpl::dropCollection", nss.ns()); } namespace { @@ -455,7 +457,7 @@ DeleteStageParams makeDeleteStageParamsForDeleteDocuments() { */ enum class FindDeleteMode { kFind, kDelete }; StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments( - OperationContext* txn, + OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, StorageInterface::ScanDirection scanDirection, @@ -468,8 +470,8 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments( MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { auto collectionAccessMode = isFind ? MODE_IS : MODE_IX; - ScopedTransaction transaction(txn, collectionAccessMode); - AutoGetCollection collectionGuard(txn, nss, collectionAccessMode); + ScopedTransaction transaction(opCtx, collectionAccessMode); + AutoGetCollection collectionGuard(opCtx, nss, collectionAccessMode); auto collection = collectionGuard.getCollection(); if (!collection) { return {ErrorCodes::NamespaceNotFound, @@ -493,9 +495,9 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments( // Use collection scan. planExecutor = isFind ? 
InternalPlanner::collectionScan( - txn, nss.ns(), collection, PlanExecutor::YIELD_MANUAL, direction) + opCtx, nss.ns(), collection, PlanExecutor::YIELD_MANUAL, direction) : InternalPlanner::deleteWithCollectionScan( - txn, + opCtx, collection, makeDeleteStageParamsForDeleteDocuments(), PlanExecutor::YIELD_MANUAL, @@ -506,7 +508,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments( invariant(indexCatalog); bool includeUnfinishedIndexes = false; IndexDescriptor* indexDescriptor = - indexCatalog->findIndexByName(txn, *indexName, includeUnfinishedIndexes); + indexCatalog->findIndexByName(opCtx, *indexName, includeUnfinishedIndexes); if (!indexDescriptor) { return {ErrorCodes::IndexNotFound, str::stream() << "Index not found, ns:" << nss.ns() << ", index: " @@ -529,7 +531,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments( bounds.first = startKey; } planExecutor = isFind - ? InternalPlanner::indexScan(txn, + ? InternalPlanner::indexScan(opCtx, collection, indexDescriptor, bounds.first, @@ -538,7 +540,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments( PlanExecutor::YIELD_MANUAL, direction, InternalPlanner::IXSCAN_FETCH) - : InternalPlanner::deleteWithIndexScan(txn, + : InternalPlanner::deleteWithIndexScan(opCtx, collection, makeDeleteStageParamsForDeleteDocuments(), indexDescriptor, @@ -562,33 +564,39 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments( } return docs; } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, opStr, nss.ns()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, opStr, nss.ns()); MONGO_UNREACHABLE; } } // namespace StatusWith<std::vector<BSONObj>> StorageInterfaceImpl::findDocuments( - OperationContext* txn, + OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, const BSONObj& startKey, BoundInclusion boundInclusion, std::size_t limit) { - return _findOrDeleteDocuments( - txn, nss, indexName, scanDirection, startKey, boundInclusion, limit, 
FindDeleteMode::kFind); + return _findOrDeleteDocuments(opCtx, + nss, + indexName, + scanDirection, + startKey, + boundInclusion, + limit, + FindDeleteMode::kFind); } StatusWith<std::vector<BSONObj>> StorageInterfaceImpl::deleteDocuments( - OperationContext* txn, + OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, const BSONObj& startKey, BoundInclusion boundInclusion, std::size_t limit) { - return _findOrDeleteDocuments(txn, + return _findOrDeleteDocuments(opCtx, nss, indexName, scanDirection, @@ -598,10 +606,10 @@ StatusWith<std::vector<BSONObj>> StorageInterfaceImpl::deleteDocuments( FindDeleteMode::kDelete); } -Status StorageInterfaceImpl::isAdminDbValid(OperationContext* txn) { - ScopedTransaction transaction(txn, MODE_IX); - AutoGetDb autoDB(txn, "admin", MODE_X); - return checkAdminDatabase(txn, autoDB.getDb()); +Status StorageInterfaceImpl::isAdminDbValid(OperationContext* opCtx) { + ScopedTransaction transaction(opCtx, MODE_IX); + AutoGetDb autoDB(opCtx, "admin", MODE_X); + return checkAdminDatabase(opCtx, autoDB.getDb()); } } // namespace repl diff --git a/src/mongo/db/repl/storage_interface_impl.h b/src/mongo/db/repl/storage_interface_impl.h index 9edd37b44c8..b3bc5eeb617 100644 --- a/src/mongo/db/repl/storage_interface_impl.h +++ b/src/mongo/db/repl/storage_interface_impl.h @@ -62,19 +62,19 @@ public: */ NamespaceString getMinValidNss() const; - bool getInitialSyncFlag(OperationContext* txn) const override; + bool getInitialSyncFlag(OperationContext* opCtx) const override; - void setInitialSyncFlag(OperationContext* txn) override; + void setInitialSyncFlag(OperationContext* opCtx) override; - void clearInitialSyncFlag(OperationContext* txn) override; + void clearInitialSyncFlag(OperationContext* opCtx) override; - OpTime getMinValid(OperationContext* txn) const override; - void setMinValid(OperationContext* txn, const OpTime& minValid) override; - void 
setMinValidToAtLeast(OperationContext* txn, const OpTime& endOpTime) override; - void setOplogDeleteFromPoint(OperationContext* txn, const Timestamp& timestamp) override; - Timestamp getOplogDeleteFromPoint(OperationContext* txn) override; - void setAppliedThrough(OperationContext* txn, const OpTime& optime) override; - OpTime getAppliedThrough(OperationContext* txn) override; + OpTime getMinValid(OperationContext* opCtx) const override; + void setMinValid(OperationContext* opCtx, const OpTime& minValid) override; + void setMinValidToAtLeast(OperationContext* opCtx, const OpTime& endOpTime) override; + void setOplogDeleteFromPoint(OperationContext* opCtx, const Timestamp& timestamp) override; + Timestamp getOplogDeleteFromPoint(OperationContext* opCtx) override; + void setAppliedThrough(OperationContext* opCtx, const OpTime& optime) override; + OpTime getAppliedThrough(OperationContext* opCtx) override; /** * Allocates a new TaskRunner for use by the passed in collection. @@ -85,26 +85,27 @@ public: const BSONObj idIndexSpec, const std::vector<BSONObj>& secondaryIndexSpecs) override; - Status insertDocument(OperationContext* txn, + Status insertDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) override; - Status insertDocuments(OperationContext* txn, + Status insertDocuments(OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs) override; - Status dropReplicatedDatabases(OperationContext* txn) override; + Status dropReplicatedDatabases(OperationContext* opCtx) override; - Status createOplog(OperationContext* txn, const NamespaceString& nss) override; - StatusWith<size_t> getOplogMaxSize(OperationContext* txn, const NamespaceString& nss) override; + Status createOplog(OperationContext* opCtx, const NamespaceString& nss) override; + StatusWith<size_t> getOplogMaxSize(OperationContext* opCtx, + const NamespaceString& nss) override; - Status createCollection(OperationContext* txn, + Status 
createCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) override; - Status dropCollection(OperationContext* txn, const NamespaceString& nss) override; + Status dropCollection(OperationContext* opCtx, const NamespaceString& nss) override; - StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* txn, + StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, @@ -112,7 +113,7 @@ public: BoundInclusion boundInclusion, std::size_t limit) override; - StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* txn, + StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, @@ -120,12 +121,12 @@ public: BoundInclusion boundInclusion, std::size_t limit) override; - Status isAdminDbValid(OperationContext* txn) override; + Status isAdminDbValid(OperationContext* opCtx) override; private: // Returns empty document if not present. - BSONObj getMinValidDocument(OperationContext* txn) const; - void updateMinValidDocument(OperationContext* txn, const BSONObj& updateSpec); + BSONObj getMinValidDocument(OperationContext* opCtx) const; + void updateMinValidDocument(OperationContext* opCtx, const BSONObj& updateSpec); const NamespaceString _minValidNss; }; diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp index c44e281f012..2ee2cbf2259 100644 --- a/src/mongo/db/repl/storage_interface_impl_test.cpp +++ b/src/mongo/db/repl/storage_interface_impl_test.cpp @@ -87,17 +87,17 @@ NamespaceString makeNamespace(const T& t, const char* suffix = "") { /** * Returns min valid document. 
*/ -BSONObj getMinValidDocument(OperationContext* txn, const NamespaceString& minValidNss) { +BSONObj getMinValidDocument(OperationContext* opCtx, const NamespaceString& minValidNss) { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_IS); - Lock::DBLock dblk(txn->lockState(), minValidNss.db(), MODE_IS); - Lock::CollectionLock lk(txn->lockState(), minValidNss.ns(), MODE_IS); + ScopedTransaction transaction(opCtx, MODE_IS); + Lock::DBLock dblk(opCtx->lockState(), minValidNss.db(), MODE_IS); + Lock::CollectionLock lk(opCtx->lockState(), minValidNss.ns(), MODE_IS); BSONObj mv; - if (Helpers::getSingleton(txn, minValidNss.ns().c_str(), mv)) { + if (Helpers::getSingleton(opCtx, minValidNss.ns().c_str(), mv)) { return mv; } } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "getMinValidDocument", minValidNss.ns()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "getMinValidDocument", minValidNss.ns()); return BSONObj(); } @@ -116,21 +116,21 @@ CollectionOptions createOplogCollectionOptions() { * Create test collection. * Returns collection. 
*/ -void createCollection(OperationContext* txn, +void createCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options = CollectionOptions()) { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dblk(txn->lockState(), nss.db(), MODE_X); - OldClientContext ctx(txn, nss.ns()); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock dblk(opCtx->lockState(), nss.db(), MODE_X); + OldClientContext ctx(opCtx, nss.ns()); auto db = ctx.db(); ASSERT_TRUE(db); - mongo::WriteUnitOfWork wuow(txn); - auto coll = db->createCollection(txn, nss.ns(), options); + mongo::WriteUnitOfWork wuow(opCtx); + auto coll = db->createCollection(opCtx, nss.ns(), options); ASSERT_TRUE(coll); wuow.commit(); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", nss.ns()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", nss.ns()); } /** @@ -158,11 +158,11 @@ ReplSettings createReplSettings() { /** * Counts the number of keys in an index using an IndexAccessMethod::validate call. 
*/ -int64_t getIndexKeyCount(OperationContext* txn, IndexCatalog* cat, IndexDescriptor* desc) { +int64_t getIndexKeyCount(OperationContext* opCtx, IndexCatalog* cat, IndexDescriptor* desc) { auto idx = cat->getIndex(desc); int64_t numKeys; ValidateResults fullRes; - idx->validate(txn, &numKeys, &fullRes); + idx->validate(opCtx, &numKeys, &fullRes); return numKeys; } @@ -191,28 +191,28 @@ protected: ServiceContextMongoDTest::setUp(); createOptCtx(); _coordinator = - new ReplicationCoordinatorMock(_txn->getServiceContext(), createReplSettings()); + new ReplicationCoordinatorMock(_opCtx->getServiceContext(), createReplSettings()); setGlobalReplicationCoordinator(_coordinator); } void tearDown() override { - _txn.reset(nullptr); + _opCtx.reset(nullptr); ServiceContextMongoDTest::tearDown(); } void createOptCtx() { - _txn = cc().makeOperationContext(); + _opCtx = cc().makeOperationContext(); // We are not replicating nor validating these writes. - _txn->setReplicatedWrites(false); - DisableDocumentValidation validationDisabler(_txn.get()); + _opCtx->setReplicatedWrites(false); + DisableDocumentValidation validationDisabler(_opCtx.get()); } OperationContext* getOperationContext() { - return _txn.get(); + return _opCtx.get(); } private: - ServiceContext::UniqueOperationContext _txn; + ServiceContext::UniqueOperationContext _opCtx; // Owned by service context ReplicationCoordinator* _coordinator; @@ -249,23 +249,23 @@ TEST_F(StorageInterfaceImplTest, InitialSyncFlag) { NamespaceString nss("local.StorageInterfaceImplTest_InitialSyncFlag"); StorageInterfaceImpl storageInterface(nss); - auto txn = getClient()->makeOperationContext(); + auto opCtx = getClient()->makeOperationContext(); // Initial sync flag should be unset after initializing a new storage engine. - ASSERT_FALSE(storageInterface.getInitialSyncFlag(txn.get())); + ASSERT_FALSE(storageInterface.getInitialSyncFlag(opCtx.get())); // Setting initial sync flag should affect getInitialSyncFlag() result. 
- storageInterface.setInitialSyncFlag(txn.get()); - ASSERT_TRUE(storageInterface.getInitialSyncFlag(txn.get())); + storageInterface.setInitialSyncFlag(opCtx.get()); + ASSERT_TRUE(storageInterface.getInitialSyncFlag(opCtx.get())); // Check min valid document using storage engine interface. - auto minValidDocument = getMinValidDocument(txn.get(), nss); + auto minValidDocument = getMinValidDocument(opCtx.get(), nss); ASSERT_TRUE(minValidDocument.hasField(StorageInterfaceImpl::kInitialSyncFlagFieldName)); ASSERT_TRUE(minValidDocument.getBoolField(StorageInterfaceImpl::kInitialSyncFlagFieldName)); // Clearing initial sync flag should affect getInitialSyncFlag() result. - storageInterface.clearInitialSyncFlag(txn.get()); - ASSERT_FALSE(storageInterface.getInitialSyncFlag(txn.get())); + storageInterface.clearInitialSyncFlag(opCtx.get()); + ASSERT_FALSE(storageInterface.getInitialSyncFlag(opCtx.get())); } TEST_F(StorageInterfaceImplTest, GetMinValidAfterSettingInitialSyncFlagWorks) { @@ -273,53 +273,54 @@ TEST_F(StorageInterfaceImplTest, GetMinValidAfterSettingInitialSyncFlagWorks) { "local.StorageInterfaceImplTest_GetMinValidAfterSettingInitialSyncFlagWorks"); StorageInterfaceImpl storageInterface(nss); - auto txn = getClient()->makeOperationContext(); + auto opCtx = getClient()->makeOperationContext(); // Initial sync flag should be unset after initializing a new storage engine. - ASSERT_FALSE(storageInterface.getInitialSyncFlag(txn.get())); + ASSERT_FALSE(storageInterface.getInitialSyncFlag(opCtx.get())); // Setting initial sync flag should affect getInitialSyncFlag() result. 
- storageInterface.setInitialSyncFlag(txn.get()); - ASSERT_TRUE(storageInterface.getInitialSyncFlag(txn.get())); + storageInterface.setInitialSyncFlag(opCtx.get()); + ASSERT_TRUE(storageInterface.getInitialSyncFlag(opCtx.get())); - ASSERT(storageInterface.getMinValid(txn.get()).isNull()); - ASSERT(storageInterface.getAppliedThrough(txn.get()).isNull()); - ASSERT(storageInterface.getOplogDeleteFromPoint(txn.get()).isNull()); + ASSERT(storageInterface.getMinValid(opCtx.get()).isNull()); + ASSERT(storageInterface.getAppliedThrough(opCtx.get()).isNull()); + ASSERT(storageInterface.getOplogDeleteFromPoint(opCtx.get()).isNull()); } TEST_F(StorageInterfaceImplTest, MinValid) { NamespaceString nss("local.StorageInterfaceImplTest_MinValid"); StorageInterfaceImpl storageInterface(nss); - auto txn = getClient()->makeOperationContext(); + auto opCtx = getClient()->makeOperationContext(); // MinValid boundaries should all be null after initializing a new storage engine. - ASSERT(storageInterface.getMinValid(txn.get()).isNull()); - ASSERT(storageInterface.getAppliedThrough(txn.get()).isNull()); - ASSERT(storageInterface.getOplogDeleteFromPoint(txn.get()).isNull()); + ASSERT(storageInterface.getMinValid(opCtx.get()).isNull()); + ASSERT(storageInterface.getAppliedThrough(opCtx.get()).isNull()); + ASSERT(storageInterface.getOplogDeleteFromPoint(opCtx.get()).isNull()); // Setting min valid boundaries should affect getMinValid() result. 
OpTime startOpTime({Seconds(123), 0}, 1LL); OpTime endOpTime({Seconds(456), 0}, 1LL); - storageInterface.setAppliedThrough(txn.get(), startOpTime); - storageInterface.setMinValid(txn.get(), endOpTime); - storageInterface.setOplogDeleteFromPoint(txn.get(), endOpTime.getTimestamp()); + storageInterface.setAppliedThrough(opCtx.get(), startOpTime); + storageInterface.setMinValid(opCtx.get(), endOpTime); + storageInterface.setOplogDeleteFromPoint(opCtx.get(), endOpTime.getTimestamp()); - ASSERT_EQ(storageInterface.getAppliedThrough(txn.get()), startOpTime); - ASSERT_EQ(storageInterface.getMinValid(txn.get()), endOpTime); - ASSERT_EQ(storageInterface.getOplogDeleteFromPoint(txn.get()), endOpTime.getTimestamp()); + ASSERT_EQ(storageInterface.getAppliedThrough(opCtx.get()), startOpTime); + ASSERT_EQ(storageInterface.getMinValid(opCtx.get()), endOpTime); + ASSERT_EQ(storageInterface.getOplogDeleteFromPoint(opCtx.get()), endOpTime.getTimestamp()); // setMinValid always changes minValid, but setMinValidToAtLeast only does if higher. - storageInterface.setMinValid(txn.get(), startOpTime); // Forcibly lower it. - ASSERT_EQ(storageInterface.getMinValid(txn.get()), startOpTime); - storageInterface.setMinValidToAtLeast(txn.get(), endOpTime); // Higher than current (sets it). - ASSERT_EQ(storageInterface.getMinValid(txn.get()), endOpTime); - storageInterface.setMinValidToAtLeast(txn.get(), startOpTime); // Lower than current (no-op). - ASSERT_EQ(storageInterface.getMinValid(txn.get()), endOpTime); + storageInterface.setMinValid(opCtx.get(), startOpTime); // Forcibly lower it. + ASSERT_EQ(storageInterface.getMinValid(opCtx.get()), startOpTime); + storageInterface.setMinValidToAtLeast(opCtx.get(), + endOpTime); // Higher than current (sets it). + ASSERT_EQ(storageInterface.getMinValid(opCtx.get()), endOpTime); + storageInterface.setMinValidToAtLeast(opCtx.get(), startOpTime); // Lower than current (no-op). 
+ ASSERT_EQ(storageInterface.getMinValid(opCtx.get()), endOpTime); // Check min valid document using storage engine interface. - auto minValidDocument = getMinValidDocument(txn.get(), nss); + auto minValidDocument = getMinValidDocument(opCtx.get(), nss); ASSERT_TRUE(minValidDocument.hasField(StorageInterfaceImpl::kBeginFieldName)); ASSERT_TRUE(minValidDocument[StorageInterfaceImpl::kBeginFieldName].isABSONObj()); ASSERT_EQUALS(startOpTime, @@ -330,45 +331,45 @@ TEST_F(StorageInterfaceImplTest, MinValid) { endOpTime.getTimestamp(), minValidDocument[StorageInterfaceImpl::kOplogDeleteFromPointFieldName].timestamp()); - // Recovery unit will be owned by "txn". + // Recovery unit will be owned by "opCtx". RecoveryUnitWithDurabilityTracking* recoveryUnit = new RecoveryUnitWithDurabilityTracking(); - txn->setRecoveryUnit(recoveryUnit, OperationContext::kNotInUnitOfWork); + opCtx->setRecoveryUnit(recoveryUnit, OperationContext::kNotInUnitOfWork); // Set min valid without waiting for the changes to be durable. 
OpTime endOpTime2({Seconds(789), 0}, 1LL); - storageInterface.setMinValid(txn.get(), endOpTime2); - storageInterface.setAppliedThrough(txn.get(), {}); - ASSERT_EQUALS(storageInterface.getAppliedThrough(txn.get()), OpTime()); - ASSERT_EQUALS(storageInterface.getMinValid(txn.get()), endOpTime2); + storageInterface.setMinValid(opCtx.get(), endOpTime2); + storageInterface.setAppliedThrough(opCtx.get(), {}); + ASSERT_EQUALS(storageInterface.getAppliedThrough(opCtx.get()), OpTime()); + ASSERT_EQUALS(storageInterface.getMinValid(opCtx.get()), endOpTime2); ASSERT_FALSE(recoveryUnit->waitUntilDurableCalled); } TEST_F(StorageInterfaceImplTest, SnapshotSupported) { - auto txn = getClient()->makeOperationContext(); - Status status = txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot(); + auto opCtx = getClient()->makeOperationContext(); + Status status = opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot(); ASSERT(status.isOK()); } TEST_F(StorageInterfaceImplTest, InsertDocumentsReturnsOKWhenNoOperationsAreGiven) { - auto txn = getClient()->makeOperationContext(); + auto opCtx = getClient()->makeOperationContext(); NamespaceString nss("local." + _agent.getTestName()); - createCollection(txn.get(), nss); + createCollection(opCtx.get(), nss); StorageInterfaceImpl storageInterface(nss); - ASSERT_OK(storageInterface.insertDocuments(txn.get(), nss, {})); + ASSERT_OK(storageInterface.insertDocuments(opCtx.get(), nss, {})); } TEST_F(StorageInterfaceImplTest, InsertDocumentsReturnsInternalErrorWhenSavingOperationToNonOplogCollection) { // Create fake non-oplog collection to ensure saving oplog entries (without _id field) will // fail. - auto txn = getClient()->makeOperationContext(); + auto opCtx = getClient()->makeOperationContext(); NamespaceString nss("local." 
+ _agent.getSuiteName() + "_" + _agent.getTestName()); - createCollection(txn.get(), nss); + createCollection(opCtx.get(), nss); // Non-oplog collection will enforce mandatory _id field requirement on insertion. StorageInterfaceImpl storageInterface(nss); auto op = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL}); - auto status = storageInterface.insertDocuments(txn.get(), nss, {op}); + auto status = storageInterface.insertDocuments(opCtx.get(), nss, {op}); ASSERT_EQUALS(ErrorCodes::InternalError, status); ASSERT_STRING_CONTAINS(status.reason(), "Collection::insertDocument got document without _id"); } @@ -376,12 +377,12 @@ TEST_F(StorageInterfaceImplTest, TEST_F(StorageInterfaceImplTest, InsertDocumentsInsertsDocumentsOneAtATimeWhenAllAtOnceInsertingFails) { // Create a collection that does not support all-at-once inserting. - auto txn = getClient()->makeOperationContext(); + auto opCtx = getClient()->makeOperationContext(); NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName()); CollectionOptions options; options.capped = true; options.cappedSize = 1024 * 1024; - createCollection(txn.get(), nss, options); + createCollection(opCtx.get(), nss, options); // StorageInterfaceImpl::insertDocuments should fall back on inserting the batch one at a time. StorageInterfaceImpl storageInterface(nss); auto doc1 = BSON("_id" << 1); @@ -389,16 +390,16 @@ TEST_F(StorageInterfaceImplTest, std::vector<BSONObj> docs({doc1, doc2}); // Confirm that Collection::insertDocuments fails to insert the batch all at once. 
{ - AutoGetCollection autoCollection(txn.get(), nss, MODE_IX); - WriteUnitOfWork wunit(txn.get()); + AutoGetCollection autoCollection(opCtx.get(), nss, MODE_IX); + WriteUnitOfWork wunit(opCtx.get()); ASSERT_EQUALS(ErrorCodes::OperationCannotBeBatched, autoCollection.getCollection()->insertDocuments( - txn.get(), docs.begin(), docs.cend(), nullptr, false)); + opCtx.get(), docs.begin(), docs.cend(), nullptr, false)); } - ASSERT_OK(storageInterface.insertDocuments(txn.get(), nss, docs)); + ASSERT_OK(storageInterface.insertDocuments(opCtx.get(), nss, docs)); // Check collection contents. OplogInterface returns documents in reverse natural order. - OplogInterfaceLocal oplog(txn.get(), nss.ns()); + OplogInterfaceLocal oplog(opCtx.get(), nss.ns()); auto iter = oplog.makeIterator(); ASSERT_BSONOBJ_EQ(doc2, unittest::assertGet(iter->next()).first); ASSERT_BSONOBJ_EQ(doc1, unittest::assertGet(iter->next()).first); @@ -407,19 +408,19 @@ TEST_F(StorageInterfaceImplTest, TEST_F(StorageInterfaceImplTest, InsertDocumentsSavesOperationsReturnsOpTimeOfLastOperation) { // Create fake oplog collection to hold operations. - auto txn = getClient()->makeOperationContext(); + auto opCtx = getClient()->makeOperationContext(); NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName()); - createCollection(txn.get(), nss, createOplogCollectionOptions()); + createCollection(opCtx.get(), nss, createOplogCollectionOptions()); // Insert operations using storage interface. Ensure optime return is consistent with last // operation inserted. StorageInterfaceImpl storageInterface(nss); auto op1 = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL}); auto op2 = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL}); - ASSERT_OK(storageInterface.insertDocuments(txn.get(), nss, {op1, op2})); + ASSERT_OK(storageInterface.insertDocuments(opCtx.get(), nss, {op1, op2})); // Check contents of oplog. OplogInterface iterates over oplog collection in reverse. 
- repl::OplogInterfaceLocal oplog(txn.get(), nss.ns()); + repl::OplogInterfaceLocal oplog(opCtx.get(), nss.ns()); auto iter = oplog.makeIterator(); ASSERT_BSONOBJ_EQ(op2, unittest::assertGet(iter->next()).first); ASSERT_BSONOBJ_EQ(op1, unittest::assertGet(iter->next()).first); @@ -431,46 +432,46 @@ TEST_F(StorageInterfaceImplTest, auto op = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL}); NamespaceString nss("local.nosuchcollection"); StorageInterfaceImpl storageInterface(nss); - auto txn = getClient()->makeOperationContext(); - auto status = storageInterface.insertDocuments(txn.get(), nss, {op}); + auto opCtx = getClient()->makeOperationContext(); + auto status = storageInterface.insertDocuments(opCtx.get(), nss, {op}); ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status); ASSERT_STRING_CONTAINS(status.reason(), "The collection must exist before inserting documents"); } TEST_F(StorageInterfaceImplWithReplCoordTest, InsertMissingDocWorksOnExistingCappedCollection) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; NamespaceString nss("foo.bar"); CollectionOptions opts; opts.capped = true; opts.cappedSize = 1024 * 1024; - createCollection(txn, nss, opts); - ASSERT_OK(storage.insertDocument(txn, nss, BSON("_id" << 1))); - AutoGetCollectionForRead autoColl(txn, nss); + createCollection(opCtx, nss, opts); + ASSERT_OK(storage.insertDocument(opCtx, nss, BSON("_id" << 1))); + AutoGetCollectionForRead autoColl(opCtx, nss); ASSERT_TRUE(autoColl.getCollection()); } TEST_F(StorageInterfaceImplWithReplCoordTest, InsertMissingDocWorksOnExistingCollection) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; NamespaceString nss("foo.bar"); - createCollection(txn, nss); - ASSERT_OK(storage.insertDocument(txn, nss, BSON("_id" << 1))); - AutoGetCollectionForRead autoColl(txn, nss); + createCollection(opCtx, nss); + ASSERT_OK(storage.insertDocument(opCtx, nss, BSON("_id" << 
1))); + AutoGetCollectionForRead autoColl(opCtx, nss); ASSERT_TRUE(autoColl.getCollection()); } TEST_F(StorageInterfaceImplWithReplCoordTest, InsertMissingDocFailesIfCollectionIsMissing) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; NamespaceString nss("foo.bar"); - const auto status = storage.insertDocument(txn, nss, BSON("_id" << 1)); + const auto status = storage.insertDocument(opCtx, nss, BSON("_id" << 1)); ASSERT_NOT_OK(status); ASSERT_EQ(status.code(), ErrorCodes::NamespaceNotFound); } TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionWithIDIndexCommits) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; storage.startup(); NamespaceString nss("foo.bar"); @@ -484,18 +485,18 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionWithIDIndexCommits ASSERT_OK(loader->insertDocuments(docs.begin(), docs.end())); ASSERT_OK(loader->commit()); - AutoGetCollectionForRead autoColl(txn, nss); + AutoGetCollectionForRead autoColl(opCtx, nss); auto coll = autoColl.getCollection(); ASSERT(coll); - ASSERT_EQ(coll->getRecordStore()->numRecords(txn), 2LL); + ASSERT_EQ(coll->getRecordStore()->numRecords(opCtx), 2LL); auto collIdxCat = coll->getIndexCatalog(); - auto idIdxDesc = collIdxCat->findIdIndex(txn); - auto count = getIndexKeyCount(txn, collIdxCat, idIdxDesc); + auto idIdxDesc = collIdxCat->findIdIndex(opCtx); + auto count = getIndexKeyCount(opCtx, collIdxCat, idIdxDesc); ASSERT_EQ(count, 2LL); } void _testDestroyUncommitedCollectionBulkLoader( - OperationContext* txn, + OperationContext* opCtx, std::vector<BSONObj> secondaryIndexes, stdx::function<void(std::unique_ptr<CollectionBulkLoader> loader)> destroyLoaderFn) { StorageInterfaceImpl storage; @@ -513,23 +514,23 @@ void _testDestroyUncommitedCollectionBulkLoader( // Collection and ID index should not exist after 'loader' is destroyed. 
destroyLoaderFn(std::move(loader)); - AutoGetCollectionForRead autoColl(txn, nss); + AutoGetCollectionForRead autoColl(opCtx, nss); auto coll = autoColl.getCollection(); // Bulk loader is used to create indexes. The collection is not dropped when the bulk loader is // destroyed. ASSERT_TRUE(coll); - ASSERT_EQ(1LL, coll->getRecordStore()->numRecords(txn)); + ASSERT_EQ(1LL, coll->getRecordStore()->numRecords(opCtx)); // IndexCatalog::numIndexesTotal() includes unfinished indexes. We need to ensure that // the bulk loader drops the unfinished indexes. auto collIdxCat = coll->getIndexCatalog(); - ASSERT_EQUALS(0, collIdxCat->numIndexesTotal(txn)); + ASSERT_EQUALS(0, collIdxCat->numIndexesTotal(opCtx)); } TEST_F(StorageInterfaceImplWithReplCoordTest, DestroyingUncommittedCollectionBulkLoaderDropsIndexes) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); NamespaceString nss("foo.bar"); std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name" << "x_1" @@ -538,24 +539,24 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, auto destroyLoaderFn = [](std::unique_ptr<CollectionBulkLoader> loader) { // Destroy 'loader' by letting it go out of scope. }; - _testDestroyUncommitedCollectionBulkLoader(txn, indexes, destroyLoaderFn); + _testDestroyUncommitedCollectionBulkLoader(opCtx, indexes, destroyLoaderFn); } TEST_F(StorageInterfaceImplWithReplCoordTest, DestructorInitializesClientBeforeDestroyingIdIndexBuilder) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); NamespaceString nss("foo.bar"); std::vector<BSONObj> indexes; auto destroyLoaderFn = [](std::unique_ptr<CollectionBulkLoader> loader) { // Destroy 'loader' in a new thread that does not have a Client. 
stdx::thread([&loader]() { loader.reset(); }).join(); }; - _testDestroyUncommitedCollectionBulkLoader(txn, indexes, destroyLoaderFn); + _testDestroyUncommitedCollectionBulkLoader(opCtx, indexes, destroyLoaderFn); } TEST_F(StorageInterfaceImplWithReplCoordTest, DestructorInitializesClientBeforeDestroyingSecondaryIndexesBuilder) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); NamespaceString nss("foo.bar"); std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name" << "x_1" @@ -565,15 +566,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // Destroy 'loader' in a new thread that does not have a Client. stdx::thread([&loader]() { loader.reset(); }).join(); }; - _testDestroyUncommitedCollectionBulkLoader(txn, indexes, destroyLoaderFn); + _testDestroyUncommitedCollectionBulkLoader(opCtx, indexes, destroyLoaderFn); } TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionThatAlreadyExistsFails) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; storage.startup(); NamespaceString nss("test.system.indexes"); - createCollection(txn, nss); + createCollection(opCtx, nss); const CollectionOptions opts; const std::vector<BSONObj> indexes; @@ -583,16 +584,16 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionThatAlreadyExistsF } TEST_F(StorageInterfaceImplWithReplCoordTest, CreateOplogCreateCappedCollection) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; NamespaceString nss("local.oplog.X"); { - AutoGetCollectionForRead autoColl(txn, nss); + AutoGetCollectionForRead autoColl(opCtx, nss); ASSERT_FALSE(autoColl.getCollection()); } - ASSERT_OK(storage.createOplog(txn, nss)); + ASSERT_OK(storage.createOplog(opCtx, nss)); { - AutoGetCollectionForRead autoColl(txn, nss); + AutoGetCollectionForRead autoColl(opCtx, nss); ASSERT_TRUE(autoColl.getCollection()); ASSERT_EQ(nss.toString(), 
autoColl.getCollection()->ns().toString()); ASSERT_TRUE(autoColl.getCollection()->isCapped()); @@ -601,78 +602,78 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, CreateOplogCreateCappedCollection) TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionReturnsUserExceptionAsStatusIfCollectionCreationThrows) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; NamespaceString nss("local.oplog.Y"); { - AutoGetCollectionForRead autoColl(txn, nss); + AutoGetCollectionForRead autoColl(opCtx, nss); ASSERT_FALSE(autoColl.getCollection()); } - auto status = storage.createCollection(txn, nss, CollectionOptions()); + auto status = storage.createCollection(opCtx, nss, CollectionOptions()); ASSERT_EQUALS(ErrorCodes::fromInt(28838), status); ASSERT_STRING_CONTAINS(status.reason(), "cannot create a non-capped oplog collection"); } TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionFailsIfCollectionExists) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); { - AutoGetCollectionForRead autoColl(txn, nss); + AutoGetCollectionForRead autoColl(opCtx, nss); ASSERT_FALSE(autoColl.getCollection()); } - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); { - AutoGetCollectionForRead autoColl(txn, nss); + AutoGetCollectionForRead autoColl(opCtx, nss); ASSERT_TRUE(autoColl.getCollection()); ASSERT_EQ(nss.toString(), autoColl.getCollection()->ns().toString()); } - auto status = storage.createCollection(txn, nss, CollectionOptions()); + auto status = storage.createCollection(opCtx, nss, CollectionOptions()); ASSERT_EQUALS(ErrorCodes::NamespaceExists, status); ASSERT_STRING_CONTAINS(status.reason(), str::stream() << "Collection " << nss.ns() << " already exists"); } TEST_F(StorageInterfaceImplWithReplCoordTest, 
DropCollectionWorksWithExistingWithDataCollection) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; NamespaceString nss("foo.bar"); - createCollection(txn, nss); - ASSERT_OK(storage.insertDocument(txn, nss, BSON("_id" << 1))); - ASSERT_OK(storage.dropCollection(txn, nss)); + createCollection(opCtx, nss); + ASSERT_OK(storage.insertDocument(opCtx, nss, BSON("_id" << 1))); + ASSERT_OK(storage.dropCollection(opCtx, nss)); } TEST_F(StorageInterfaceImplWithReplCoordTest, DropCollectionWorksWithExistingEmptyCollection) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; NamespaceString nss("foo.bar"); - createCollection(txn, nss); - ASSERT_OK(storage.dropCollection(txn, nss)); - AutoGetCollectionForRead autoColl(txn, nss); + createCollection(opCtx, nss); + ASSERT_OK(storage.dropCollection(opCtx, nss)); + AutoGetCollectionForRead autoColl(opCtx, nss); ASSERT_FALSE(autoColl.getCollection()); } TEST_F(StorageInterfaceImplWithReplCoordTest, DropCollectionWorksWithMissingCollection) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; NamespaceString nss("foo.bar"); - ASSERT_FALSE(AutoGetDb(txn, nss.db(), MODE_IS).getDb()); - ASSERT_OK(storage.dropCollection(txn, nss)); - ASSERT_FALSE(AutoGetCollectionForRead(txn, nss).getCollection()); + ASSERT_FALSE(AutoGetDb(opCtx, nss.db(), MODE_IS).getDb()); + ASSERT_OK(storage.dropCollection(opCtx, nss)); + ASSERT_FALSE(AutoGetCollectionForRead(opCtx, nss).getCollection()); // Database should not be created after running dropCollection. 
- ASSERT_FALSE(AutoGetDb(txn, nss.db(), MODE_IS).getDb()); + ASSERT_FALSE(AutoGetDb(opCtx, nss.db(), MODE_IS).getDb()); } TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsReturnsInvalidNamespaceIfCollectionIsMissing) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); auto indexName = "_id_"_sd; ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, storage - .findDocuments(txn, + .findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -683,14 +684,14 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, } TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsReturnsIndexNotFoundIfIndexIsMissing) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); auto indexName = "nonexistent"_sd; - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); ASSERT_EQUALS(ErrorCodes::IndexNotFound, storage - .findDocuments(txn, + .findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -702,7 +703,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsReturnsIndexNotFoundI TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsReturnsIndexOptionsConflictIfIndexIsAPartialIndex) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; storage.startup(); auto nss = makeNamespace(_agent); @@ -720,7 +721,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, auto indexName = "x_1"_sd; ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict, storage - .findDocuments(txn, + .findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -731,12 +732,12 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, } TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsReturnsEmptyVectorIfCollectionIsEmpty) { - auto txn = 
getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); auto indexName = "_id_"_sd; - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_TRUE(unittest::assertGet(storage.findDocuments(txn, + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_TRUE(unittest::assertGet(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -765,12 +766,12 @@ std::string _toString(const std::vector<BSONObj>& docs) { /** * Check collection contents. OplogInterface returns documents in reverse natural order. */ -void _assertDocumentsInCollectionEquals(OperationContext* txn, +void _assertDocumentsInCollectionEquals(OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs) { std::vector<BSONObj> reversedDocs(docs); std::reverse(reversedDocs.begin(), reversedDocs.end()); - OplogInterfaceLocal oplog(txn, nss.ns()); + OplogInterfaceLocal oplog(opCtx, nss.ns()); auto iter = oplog.makeIterator(); for (const auto& doc : reversedDocs) { ASSERT_BSONOBJ_EQ(doc, unittest::assertGet(iter->next()).first); @@ -805,12 +806,12 @@ BSONObj _assetGetFront(const StatusWith<std::vector<BSONObj>>& statusWithDocs) { TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsReturnsDocumentWithLowestKeyValueIfScanDirectionIsForward) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); auto indexName = "_id_"_sd; - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK(storage.insertDocuments(txn, + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments(opCtx, nss, {BSON("_id" << 0), BSON("_id" << 1), @@ -821,7 +822,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey not provided ASSERT_BSONOBJ_EQ( BSON("_id" << 0), - 
_assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -830,7 +831,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, 1U))); // startKey not provided. limit is 0. - _assertDocumentsEqual(storage.findDocuments(txn, + _assertDocumentsEqual(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -840,7 +841,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, {}); // startKey not provided. limit of 2. - _assertDocumentsEqual(storage.findDocuments(txn, + _assertDocumentsEqual(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -852,7 +853,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; include start key ASSERT_BSONOBJ_EQ( BSON("_id" << 0), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -861,7 +862,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, 1U))); ASSERT_BSONOBJ_EQ( BSON("_id" << 1), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -871,7 +872,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, ASSERT_BSONOBJ_EQ( BSON("_id" << 1), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -882,7 +883,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; include both start and end keys ASSERT_BSONOBJ_EQ( BSON("_id" << 1), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -893,7 +894,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; exclude start key ASSERT_BSONOBJ_EQ( BSON("_id" << 2), - _assetGetFront(storage.findDocuments(txn, + 
_assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -903,7 +904,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, ASSERT_BSONOBJ_EQ( BSON("_id" << 2), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -914,7 +915,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; exclude both start and end keys ASSERT_BSONOBJ_EQ( BSON("_id" << 2), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -924,7 +925,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; exclude both start and end keys. // A limit of 3 should return 2 documents because we reached the end of the collection. - _assertDocumentsEqual(storage.findDocuments(txn, + _assertDocumentsEqual(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -934,19 +935,19 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, {BSON("_id" << 3), BSON("_id" << 4)}); _assertDocumentsInCollectionEquals( - txn, + opCtx, nss, {BSON("_id" << 0), BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 3), BSON("_id" << 4)}); } TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsReturnsDocumentWithHighestKeyValueIfScanDirectionIsBackward) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); auto indexName = "_id_"_sd; - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK(storage.insertDocuments(txn, + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments(opCtx, nss, {BSON("_id" << 0), BSON("_id" << 1), @@ -957,7 +958,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey not provided ASSERT_BSONOBJ_EQ( BSON("_id" << 4), - 
_assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -966,7 +967,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, 1U))); // startKey not provided. limit is 0. - _assertDocumentsEqual(storage.findDocuments(txn, + _assertDocumentsEqual(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -976,7 +977,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, {}); // startKey not provided. limit of 2. - _assertDocumentsEqual(storage.findDocuments(txn, + _assertDocumentsEqual(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -988,7 +989,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; include start key ASSERT_BSONOBJ_EQ( BSON("_id" << 4), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -997,7 +998,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, 1U))); ASSERT_BSONOBJ_EQ( BSON("_id" << 3), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -1008,7 +1009,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; include both start and end keys ASSERT_BSONOBJ_EQ( BSON("_id" << 4), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -1019,7 +1020,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; exclude start key ASSERT_BSONOBJ_EQ( BSON("_id" << 2), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -1030,7 +1031,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; exclude both start and end keys ASSERT_BSONOBJ_EQ( 
BSON("_id" << 2), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -1040,7 +1041,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; exclude both start and end keys. // A limit of 3 should return 2 documents because we reached the beginning of the collection. - _assertDocumentsEqual(storage.findDocuments(txn, + _assertDocumentsEqual(storage.findDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -1050,22 +1051,22 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, {BSON("_id" << 1), BSON("_id" << 0)}); _assertDocumentsInCollectionEquals( - txn, + opCtx, nss, {BSON("_id" << 0), BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 3), BSON("_id" << 4)}); } TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsCollScanReturnsFirstDocumentInsertedIfScanDirectionIsForward) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK( - storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments( + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); ASSERT_BSONOBJ_EQ( BSON("_id" << 1), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, boost::none, StorageInterface::ScanDirection::kForward, @@ -1074,7 +1075,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, 1U))); // Check collection contents. OplogInterface returns documents in reverse natural order. 
- OplogInterfaceLocal oplog(txn, nss.ns()); + OplogInterfaceLocal oplog(opCtx, nss.ns()); auto iter = oplog.makeIterator(); ASSERT_BSONOBJ_EQ(BSON("_id" << 0), unittest::assertGet(iter->next()).first); ASSERT_BSONOBJ_EQ(BSON("_id" << 2), unittest::assertGet(iter->next()).first); @@ -1084,15 +1085,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsCollScanReturnsLastDocumentInsertedIfScanDirectionIsBackward) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK( - storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments( + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); ASSERT_BSONOBJ_EQ( BSON("_id" << 0), - _assetGetFront(storage.findDocuments(txn, + _assetGetFront(storage.findDocuments(opCtx, nss, boost::none, StorageInterface::ScanDirection::kBackward, @@ -1101,20 +1102,20 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, 1U))); _assertDocumentsInCollectionEquals( - txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}); + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}); } TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsCollScanReturnsNoSuchKeyIfStartKeyIsNotEmpty) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK( - storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments( + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), 
BSON("_id" << 0)})); ASSERT_EQUALS(ErrorCodes::NoSuchKey, storage - .findDocuments(txn, + .findDocuments(opCtx, nss, boost::none, StorageInterface::ScanDirection::kForward, @@ -1126,15 +1127,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsCollScanReturnsInvalidOptionsIfBoundIsNotStartKeyOnly) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK( - storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments( + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); ASSERT_EQUALS(ErrorCodes::InvalidOptions, storage - .findDocuments(txn, + .findDocuments(opCtx, nss, boost::none, StorageInterface::ScanDirection::kForward, @@ -1146,13 +1147,13 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsReturnsInvalidNamespaceIfCollectionIsMissing) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); auto indexName = "_id_"_sd; ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, storage - .deleteDocuments(txn, + .deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -1163,14 +1164,14 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, } TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsReturnsIndexNotFoundIfIndexIsMissing) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); auto indexName = "nonexistent"_sd; - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); + ASSERT_OK(storage.createCollection(opCtx, nss, 
CollectionOptions())); ASSERT_EQUALS(ErrorCodes::IndexNotFound, storage - .deleteDocuments(txn, + .deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -1182,13 +1183,13 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsReturnsIndexNotFoun TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsReturnsEmptyVectorIfCollectionIsEmpty) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); auto indexName = "_id_"_sd; - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); ASSERT_TRUE( - unittest::assertGet(storage.deleteDocuments(txn, + unittest::assertGet(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -1200,12 +1201,12 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsReturnsDocumentWithLowestKeyValueIfScanDirectionIsForward) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); auto indexName = "_id_"_sd; - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK(storage.insertDocuments(txn, + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments(opCtx, nss, {BSON("_id" << 0), BSON("_id" << 1), @@ -1219,7 +1220,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey not provided ASSERT_BSONOBJ_EQ( BSON("_id" << 0), - _assetGetFront(storage.deleteDocuments(txn, + _assetGetFront(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -1227,7 +1228,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, BoundInclusion::kIncludeStartKeyOnly, 1U))); - _assertDocumentsInCollectionEquals(txn, + _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 
1), BSON("_id" << 2), @@ -1238,7 +1239,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, BSON("_id" << 7)}); // startKey not provided. limit is 0. - _assertDocumentsEqual(storage.deleteDocuments(txn, + _assertDocumentsEqual(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -1247,7 +1248,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, 0U), {}); - _assertDocumentsInCollectionEquals(txn, + _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), @@ -1260,7 +1261,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; include start key ASSERT_BSONOBJ_EQ( BSON("_id" << 2), - _assetGetFront(storage.deleteDocuments(txn, + _assetGetFront(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -1268,7 +1269,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, BoundInclusion::kIncludeStartKeyOnly, 1U))); - _assertDocumentsInCollectionEquals(txn, + _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 1), BSON("_id" << 3), @@ -1280,7 +1281,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; exclude start key ASSERT_BSONOBJ_EQ( BSON("_id" << 5), - _assetGetFront(storage.deleteDocuments(txn, + _assetGetFront(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -1289,13 +1290,13 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, 1U))); _assertDocumentsInCollectionEquals( - txn, + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 3), BSON("_id" << 4), BSON("_id" << 6), BSON("_id" << 7)}); // startKey provided; exclude start key. // A limit of 3 should return 2 documents because we reached the end of the collection. 
- _assertDocumentsEqual(storage.deleteDocuments(txn, + _assertDocumentsEqual(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kForward, @@ -1305,17 +1306,17 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, {BSON("_id" << 6), BSON("_id" << 7)}); _assertDocumentsInCollectionEquals( - txn, nss, {BSON("_id" << 1), BSON("_id" << 3), BSON("_id" << 4)}); + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 3), BSON("_id" << 4)}); } TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsReturnsDocumentWithHighestKeyValueIfScanDirectionIsBackward) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); auto indexName = "_id_"_sd; - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK(storage.insertDocuments(txn, + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments(opCtx, nss, {BSON("_id" << 0), BSON("_id" << 1), @@ -1329,7 +1330,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey not provided ASSERT_BSONOBJ_EQ( BSON("_id" << 7), - _assetGetFront(storage.deleteDocuments(txn, + _assetGetFront(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -1337,7 +1338,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, BoundInclusion::kIncludeStartKeyOnly, 1U))); - _assertDocumentsInCollectionEquals(txn, + _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 0), BSON("_id" << 1), @@ -1348,7 +1349,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, BSON("_id" << 6)}); // startKey not provided. limit is 0. 
- _assertDocumentsEqual(storage.deleteDocuments(txn, + _assertDocumentsEqual(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -1357,7 +1358,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, 0U), {}); - _assertDocumentsInCollectionEquals(txn, + _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 0), BSON("_id" << 1), @@ -1370,7 +1371,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; include start key ASSERT_BSONOBJ_EQ( BSON("_id" << 5), - _assetGetFront(storage.deleteDocuments(txn, + _assetGetFront(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -1378,7 +1379,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, BoundInclusion::kIncludeStartKeyOnly, 1U))); - _assertDocumentsInCollectionEquals(txn, + _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 0), BSON("_id" << 1), @@ -1390,7 +1391,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, // startKey provided; exclude start key ASSERT_BSONOBJ_EQ( BSON("_id" << 2), - _assetGetFront(storage.deleteDocuments(txn, + _assetGetFront(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -1399,13 +1400,13 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, 1U))); _assertDocumentsInCollectionEquals( - txn, + opCtx, nss, {BSON("_id" << 0), BSON("_id" << 1), BSON("_id" << 3), BSON("_id" << 4), BSON("_id" << 6)}); // startKey provided; exclude start key. // A limit of 3 should return 2 documents because we reached the beginning of the collection. 
- _assertDocumentsEqual(storage.deleteDocuments(txn, + _assertDocumentsEqual(storage.deleteDocuments(opCtx, nss, indexName, StorageInterface::ScanDirection::kBackward, @@ -1415,20 +1416,20 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, {BSON("_id" << 1), BSON("_id" << 0)}); _assertDocumentsInCollectionEquals( - txn, nss, {BSON("_id" << 3), BSON("_id" << 4), BSON("_id" << 6)}); + opCtx, nss, {BSON("_id" << 3), BSON("_id" << 4), BSON("_id" << 6)}); } TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsCollScanReturnsFirstDocumentInsertedIfScanDirectionIsForward) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK( - storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments( + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); ASSERT_BSONOBJ_EQ( BSON("_id" << 1), - _assetGetFront(storage.deleteDocuments(txn, + _assetGetFront(storage.deleteDocuments(opCtx, nss, boost::none, StorageInterface::ScanDirection::kForward, @@ -1436,20 +1437,20 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, BoundInclusion::kIncludeStartKeyOnly, 1U))); - _assertDocumentsInCollectionEquals(txn, nss, {BSON("_id" << 2), BSON("_id" << 0)}); + _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 2), BSON("_id" << 0)}); } TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsCollScanReturnsLastDocumentInsertedIfScanDirectionIsBackward) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK( - storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); 
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments( + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); ASSERT_BSONOBJ_EQ( BSON("_id" << 0), - _assetGetFront(storage.deleteDocuments(txn, + _assetGetFront(storage.deleteDocuments(opCtx, nss, boost::none, StorageInterface::ScanDirection::kBackward, @@ -1457,20 +1458,20 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, BoundInclusion::kIncludeStartKeyOnly, 1U))); - _assertDocumentsInCollectionEquals(txn, nss, {BSON("_id" << 1), BSON("_id" << 2)}); + _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2)}); } TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsCollScanReturnsNoSuchKeyIfStartKeyIsNotEmpty) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK( - storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); + ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); + ASSERT_OK(storage.insertDocuments( + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); ASSERT_EQUALS(ErrorCodes::NoSuchKey, storage - .deleteDocuments(txn, + .deleteDocuments(opCtx, nss, boost::none, StorageInterface::ScanDirection::kForward, @@ -1482,15 +1483,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsCollScanReturnsInvalidOptionsIfBoundIsNotStartKeyOnly) { - auto txn = getOperationContext(); + auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions())); - ASSERT_OK( - storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); + ASSERT_OK(storage.createCollection(opCtx, nss, 
CollectionOptions())); + ASSERT_OK(storage.insertDocuments( + opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)})); ASSERT_EQUALS(ErrorCodes::InvalidOptions, storage - .deleteDocuments(txn, + .deleteDocuments(opCtx, nss, boost::none, StorageInterface::ScanDirection::kForward, diff --git a/src/mongo/db/repl/storage_interface_mock.cpp b/src/mongo/db/repl/storage_interface_mock.cpp index ec870c1a0cc..4b534c05d6a 100644 --- a/src/mongo/db/repl/storage_interface_mock.cpp +++ b/src/mongo/db/repl/storage_interface_mock.cpp @@ -40,53 +40,53 @@ namespace mongo { namespace repl { void StorageInterfaceMock::startup() {} void StorageInterfaceMock::shutdown() {} -bool StorageInterfaceMock::getInitialSyncFlag(OperationContext* txn) const { +bool StorageInterfaceMock::getInitialSyncFlag(OperationContext* opCtx) const { stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex); return _initialSyncFlag; } -void StorageInterfaceMock::setInitialSyncFlag(OperationContext* txn) { +void StorageInterfaceMock::setInitialSyncFlag(OperationContext* opCtx) { stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex); _initialSyncFlag = true; } -void StorageInterfaceMock::clearInitialSyncFlag(OperationContext* txn) { +void StorageInterfaceMock::clearInitialSyncFlag(OperationContext* opCtx) { stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex); _initialSyncFlag = false; } -OpTime StorageInterfaceMock::getMinValid(OperationContext* txn) const { +OpTime StorageInterfaceMock::getMinValid(OperationContext* opCtx) const { stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex); return _minValid; } -void StorageInterfaceMock::setMinValid(OperationContext* txn, const OpTime& minValid) { +void StorageInterfaceMock::setMinValid(OperationContext* opCtx, const OpTime& minValid) { stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex); _minValid = minValid; } -void StorageInterfaceMock::setMinValidToAtLeast(OperationContext* txn, const OpTime& minValid) { +void 
StorageInterfaceMock::setMinValidToAtLeast(OperationContext* opCtx, const OpTime& minValid) { stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex); _minValid = std::max(_minValid, minValid); } -void StorageInterfaceMock::setOplogDeleteFromPoint(OperationContext* txn, +void StorageInterfaceMock::setOplogDeleteFromPoint(OperationContext* opCtx, const Timestamp& timestamp) { stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex); _oplogDeleteFromPoint = timestamp; } -Timestamp StorageInterfaceMock::getOplogDeleteFromPoint(OperationContext* txn) { +Timestamp StorageInterfaceMock::getOplogDeleteFromPoint(OperationContext* opCtx) { stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex); return _oplogDeleteFromPoint; } -void StorageInterfaceMock::setAppliedThrough(OperationContext* txn, const OpTime& optime) { +void StorageInterfaceMock::setAppliedThrough(OperationContext* opCtx, const OpTime& optime) { stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex); _appliedThrough = optime; } -OpTime StorageInterfaceMock::getAppliedThrough(OperationContext* txn) { +OpTime StorageInterfaceMock::getAppliedThrough(OperationContext* opCtx) { stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex); return _appliedThrough; } diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h index 911244c2670..9f63764473b 100644 --- a/src/mongo/db/repl/storage_interface_mock.h +++ b/src/mongo/db/repl/storage_interface_mock.h @@ -92,17 +92,18 @@ public: const BSONObj idIndexSpec, const std::vector<BSONObj>& secondaryIndexSpecs)>; using InsertDocumentFn = stdx::function<Status( - OperationContext* txn, const NamespaceString& nss, const BSONObj& doc)>; + OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc)>; using InsertDocumentsFn = stdx::function<Status( - OperationContext* txn, const NamespaceString& nss, const std::vector<BSONObj>& docs)>; - using DropUserDatabasesFn = 
stdx::function<Status(OperationContext* txn)>; - using CreateOplogFn = stdx::function<Status(OperationContext* txn, const NamespaceString& nss)>; + OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs)>; + using DropUserDatabasesFn = stdx::function<Status(OperationContext* opCtx)>; + using CreateOplogFn = + stdx::function<Status(OperationContext* opCtx, const NamespaceString& nss)>; using CreateCollectionFn = stdx::function<Status( - OperationContext* txn, const NamespaceString& nss, const CollectionOptions& options)>; + OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options)>; using DropCollectionFn = - stdx::function<Status(OperationContext* txn, const NamespaceString& nss)>; + stdx::function<Status(OperationContext* opCtx, const NamespaceString& nss)>; using FindDocumentsFn = - stdx::function<StatusWith<std::vector<BSONObj>>(OperationContext* txn, + stdx::function<StatusWith<std::vector<BSONObj>>(OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, @@ -110,31 +111,31 @@ public: BoundInclusion boundInclusion, std::size_t limit)>; using DeleteDocumentsFn = - stdx::function<StatusWith<std::vector<BSONObj>>(OperationContext* txn, + stdx::function<StatusWith<std::vector<BSONObj>>(OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, const BSONObj& startKey, BoundInclusion boundInclusion, std::size_t limit)>; - using IsAdminDbValidFn = stdx::function<Status(OperationContext* txn)>; + using IsAdminDbValidFn = stdx::function<Status(OperationContext* opCtx)>; StorageInterfaceMock() = default; void startup() override; void shutdown() override; - bool getInitialSyncFlag(OperationContext* txn) const override; - void setInitialSyncFlag(OperationContext* txn) override; - void clearInitialSyncFlag(OperationContext* txn) override; + bool getInitialSyncFlag(OperationContext* opCtx) 
const override; + void setInitialSyncFlag(OperationContext* opCtx) override; + void clearInitialSyncFlag(OperationContext* opCtx) override; - OpTime getMinValid(OperationContext* txn) const override; - void setMinValid(OperationContext* txn, const OpTime& minValid) override; - void setMinValidToAtLeast(OperationContext* txn, const OpTime& minValid) override; - void setOplogDeleteFromPoint(OperationContext* txn, const Timestamp& timestamp) override; - Timestamp getOplogDeleteFromPoint(OperationContext* txn) override; - void setAppliedThrough(OperationContext* txn, const OpTime& optime) override; - OpTime getAppliedThrough(OperationContext* txn) override; + OpTime getMinValid(OperationContext* opCtx) const override; + void setMinValid(OperationContext* opCtx, const OpTime& minValid) override; + void setMinValidToAtLeast(OperationContext* opCtx, const OpTime& minValid) override; + void setOplogDeleteFromPoint(OperationContext* opCtx, const Timestamp& timestamp) override; + Timestamp getOplogDeleteFromPoint(OperationContext* opCtx) override; + void setAppliedThrough(OperationContext* opCtx, const OpTime& optime) override; + OpTime getAppliedThrough(OperationContext* opCtx) override; StatusWith<std::unique_ptr<CollectionBulkLoader>> createCollectionForBulkLoading( const NamespaceString& nss, @@ -144,51 +145,53 @@ public: return createCollectionForBulkFn(nss, options, idIndexSpec, secondaryIndexSpecs); }; - Status insertDocument(OperationContext* txn, + Status insertDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) override { - return insertDocumentFn(txn, nss, doc); + return insertDocumentFn(opCtx, nss, doc); }; - Status insertDocuments(OperationContext* txn, + Status insertDocuments(OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs) override { - return insertDocumentsFn(txn, nss, docs); + return insertDocumentsFn(opCtx, nss, docs); } - Status dropReplicatedDatabases(OperationContext* txn) override { - 
return dropUserDBsFn(txn); + Status dropReplicatedDatabases(OperationContext* opCtx) override { + return dropUserDBsFn(opCtx); }; - Status createOplog(OperationContext* txn, const NamespaceString& nss) override { - return createOplogFn(txn, nss); + Status createOplog(OperationContext* opCtx, const NamespaceString& nss) override { + return createOplogFn(opCtx, nss); }; - StatusWith<size_t> getOplogMaxSize(OperationContext* txn, const NamespaceString& nss) override { + StatusWith<size_t> getOplogMaxSize(OperationContext* opCtx, + const NamespaceString& nss) override { return 1024 * 1024 * 1024; } - Status createCollection(OperationContext* txn, + Status createCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) override { - return createCollFn(txn, nss, options); + return createCollFn(opCtx, nss, options); } - Status dropCollection(OperationContext* txn, const NamespaceString& nss) override { - return dropCollFn(txn, nss); + Status dropCollection(OperationContext* opCtx, const NamespaceString& nss) override { + return dropCollFn(opCtx, nss); }; - StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* txn, + StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, const BSONObj& startKey, BoundInclusion boundInclusion, std::size_t limit) override { - return findDocumentsFn(txn, nss, indexName, scanDirection, startKey, boundInclusion, limit); + return findDocumentsFn( + opCtx, nss, indexName, scanDirection, startKey, boundInclusion, limit); } - StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* txn, + StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, @@ -196,11 +199,11 @@ public: BoundInclusion boundInclusion, std::size_t limit) override { return deleteDocumentsFn( - txn, 
nss, indexName, scanDirection, startKey, boundInclusion, limit); + opCtx, nss, indexName, scanDirection, startKey, boundInclusion, limit); } - Status isAdminDbValid(OperationContext* txn) override { - return isAdminDbValidFn(txn); + Status isAdminDbValid(OperationContext* opCtx) override { + return isAdminDbValidFn(opCtx); }; @@ -214,27 +217,27 @@ public: return Status{ErrorCodes::IllegalOperation, "CreateCollectionForBulkFn not implemented."}; }; InsertDocumentFn insertDocumentFn = - [](OperationContext* txn, const NamespaceString& nss, const BSONObj& doc) { + [](OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) { return Status{ErrorCodes::IllegalOperation, "InsertDocumentFn not implemented."}; }; InsertDocumentsFn insertDocumentsFn = - [](OperationContext* txn, const NamespaceString& nss, const std::vector<BSONObj>& docs) { + [](OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs) { return Status{ErrorCodes::IllegalOperation, "InsertDocumentsFn not implemented."}; }; - DropUserDatabasesFn dropUserDBsFn = [](OperationContext* txn) { + DropUserDatabasesFn dropUserDBsFn = [](OperationContext* opCtx) { return Status{ErrorCodes::IllegalOperation, "DropUserDatabasesFn not implemented."}; }; - CreateOplogFn createOplogFn = [](OperationContext* txn, const NamespaceString& nss) { + CreateOplogFn createOplogFn = [](OperationContext* opCtx, const NamespaceString& nss) { return Status{ErrorCodes::IllegalOperation, "CreateOplogFn not implemented."}; }; CreateCollectionFn createCollFn = - [](OperationContext* txn, const NamespaceString& nss, const CollectionOptions& options) { + [](OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) { return Status{ErrorCodes::IllegalOperation, "CreateCollectionFn not implemented."}; }; - DropCollectionFn dropCollFn = [](OperationContext* txn, const NamespaceString& nss) { + DropCollectionFn dropCollFn = [](OperationContext* opCtx, const 
NamespaceString& nss) { return Status{ErrorCodes::IllegalOperation, "DropCollectionFn not implemented."}; }; - FindDocumentsFn findDocumentsFn = [](OperationContext* txn, + FindDocumentsFn findDocumentsFn = [](OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, @@ -243,7 +246,7 @@ public: std::size_t limit) { return Status{ErrorCodes::IllegalOperation, "FindOneFn not implemented."}; }; - DeleteDocumentsFn deleteDocumentsFn = [](OperationContext* txn, + DeleteDocumentsFn deleteDocumentsFn = [](OperationContext* opCtx, const NamespaceString& nss, boost::optional<StringData> indexName, ScanDirection scanDirection, diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp index 2633e058e44..b8c41b1e1fd 100644 --- a/src/mongo/db/repl/sync_source_feedback.cpp +++ b/src/mongo/db/repl/sync_source_feedback.cpp @@ -52,9 +52,9 @@ namespace { * Calculates the keep alive interval based on the current configuration in the replication * coordinator. 
*/ -Milliseconds calculateKeepAliveInterval(OperationContext* txn, stdx::mutex& mtx) { +Milliseconds calculateKeepAliveInterval(OperationContext* opCtx, stdx::mutex& mtx) { stdx::lock_guard<stdx::mutex> lock(mtx); - auto replCoord = repl::ReplicationCoordinator::get(txn); + auto replCoord = repl::ReplicationCoordinator::get(opCtx); auto rsConfig = replCoord->getConfig(); auto keepAliveInterval = rsConfig.getElectionTimeoutPeriod() / 2; return keepAliveInterval; @@ -64,9 +64,9 @@ Milliseconds calculateKeepAliveInterval(OperationContext* txn, stdx::mutex& mtx) * Returns function to prepare update command */ Reporter::PrepareReplSetUpdatePositionCommandFn makePrepareReplSetUpdatePositionCommandFn( - OperationContext* txn, const HostAndPort& syncTarget, BackgroundSync* bgsync) { - return [syncTarget, txn, bgsync](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle - commandStyle) -> StatusWith<BSONObj> { + OperationContext* opCtx, const HostAndPort& syncTarget, BackgroundSync* bgsync) { + return [syncTarget, opCtx, bgsync](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle + commandStyle) -> StatusWith<BSONObj> { auto currentSyncTarget = bgsync->getSyncTarget(); if (currentSyncTarget != syncTarget) { if (currentSyncTarget.empty()) { @@ -82,7 +82,7 @@ Reporter::PrepareReplSetUpdatePositionCommandFn makePrepareReplSetUpdatePosition } } - auto replCoord = repl::ReplicationCoordinator::get(txn); + auto replCoord = repl::ReplicationCoordinator::get(opCtx); if (replCoord->getMemberState().primary()) { // Primary has no one to send updates to. 
return Status(ErrorCodes::InvalidSyncSource, @@ -149,10 +149,10 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor, BackgroundSync* b Milliseconds keepAliveInterval(0); while (true) { // breaks once _shutdownSignaled is true - auto txn = cc().makeOperationContext(); + auto opCtx = cc().makeOperationContext(); if (keepAliveInterval == Milliseconds(0)) { - keepAliveInterval = calculateKeepAliveInterval(txn.get(), _mtx); + keepAliveInterval = calculateKeepAliveInterval(opCtx.get(), _mtx); } { @@ -163,7 +163,7 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor, BackgroundSync* b while (!_positionChanged && !_shutdownSignaled) { if (_cond.wait_for(lock, keepAliveInterval.toSystemDuration()) == stdx::cv_status::timeout) { - MemberState state = ReplicationCoordinator::get(txn.get())->getMemberState(); + MemberState state = ReplicationCoordinator::get(opCtx.get())->getMemberState(); if (!(state.primary() || state.startup())) { break; } @@ -179,7 +179,7 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor, BackgroundSync* b { stdx::lock_guard<stdx::mutex> lock(_mtx); - MemberState state = ReplicationCoordinator::get(txn.get())->getMemberState(); + MemberState state = ReplicationCoordinator::get(opCtx.get())->getMemberState(); if (state.primary() || state.startup()) { continue; } @@ -201,17 +201,18 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor, BackgroundSync* b // Update keepalive value from config. 
auto oldKeepAliveInterval = keepAliveInterval; - keepAliveInterval = calculateKeepAliveInterval(txn.get(), _mtx); + keepAliveInterval = calculateKeepAliveInterval(opCtx.get(), _mtx); if (oldKeepAliveInterval != keepAliveInterval) { LOG(1) << "new syncSourceFeedback keep alive duration = " << keepAliveInterval << " (previously " << oldKeepAliveInterval << ")"; } } - Reporter reporter(executor, - makePrepareReplSetUpdatePositionCommandFn(txn.get(), syncTarget, bgsync), - syncTarget, - keepAliveInterval); + Reporter reporter( + executor, + makePrepareReplSetUpdatePositionCommandFn(opCtx.get(), syncTarget, bgsync), + syncTarget, + keepAliveInterval); { stdx::lock_guard<stdx::mutex> lock(_mtx); if (_shutdownSignaled) { diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp index 277df2f9a9d..8738b47d027 100644 --- a/src/mongo/db/repl/sync_tail.cpp +++ b/src/mongo/db/repl/sync_tail.cpp @@ -255,8 +255,8 @@ void ApplyBatchFinalizerForJournal::_run() { _latestOpTime = OpTime(); } - auto txn = cc().makeOperationContext(); - txn->recoveryUnit()->waitUntilDurable(); + auto opCtx = cc().makeOperationContext(); + opCtx->recoveryUnit()->waitUntilDurable(); _recordDurable(latestOpTime); } } @@ -276,19 +276,19 @@ std::unique_ptr<OldThreadPool> SyncTail::makeWriterPool() { return stdx::make_unique<OldThreadPool>(replWriterThreadCount, "repl writer worker "); } -bool SyncTail::peek(OperationContext* txn, BSONObj* op) { - return _networkQueue->peek(txn, op); +bool SyncTail::peek(OperationContext* opCtx, BSONObj* op) { + return _networkQueue->peek(opCtx, op); } // static -Status SyncTail::syncApply(OperationContext* txn, +Status SyncTail::syncApply(OperationContext* opCtx, const BSONObj& op, bool inSteadyStateReplication, ApplyOperationInLockFn applyOperationInLock, ApplyCommandInLockFn applyCommandInLock, IncrementOpsAppliedStatsFn incrementOpsAppliedStats) { // Count each log op application as a separate operation, for reporting purposes - CurOp 
individualOp(txn); + CurOp individualOp(opCtx); const char* ns = op.getStringField("ns"); verify(ns); @@ -312,24 +312,24 @@ Status SyncTail::syncApply(OperationContext* txn, MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { // a command may need a global write lock. so we will conservatively go // ahead and grab one here. suboptimal. :-( - Lock::GlobalWrite globalWriteLock(txn->lockState()); + Lock::GlobalWrite globalWriteLock(opCtx->lockState()); // special case apply for commands to avoid implicit database creation - Status status = applyCommandInLock(txn, op, inSteadyStateReplication); + Status status = applyCommandInLock(opCtx, op, inSteadyStateReplication); incrementOpsAppliedStats(); return status; } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "syncApply_command", ns); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "syncApply_command", ns); } auto applyOp = [&](Database* db) { // For non-initial-sync, we convert updates to upserts // to suppress errors when replaying oplog entries. - txn->setReplicatedWrites(false); - DisableDocumentValidation validationDisabler(txn); + opCtx->setReplicatedWrites(false); + DisableDocumentValidation validationDisabler(opCtx); Status status = - applyOperationInLock(txn, db, op, inSteadyStateReplication, incrementOpsAppliedStats); + applyOperationInLock(opCtx, db, op, inSteadyStateReplication, incrementOpsAppliedStats); if (!status.isOK() && status.code() == ErrorCodes::WriteConflict) { throw WriteConflictException(); } @@ -339,11 +339,11 @@ Status SyncTail::syncApply(OperationContext* txn, if (isNoOp || (opType[0] == 'i' && nsToCollectionSubstring(ns) == "system.indexes")) { auto opStr = isNoOp ? 
"syncApply_noop" : "syncApply_indexBuild"; MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - Lock::DBLock dbLock(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X); - OldClientContext ctx(txn, ns); + Lock::DBLock dbLock(opCtx->lockState(), nsToDatabaseSubstring(ns), MODE_X); + OldClientContext ctx(opCtx, ns); return applyOp(ctx.db()); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, opStr, ns); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, opStr, ns); } if (isCrudOpType(opType)) { @@ -361,29 +361,29 @@ Status SyncTail::syncApply(OperationContext* txn, // drop the DB lock before acquiring // the upgraded one. dbLock.reset(); - dbLock.reset(new Lock::DBLock(txn->lockState(), dbName, mode)); - collectionLock.reset(new Lock::CollectionLock(txn->lockState(), ns, mode)); + dbLock.reset(new Lock::DBLock(opCtx->lockState(), dbName, mode)); + collectionLock.reset(new Lock::CollectionLock(opCtx->lockState(), ns, mode)); }; resetLocks(MODE_IX); - if (!dbHolder().get(txn, dbName)) { + if (!dbHolder().get(opCtx, dbName)) { // Need to create database, so reset lock to stronger mode. resetLocks(MODE_X); - ctx.reset(new OldClientContext(txn, ns)); + ctx.reset(new OldClientContext(opCtx, ns)); } else { - ctx.reset(new OldClientContext(txn, ns)); + ctx.reset(new OldClientContext(opCtx, ns)); if (!ctx->db()->getCollection(ns)) { // Need to implicitly create collection. This occurs for 'u' opTypes, // but not for 'i' nor 'd'. 
ctx.reset(); resetLocks(MODE_X); - ctx.reset(new OldClientContext(txn, ns)); + ctx.reset(new OldClientContext(opCtx, ns)); } } return applyOp(ctx->db()); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "syncApply_CRUD", ns); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "syncApply_CRUD", ns); } // unknown opType @@ -393,10 +393,10 @@ Status SyncTail::syncApply(OperationContext* txn, return Status(ErrorCodes::BadValue, ss); } -Status SyncTail::syncApply(OperationContext* txn, +Status SyncTail::syncApply(OperationContext* opCtx, const BSONObj& op, bool inSteadyStateReplication) { - return SyncTail::syncApply(txn, + return SyncTail::syncApply(opCtx, op, inSteadyStateReplication, applyOperation_inlock, @@ -416,12 +416,12 @@ void prefetchOp(const BSONObj& op) { try { // one possible tweak here would be to stay in the read lock for this database // for multiple prefetches if they are for the same database. - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; - AutoGetCollectionForRead ctx(&txn, NamespaceString(ns)); + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; + AutoGetCollectionForRead ctx(&opCtx, NamespaceString(ns)); Database* db = ctx.getDb(); if (db) { - prefetchPagesForReplicatedOp(&txn, db, op); + prefetchPagesForReplicatedOp(&opCtx, db, op); } } catch (const DBException& e) { LOG(2) << "ignoring exception in prefetchOp(): " << redact(e) << endl; @@ -468,7 +468,7 @@ void initializeWriterThread() { // Schedules the writes to the oplog for 'ops' into threadPool. The caller must guarantee that 'ops' // stays valid until all scheduled work in the thread pool completes. 
-void scheduleWritesToOplog(OperationContext* txn, +void scheduleWritesToOplog(OperationContext* opCtx, OldThreadPool* threadPool, const MultiApplier::Operations& ops) { @@ -479,9 +479,9 @@ void scheduleWritesToOplog(OperationContext* txn, return [&ops, begin, end] { initializeWriterThread(); const auto txnHolder = cc().makeOperationContext(); - const auto txn = txnHolder.get(); - txn->lockState()->setShouldConflictWithSecondaryBatchApplication(false); - txn->setReplicatedWrites(false); + const auto opCtx = txnHolder.get(); + opCtx->lockState()->setShouldConflictWithSecondaryBatchApplication(false); + opCtx->setReplicatedWrites(false); std::vector<BSONObj> docs; docs.reserve(end - begin); @@ -492,8 +492,8 @@ void scheduleWritesToOplog(OperationContext* txn, } fassertStatusOK(40141, - StorageInterface::get(txn)->insertDocuments( - txn, NamespaceString(rsOplogName), docs)); + StorageInterface::get(opCtx)->insertDocuments( + opCtx, NamespaceString(rsOplogName), docs)); }; }; @@ -509,7 +509,7 @@ void scheduleWritesToOplog(OperationContext* txn, // there would be no way to take advantage of multiple threads if a storage engine doesn't // support document locking. 
if (!enoughToMultiThread || - !txn->getServiceContext()->getGlobalStorageEngine()->supportsDocLocking()) { + !opCtx->getServiceContext()->getGlobalStorageEngine()->supportsDocLocking()) { threadPool->schedule(makeOplogWriterForRange(0, ops.size())); return; @@ -536,24 +536,24 @@ public: const CollatorInterface* collator = nullptr; }; - CollectionProperties getCollectionProperties(OperationContext* txn, + CollectionProperties getCollectionProperties(OperationContext* opCtx, const StringMapTraits::HashedKey& ns) { auto it = _cache.find(ns); if (it != _cache.end()) { return it->second; } - auto collProperties = getCollectionPropertiesImpl(txn, ns.key()); + auto collProperties = getCollectionPropertiesImpl(opCtx, ns.key()); _cache[ns] = collProperties; return collProperties; } private: - CollectionProperties getCollectionPropertiesImpl(OperationContext* txn, StringData ns) { + CollectionProperties getCollectionPropertiesImpl(OperationContext* opCtx, StringData ns) { CollectionProperties collProperties; - Lock::DBLock dbLock(txn->lockState(), nsToDatabaseSubstring(ns), MODE_IS); - auto db = dbHolder().get(txn, ns); + Lock::DBLock dbLock(opCtx->lockState(), nsToDatabaseSubstring(ns), MODE_IS); + auto db = dbHolder().get(opCtx, ns); if (!db) { return collProperties; } @@ -573,7 +573,7 @@ private: // This only modifies the isForCappedCollection field on each op. It does not alter the ops vector // in any other way. 
-void fillWriterVectors(OperationContext* txn, +void fillWriterVectors(OperationContext* opCtx, MultiApplier::Operations* ops, std::vector<MultiApplier::OperationPtrs>* writerVectors) { const bool supportsDocLocking = @@ -587,7 +587,7 @@ void fillWriterVectors(OperationContext* txn, uint32_t hash = hashedNs.hash(); if (op.isCrudOpType()) { - auto collProperties = collPropertiesCache.getCollectionProperties(txn, hashedNs); + auto collProperties = collPropertiesCache.getCollectionProperties(opCtx, hashedNs); // For doc locking engines, include the _id of the document in the hash so we get // parallelism even if all writes are to a single collection. @@ -620,7 +620,7 @@ void fillWriterVectors(OperationContext* txn, // Applies a batch of oplog entries, by using a set of threads to apply the operations and then // writes the oplog entries to the local oplog. -OpTime SyncTail::multiApply(OperationContext* txn, MultiApplier::Operations ops) { +OpTime SyncTail::multiApply(OperationContext* opCtx, MultiApplier::Operations ops) { auto applyOperation = [this](MultiApplier::OperationPtrs* ops) -> Status { _applyFunc(ops, this); // This function is used by 3.2 initial sync and steady state data replication. @@ -628,11 +628,11 @@ OpTime SyncTail::multiApply(OperationContext* txn, MultiApplier::Operations ops) return Status::OK(); }; return fassertStatusOK( - 34437, repl::multiApply(txn, _writerPool.get(), std::move(ops), applyOperation)); + 34437, repl::multiApply(opCtx, _writerPool.get(), std::move(ops), applyOperation)); } namespace { -void tryToGoLiveAsASecondary(OperationContext* txn, ReplicationCoordinator* replCoord) { +void tryToGoLiveAsASecondary(OperationContext* opCtx, ReplicationCoordinator* replCoord) { if (replCoord->isInPrimaryOrSecondaryState()) { return; } @@ -640,8 +640,8 @@ void tryToGoLiveAsASecondary(OperationContext* txn, ReplicationCoordinator* repl // This needs to happen after the attempt so readers can be sure we've already tried. 
ON_BLOCK_EXIT([] { attemptsToBecomeSecondary.increment(); }); - ScopedTransaction transaction(txn, MODE_S); - Lock::GlobalRead readLock(txn->lockState()); + ScopedTransaction transaction(opCtx, MODE_S); + Lock::GlobalRead readLock(opCtx->lockState()); if (replCoord->getMaintenanceMode()) { LOG(1) << "Can't go live (tryToGoLiveAsASecondary) as maintenance mode is active."; @@ -657,7 +657,7 @@ void tryToGoLiveAsASecondary(OperationContext* txn, ReplicationCoordinator* repl } // We can't go to SECONDARY until we reach minvalid. - if (replCoord->getMyLastAppliedOpTime() < StorageInterface::get(txn)->getMinValid(txn)) { + if (replCoord->getMyLastAppliedOpTime() < StorageInterface::get(opCtx)->getMinValid(opCtx)) { return; } @@ -697,13 +697,13 @@ public: private: void run() { Client::initThread("ReplBatcher"); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; - const auto replCoord = ReplicationCoordinator::get(&txn); - const auto fastClockSource = txn.getServiceContext()->getFastClockSource(); + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; + const auto replCoord = ReplicationCoordinator::get(&opCtx); + const auto fastClockSource = opCtx.getServiceContext()->getFastClockSource(); const auto oplogMaxSize = fassertStatusOK( 40301, - StorageInterface::get(&txn)->getOplogMaxSize(&txn, NamespaceString(rsOplogName))); + StorageInterface::get(&opCtx)->getOplogMaxSize(&opCtx, NamespaceString(rsOplogName))); // Batches are limited to 10% of the oplog. BatchLimits batchLimits; @@ -720,7 +720,7 @@ private: OpQueue ops; // tryPopAndWaitForMore adds to ops and returns true when we need to end a batch early. 
- while (!_syncTail->tryPopAndWaitForMore(&txn, &ops, batchLimits)) { + while (!_syncTail->tryPopAndWaitForMore(&opCtx, &ops, batchLimits)) { } if (ops.empty() && !ops.mustShutdown()) { @@ -755,8 +755,8 @@ private: void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) { OpQueueBatcher batcher(this); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; std::unique_ptr<ApplyBatchFinalizer> finalizer{ getGlobalServiceContext()->getGlobalStorageEngine()->isDurable() ? new ApplyBatchFinalizerForJournal(replCoord) @@ -774,7 +774,7 @@ void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) { sleepmillis(10); } - tryToGoLiveAsASecondary(&txn, replCoord); + tryToGoLiveAsASecondary(&opCtx, replCoord); long long termWhenBufferIsEmpty = replCoord->getTerm(); // Blocks up to a second waiting for a batch to be ready to apply. If one doesn't become @@ -788,7 +788,7 @@ void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) { continue; } // Signal drain complete if we're in Draining state and the buffer is empty. - replCoord->signalDrainComplete(&txn, termWhenBufferIsEmpty); + replCoord->signalDrainComplete(&opCtx, termWhenBufferIsEmpty); continue; // Try again. } @@ -813,13 +813,13 @@ void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) { stdx::lock_guard<SimpleMutex> fsynclk(filesLockedFsync); // Do the work. - multiApply(&txn, ops.releaseBatch()); + multiApply(&opCtx, ops.releaseBatch()); // Update various things that care about our last applied optime. Tests rely on 2 happening // before 3 even though it isn't strictly necessary. The order of 1 doesn't matter. 
- setNewTimestamp(txn.getServiceContext(), lastOpTimeInBatch.getTimestamp()); // 1 - StorageInterface::get(&txn)->setAppliedThrough(&txn, lastOpTimeInBatch); // 2 - finalizer->record(lastOpTimeInBatch); // 3 + setNewTimestamp(opCtx.getServiceContext(), lastOpTimeInBatch.getTimestamp()); // 1 + StorageInterface::get(&opCtx)->setAppliedThrough(&opCtx, lastOpTimeInBatch); // 2 + finalizer->record(lastOpTimeInBatch); // 3 } } @@ -830,13 +830,13 @@ void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) { // This function also blocks 1 second waiting for new ops to appear in the bgsync // queue. We don't block forever so that we can periodically check for things like shutdown or // reconfigs. -bool SyncTail::tryPopAndWaitForMore(OperationContext* txn, +bool SyncTail::tryPopAndWaitForMore(OperationContext* opCtx, SyncTail::OpQueue* ops, const BatchLimits& limits) { { BSONObj op; // Check to see if there are ops waiting in the bgsync queue - bool peek_success = peek(txn, &op); + bool peek_success = peek(opCtx, &op); if (!peek_success) { // If we don't have anything in the queue, wait a bit for something to appear. if (ops->empty()) { @@ -908,7 +908,7 @@ bool SyncTail::tryPopAndWaitForMore(OperationContext* txn, (!entry.ns.empty() && nsToCollectionSubstring(entry.ns) == "system.indexes")) { if (ops->getCount() == 1) { // apply commands one-at-a-time - _networkQueue->consume(txn); + _networkQueue->consume(opCtx); } else { // This op must be processed alone, but we already had ops in the queue so we can't // include it in this batch. Since we didn't call consume(), we'll see this again next @@ -921,7 +921,7 @@ bool SyncTail::tryPopAndWaitForMore(OperationContext* txn, } // We are going to apply this Op. - _networkQueue->consume(txn); + _networkQueue->consume(opCtx); // Go back for more ops, unless we've hit the limit. 
return ops->getCount() >= limits.ops; @@ -935,7 +935,7 @@ OldThreadPool* SyncTail::getWriterPool() { return _writerPool.get(); } -BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) { +BSONObj SyncTail::getMissingDoc(OperationContext* opCtx, Database* db, const BSONObj& o) { OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query? const char* ns = o.getStringField("ns"); @@ -1004,18 +1004,18 @@ BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONO str::stream() << "Can no longer connect to initial sync source: " << _hostname); } -bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) { +bool SyncTail::shouldRetry(OperationContext* opCtx, const BSONObj& o) { const NamespaceString nss(o.getStringField("ns")); MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { // Take an X lock on the database in order to preclude other modifications. // Also, the database might not exist yet, so create it. - AutoGetOrCreateDb autoDb(txn, nss.db(), MODE_X); + AutoGetOrCreateDb autoDb(opCtx, nss.db(), MODE_X); Database* const db = autoDb.getDb(); // we don't have the object yet, which is possible on initial sync. get it. log() << "adding missing object" << endl; // rare enough we can log - BSONObj missingObj = getMissingDoc(txn, db, o); + BSONObj missingObj = getMissingDoc(opCtx, db, o); if (missingObj.isEmpty()) { log() << "missing object not found on source." 
@@ -1025,13 +1025,13 @@ bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) { return false; } else { - WriteUnitOfWork wunit(txn); + WriteUnitOfWork wunit(opCtx); - Collection* const coll = db->getOrCreateCollection(txn, nss.toString()); + Collection* const coll = db->getOrCreateCollection(opCtx, nss.toString()); invariant(coll); OpDebug* const nullOpDebug = nullptr; - Status status = coll->insertDocument(txn, missingObj, nullOpDebug, true); + Status status = coll->insertDocument(opCtx, missingObj, nullOpDebug, true); uassert(15917, str::stream() << "failed to insert missing doc: " << status.toString(), status.isOK()); @@ -1042,7 +1042,7 @@ bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) { return true; } } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "InsertRetry", nss.ns()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "InsertRetry", nss.ns()); // fixes compile errors on GCC - see SERVER-18219 for details MONGO_UNREACHABLE; @@ -1051,22 +1051,22 @@ bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) { // This free function is used by the writer threads to apply each op void multiSyncApply(MultiApplier::OperationPtrs* ops, SyncTail*) { initializeWriterThread(); - auto txn = cc().makeOperationContext(); - auto syncApply = [](OperationContext* txn, const BSONObj& op, bool inSteadyStateReplication) { - return SyncTail::syncApply(txn, op, inSteadyStateReplication); + auto opCtx = cc().makeOperationContext(); + auto syncApply = [](OperationContext* opCtx, const BSONObj& op, bool inSteadyStateReplication) { + return SyncTail::syncApply(opCtx, op, inSteadyStateReplication); }; - fassertNoTrace(16359, multiSyncApply_noAbort(txn.get(), ops, syncApply)); + fassertNoTrace(16359, multiSyncApply_noAbort(opCtx.get(), ops, syncApply)); } -Status multiSyncApply_noAbort(OperationContext* txn, +Status multiSyncApply_noAbort(OperationContext* opCtx, MultiApplier::OperationPtrs* oplogEntryPointers, SyncApplyFn syncApply) { - 
txn->setReplicatedWrites(false); - DisableDocumentValidation validationDisabler(txn); + opCtx->setReplicatedWrites(false); + DisableDocumentValidation validationDisabler(opCtx); // allow us to get through the magic barrier - txn->lockState()->setShouldConflictWithSecondaryBatchApplication(false); + opCtx->lockState()->setShouldConflictWithSecondaryBatchApplication(false); if (oplogEntryPointers->size() > 1) { std::stable_sort(oplogEntryPointers->begin(), @@ -1125,7 +1125,7 @@ Status multiSyncApply_noAbort(OperationContext* txn, try { // Apply the group of inserts. uassertStatusOK( - syncApply(txn, groupedInsertBuilder.done(), inSteadyStateReplication)); + syncApply(opCtx, groupedInsertBuilder.done(), inSteadyStateReplication)); // It succeeded, advance the oplogEntriesIterator to the end of the // group of inserts. oplogEntriesIterator = endOfGroupableOpsIterator - 1; @@ -1145,7 +1145,7 @@ Status multiSyncApply_noAbort(OperationContext* txn, try { // Apply an individual (non-grouped) op. 
- const Status status = syncApply(txn, entry->raw, inSteadyStateReplication); + const Status status = syncApply(opCtx, entry->raw, inSteadyStateReplication); if (!status.isOK()) { severe() << "Error applying operation (" << redact(entry->raw) @@ -1165,28 +1165,28 @@ Status multiSyncApply_noAbort(OperationContext* txn, // This free function is used by the initial sync writer threads to apply each op void multiInitialSyncApply_abortOnFailure(MultiApplier::OperationPtrs* ops, SyncTail* st) { initializeWriterThread(); - auto txn = cc().makeOperationContext(); + auto opCtx = cc().makeOperationContext(); AtomicUInt32 fetchCount(0); - fassertNoTrace(15915, multiInitialSyncApply_noAbort(txn.get(), ops, st, &fetchCount)); + fassertNoTrace(15915, multiInitialSyncApply_noAbort(opCtx.get(), ops, st, &fetchCount)); } Status multiInitialSyncApply(MultiApplier::OperationPtrs* ops, SyncTail* st, AtomicUInt32* fetchCount) { initializeWriterThread(); - auto txn = cc().makeOperationContext(); - return multiInitialSyncApply_noAbort(txn.get(), ops, st, fetchCount); + auto opCtx = cc().makeOperationContext(); + return multiInitialSyncApply_noAbort(opCtx.get(), ops, st, fetchCount); } -Status multiInitialSyncApply_noAbort(OperationContext* txn, +Status multiInitialSyncApply_noAbort(OperationContext* opCtx, MultiApplier::OperationPtrs* ops, SyncTail* st, AtomicUInt32* fetchCount) { - txn->setReplicatedWrites(false); - DisableDocumentValidation validationDisabler(txn); + opCtx->setReplicatedWrites(false); + DisableDocumentValidation validationDisabler(opCtx); // allow us to get through the magic barrier - txn->lockState()->setShouldConflictWithSecondaryBatchApplication(false); + opCtx->lockState()->setShouldConflictWithSecondaryBatchApplication(false); // This function is only called in initial sync, as its name suggests. 
const bool inSteadyStateReplication = false; @@ -1194,7 +1194,7 @@ Status multiInitialSyncApply_noAbort(OperationContext* txn, for (auto it = ops->begin(); it != ops->end(); ++it) { auto& entry = **it; try { - const Status s = SyncTail::syncApply(txn, entry.raw, inSteadyStateReplication); + const Status s = SyncTail::syncApply(opCtx, entry.raw, inSteadyStateReplication); if (!s.isOK()) { // Don't retry on commands. if (entry.isCommand()) { @@ -1205,8 +1205,9 @@ Status multiInitialSyncApply_noAbort(OperationContext* txn, // We might need to fetch the missing docs from the sync source. fetchCount->fetchAndAdd(1); - if (st->shouldRetry(txn, entry.raw)) { - const Status s2 = SyncTail::syncApply(txn, entry.raw, inSteadyStateReplication); + if (st->shouldRetry(opCtx, entry.raw)) { + const Status s2 = + SyncTail::syncApply(opCtx, entry.raw, inSteadyStateReplication); if (!s2.isOK()) { severe() << "Error applying operation (" << redact(entry.raw) << "): " << redact(s2); @@ -1234,11 +1235,11 @@ Status multiInitialSyncApply_noAbort(OperationContext* txn, return Status::OK(); } -StatusWith<OpTime> multiApply(OperationContext* txn, +StatusWith<OpTime> multiApply(OperationContext* opCtx, OldThreadPool* workerPool, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation) { - if (!txn) { + if (!opCtx) { return {ErrorCodes::BadValue, "invalid operation context"}; } @@ -1259,14 +1260,14 @@ StatusWith<OpTime> multiApply(OperationContext* txn, prefetchOps(ops, workerPool); } - auto storage = StorageInterface::get(txn); + auto storage = StorageInterface::get(opCtx); LOG(2) << "replication batch size is " << ops.size(); // Stop all readers until we're done. This also prevents doc-locking engines from deleting old // entries from the oplog until we finish writing. 
- Lock::ParallelBatchWriterMode pbwm(txn->lockState()); + Lock::ParallelBatchWriterMode pbwm(opCtx->lockState()); - auto replCoord = ReplicationCoordinator::get(txn); + auto replCoord = ReplicationCoordinator::get(opCtx); if (replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Stopped) { severe() << "attempting to replicate ops while primary"; return {ErrorCodes::CannotApplyOplogWhilePrimary, @@ -1280,14 +1281,14 @@ StatusWith<OpTime> multiApply(OperationContext* txn, std::vector<MultiApplier::OperationPtrs> writerVectors(workerPool->getNumThreads()); ON_BLOCK_EXIT([&] { workerPool->join(); }); - storage->setOplogDeleteFromPoint(txn, ops.front().ts.timestamp()); - scheduleWritesToOplog(txn, workerPool, ops); - fillWriterVectors(txn, &ops, &writerVectors); + storage->setOplogDeleteFromPoint(opCtx, ops.front().ts.timestamp()); + scheduleWritesToOplog(opCtx, workerPool, ops); + fillWriterVectors(opCtx, &ops, &writerVectors); workerPool->join(); - storage->setOplogDeleteFromPoint(txn, Timestamp()); - storage->setMinValidToAtLeast(txn, ops.back().getOpTime()); + storage->setOplogDeleteFromPoint(opCtx, Timestamp()); + storage->setMinValidToAtLeast(opCtx, ops.back().getOpTime()); applyOps(writerVectors, workerPool, applyOperation, &statusVector); } diff --git a/src/mongo/db/repl/sync_tail.h b/src/mongo/db/repl/sync_tail.h index 98485782868..1f4aa0e12c4 100644 --- a/src/mongo/db/repl/sync_tail.h +++ b/src/mongo/db/repl/sync_tail.h @@ -70,7 +70,7 @@ public: * 'opCounter' is used to update server status metrics. * Returns failure status if the op was an update that could not be applied. */ - using ApplyOperationInLockFn = stdx::function<Status(OperationContext* txn, + using ApplyOperationInLockFn = stdx::function<Status(OperationContext* opCtx, Database* db, const BSONObj& opObj, bool inSteadyStateReplication, @@ -100,17 +100,19 @@ public: * Functions for applying operations/commands and increment server status counters may * be overridden for testing. 
*/ - static Status syncApply(OperationContext* txn, + static Status syncApply(OperationContext* opCtx, const BSONObj& o, bool inSteadyStateReplication, ApplyOperationInLockFn applyOperationInLock, ApplyCommandInLockFn applyCommandInLock, IncrementOpsAppliedStatsFn incrementOpsAppliedStats); - static Status syncApply(OperationContext* txn, const BSONObj& o, bool inSteadyStateReplication); + static Status syncApply(OperationContext* opCtx, + const BSONObj& o, + bool inSteadyStateReplication); void oplogApplication(ReplicationCoordinator* replCoord); - bool peek(OperationContext* txn, BSONObj* obj); + bool peek(OperationContext* opCtx, BSONObj* obj); class OpQueue { public: @@ -195,17 +197,17 @@ public: * If ops is empty on entry and nothing can be added yet, will wait up to a second before * returning true. */ - bool tryPopAndWaitForMore(OperationContext* txn, OpQueue* ops, const BatchLimits& limits); + bool tryPopAndWaitForMore(OperationContext* opCtx, OpQueue* ops, const BatchLimits& limits); /** * Fetch a single document referenced in the operation from the sync source. */ - virtual BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o); + virtual BSONObj getMissingDoc(OperationContext* opCtx, Database* db, const BSONObj& o); /** * If applyOperation_inlock should be called again after an update fails. */ - virtual bool shouldRetry(OperationContext* txn, const BSONObj& o); + virtual bool shouldRetry(OperationContext* opCtx, const BSONObj& o); void setHostname(const std::string& hostname); /** @@ -222,7 +224,7 @@ protected: // Apply a batch of operations, using multiple threads. // Returns the last OpTime applied during the apply batch, ops.end["ts"] basically. - OpTime multiApply(OperationContext* txn, MultiApplier::Operations ops); + OpTime multiApply(OperationContext* opCtx, MultiApplier::Operations ops); private: class OpQueueBatcher; @@ -247,7 +249,7 @@ private: * * Shared between here and MultiApplier. 
*/ -StatusWith<OpTime> multiApply(OperationContext* txn, +StatusWith<OpTime> multiApply(OperationContext* opCtx, OldThreadPool* workerPool, MultiApplier::Operations ops, MultiApplier::ApplyOperationFn applyOperation); @@ -271,9 +273,9 @@ Status multiInitialSyncApply(MultiApplier::OperationPtrs* ops, * Accepts an external operation context and a function with the same argument list as * SyncTail::syncApply. */ -using SyncApplyFn = - stdx::function<Status(OperationContext* txn, const BSONObj& o, bool inSteadyStateReplication)>; -Status multiSyncApply_noAbort(OperationContext* txn, +using SyncApplyFn = stdx::function<Status( + OperationContext* opCtx, const BSONObj& o, bool inSteadyStateReplication)>; +Status multiSyncApply_noAbort(OperationContext* opCtx, MultiApplier::OperationPtrs* ops, SyncApplyFn syncApply); @@ -281,7 +283,7 @@ Status multiSyncApply_noAbort(OperationContext* txn, * Testing-only version of multiInitialSyncApply that accepts an external operation context and * returns an error instead of aborting. 
*/ -Status multiInitialSyncApply_noAbort(OperationContext* txn, +Status multiInitialSyncApply_noAbort(OperationContext* opCtx, MultiApplier::OperationPtrs* ops, SyncTail* st, AtomicUInt32* fetchCount); diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp index b6946a4edc2..b1e62a2efa5 100644 --- a/src/mongo/db/repl/sync_tail_test.cpp +++ b/src/mongo/db/repl/sync_tail_test.cpp @@ -69,7 +69,7 @@ using namespace mongo::repl; class SyncTailTest : public ServiceContextMongoDTest { protected: void _testSyncApplyInsertDocument(LockMode expectedMode); - ServiceContext::UniqueOperationContext _txn; + ServiceContext::UniqueOperationContext _opCtx; unsigned int _opsApplied; SyncTail::ApplyOperationInLockFn _applyOp; SyncTail::ApplyCommandInLockFn _applyCmd; @@ -91,7 +91,7 @@ protected: class SyncTailWithLocalDocumentFetcher : public SyncTail { public: SyncTailWithLocalDocumentFetcher(const BSONObj& document); - BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) override; + BSONObj getMissingDoc(OperationContext* opCtx, Database* db, const BSONObj& o) override; private: BSONObj _document; @@ -103,7 +103,7 @@ private: class SyncTailWithOperationContextChecker : public SyncTail { public: SyncTailWithOperationContextChecker(); - bool shouldRetry(OperationContext* txn, const BSONObj& o) override; + bool shouldRetry(OperationContext* opCtx, const BSONObj& o) override; }; void SyncTailTest::setUp() { @@ -122,19 +122,19 @@ void SyncTailTest::setUp() { const std::vector<BSONObj>&) { return Status::OK(); }; StorageInterface::set(service, std::move(storageInterface)); - _txn = cc().makeOperationContext(); + _opCtx = cc().makeOperationContext(); _opsApplied = 0; - _applyOp = [](OperationContext* txn, + _applyOp = [](OperationContext* opCtx, Database* db, const BSONObj& op, bool inSteadyStateReplication, stdx::function<void()>) { return Status::OK(); }; - _applyCmd = [](OperationContext* txn, const BSONObj& op, bool) { return 
Status::OK(); }; + _applyCmd = [](OperationContext* opCtx, const BSONObj& op, bool) { return Status::OK(); }; _incOps = [this]() { _opsApplied++; }; } void SyncTailTest::tearDown() { - _txn.reset(); + _opCtx.reset(); ServiceContextMongoDTest::tearDown(); _storageInterface = nullptr; } @@ -151,10 +151,10 @@ BSONObj SyncTailWithLocalDocumentFetcher::getMissingDoc(OperationContext*, SyncTailWithOperationContextChecker::SyncTailWithOperationContextChecker() : SyncTail(nullptr, SyncTail::MultiSyncApplyFunc(), nullptr) {} -bool SyncTailWithOperationContextChecker::shouldRetry(OperationContext* txn, const BSONObj&) { - ASSERT_FALSE(txn->writesAreReplicated()); - ASSERT_FALSE(txn->lockState()->shouldConflictWithSecondaryBatchApplication()); - ASSERT_TRUE(documentValidationDisabled(txn)); +bool SyncTailWithOperationContextChecker::shouldRetry(OperationContext* opCtx, const BSONObj&) { + ASSERT_FALSE(opCtx->writesAreReplicated()); + ASSERT_FALSE(opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()); + ASSERT_TRUE(documentValidationDisabled(opCtx)); return false; } @@ -173,21 +173,21 @@ CollectionOptions createOplogCollectionOptions() { * Create test collection. * Returns collection. 
*/ -void createCollection(OperationContext* txn, +void createCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) { MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dblk(txn->lockState(), nss.db(), MODE_X); - OldClientContext ctx(txn, nss.ns()); + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock dblk(opCtx->lockState(), nss.db(), MODE_X); + OldClientContext ctx(opCtx, nss.ns()); auto db = ctx.db(); ASSERT_TRUE(db); - mongo::WriteUnitOfWork wuow(txn); - auto coll = db->createCollection(txn, nss.ns(), options); + mongo::WriteUnitOfWork wuow(opCtx); + auto coll = db->createCollection(opCtx, nss.ns(), options); ASSERT_TRUE(coll); wuow.commit(); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", nss.ns()); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", nss.ns()); } /** @@ -250,7 +250,7 @@ OplogEntry makeUpdateDocumentOplogEntry(OpTime opTime, return OplogEntry(bob.obj()); } -Status failedApplyCommand(OperationContext* txn, const BSONObj& theOperation, bool) { +Status failedApplyCommand(OperationContext* opCtx, const BSONObj& theOperation, bool) { FAIL("applyCommand unexpectedly invoked."); return Status::OK(); } @@ -258,12 +258,12 @@ Status failedApplyCommand(OperationContext* txn, const BSONObj& theOperation, bo TEST_F(SyncTailTest, SyncApplyNoNamespaceBadOp) { const BSONObj op = BSON("op" << "x"); - ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, _applyOp, _applyCmd, _incOps)); + ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, _applyOp, _applyCmd, _incOps)); ASSERT_EQUALS(0U, _opsApplied); } TEST_F(SyncTailTest, SyncApplyNoNamespaceNoOp) { - ASSERT_OK(SyncTail::syncApply(_txn.get(), + ASSERT_OK(SyncTail::syncApply(_opCtx.get(), BSON("op" << "n"), false)); @@ -275,8 +275,9 @@ TEST_F(SyncTailTest, SyncApplyBadOp) { << "x" << "ns" << "test.t"); - ASSERT_EQUALS(ErrorCodes::BadValue, - SyncTail::syncApply(_txn.get(), op, 
false, _applyOp, _applyCmd, _incOps).code()); + ASSERT_EQUALS( + ErrorCodes::BadValue, + SyncTail::syncApply(_opCtx.get(), op, false, _applyOp, _applyCmd, _incOps).code()); ASSERT_EQUALS(0U, _opsApplied); } @@ -286,24 +287,24 @@ TEST_F(SyncTailTest, SyncApplyNoOp) { << "ns" << "test.t"); bool applyOpCalled = false; - SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn, + SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx, Database* db, const BSONObj& theOperation, bool inSteadyStateReplication, stdx::function<void()>) { applyOpCalled = true; - ASSERT_TRUE(txn); - ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", MODE_X)); - ASSERT_FALSE(txn->writesAreReplicated()); - ASSERT_TRUE(documentValidationDisabled(txn)); + ASSERT_TRUE(opCtx); + ASSERT_TRUE(opCtx->lockState()->isDbLockedForMode("test", MODE_X)); + ASSERT_FALSE(opCtx->writesAreReplicated()); + ASSERT_TRUE(documentValidationDisabled(opCtx)); ASSERT_TRUE(db); ASSERT_BSONOBJ_EQ(op, theOperation); ASSERT_FALSE(inSteadyStateReplication); return Status::OK(); }; - ASSERT_TRUE(_txn->writesAreReplicated()); - ASSERT_FALSE(documentValidationDisabled(_txn.get())); - ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, failedApplyCommand, _incOps)); + ASSERT_TRUE(_opCtx->writesAreReplicated()); + ASSERT_FALSE(documentValidationDisabled(_opCtx.get())); + ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, applyOp, failedApplyCommand, _incOps)); ASSERT_TRUE(applyOpCalled); } @@ -313,7 +314,7 @@ TEST_F(SyncTailTest, SyncApplyNoOpApplyOpThrowsException) { << "ns" << "test.t"); int applyOpCalled = 0; - SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn, + SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx, Database* db, const BSONObj& theOperation, bool inSteadyStateReplication, @@ -324,7 +325,7 @@ TEST_F(SyncTailTest, SyncApplyNoOpApplyOpThrowsException) { } return Status::OK(); }; - ASSERT_OK(SyncTail::syncApply(_txn.get(), op, 
false, applyOp, failedApplyCommand, _incOps)); + ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, applyOp, failedApplyCommand, _incOps)); ASSERT_EQUALS(5, applyOpCalled); } @@ -334,25 +335,25 @@ void SyncTailTest::_testSyncApplyInsertDocument(LockMode expectedMode) { << "ns" << "test.t"); bool applyOpCalled = false; - SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn, + SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx, Database* db, const BSONObj& theOperation, bool inSteadyStateReplication, stdx::function<void()>) { applyOpCalled = true; - ASSERT_TRUE(txn); - ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", expectedMode)); - ASSERT_TRUE(txn->lockState()->isCollectionLockedForMode("test.t", expectedMode)); - ASSERT_FALSE(txn->writesAreReplicated()); - ASSERT_TRUE(documentValidationDisabled(txn)); + ASSERT_TRUE(opCtx); + ASSERT_TRUE(opCtx->lockState()->isDbLockedForMode("test", expectedMode)); + ASSERT_TRUE(opCtx->lockState()->isCollectionLockedForMode("test.t", expectedMode)); + ASSERT_FALSE(opCtx->writesAreReplicated()); + ASSERT_TRUE(documentValidationDisabled(opCtx)); ASSERT_TRUE(db); ASSERT_BSONOBJ_EQ(op, theOperation); ASSERT_TRUE(inSteadyStateReplication); return Status::OK(); }; - ASSERT_TRUE(_txn->writesAreReplicated()); - ASSERT_FALSE(documentValidationDisabled(_txn.get())); - ASSERT_OK(SyncTail::syncApply(_txn.get(), op, true, applyOp, failedApplyCommand, _incOps)); + ASSERT_TRUE(_opCtx->writesAreReplicated()); + ASSERT_FALSE(documentValidationDisabled(_opCtx.get())); + ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, true, applyOp, failedApplyCommand, _incOps)); ASSERT_TRUE(applyOpCalled); } @@ -362,9 +363,9 @@ TEST_F(SyncTailTest, SyncApplyInsertDocumentDatabaseMissing) { TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionMissing) { { - Lock::GlobalWrite globalLock(_txn->lockState()); + Lock::GlobalWrite globalLock(_opCtx->lockState()); bool justCreated = false; - Database* db = 
dbHolder().openDb(_txn.get(), "test", &justCreated); + Database* db = dbHolder().openDb(_opCtx.get(), "test", &justCreated); ASSERT_TRUE(db); ASSERT_TRUE(justCreated); } @@ -373,12 +374,12 @@ TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionMissing) { TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionExists) { { - Lock::GlobalWrite globalLock(_txn->lockState()); + Lock::GlobalWrite globalLock(_opCtx->lockState()); bool justCreated = false; - Database* db = dbHolder().openDb(_txn.get(), "test", &justCreated); + Database* db = dbHolder().openDb(_opCtx.get(), "test", &justCreated); ASSERT_TRUE(db); ASSERT_TRUE(justCreated); - Collection* collection = db->createCollection(_txn.get(), "test.t"); + Collection* collection = db->createCollection(_opCtx.get(), "test.t"); ASSERT_TRUE(collection); } _testSyncApplyInsertDocument(MODE_IX); @@ -390,24 +391,24 @@ TEST_F(SyncTailTest, SyncApplyIndexBuild) { << "ns" << "test.system.indexes"); bool applyOpCalled = false; - SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn, + SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx, Database* db, const BSONObj& theOperation, bool inSteadyStateReplication, stdx::function<void()>) { applyOpCalled = true; - ASSERT_TRUE(txn); - ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", MODE_X)); - ASSERT_FALSE(txn->writesAreReplicated()); - ASSERT_TRUE(documentValidationDisabled(txn)); + ASSERT_TRUE(opCtx); + ASSERT_TRUE(opCtx->lockState()->isDbLockedForMode("test", MODE_X)); + ASSERT_FALSE(opCtx->writesAreReplicated()); + ASSERT_TRUE(documentValidationDisabled(opCtx)); ASSERT_TRUE(db); ASSERT_BSONOBJ_EQ(op, theOperation); ASSERT_FALSE(inSteadyStateReplication); return Status::OK(); }; - ASSERT_TRUE(_txn->writesAreReplicated()); - ASSERT_FALSE(documentValidationDisabled(_txn.get())); - ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, failedApplyCommand, _incOps)); + ASSERT_TRUE(_opCtx->writesAreReplicated()); + 
ASSERT_FALSE(documentValidationDisabled(_opCtx.get())); + ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, applyOp, failedApplyCommand, _incOps)); ASSERT_TRUE(applyOpCalled); } @@ -417,7 +418,7 @@ TEST_F(SyncTailTest, SyncApplyCommand) { << "ns" << "test.t"); bool applyCmdCalled = false; - SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn, + SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx, Database* db, const BSONObj& theOperation, bool inSteadyStateReplication, @@ -426,18 +427,18 @@ TEST_F(SyncTailTest, SyncApplyCommand) { return Status::OK(); }; SyncTail::ApplyCommandInLockFn applyCmd = - [&](OperationContext* txn, const BSONObj& theOperation, bool inSteadyStateReplication) { + [&](OperationContext* opCtx, const BSONObj& theOperation, bool inSteadyStateReplication) { applyCmdCalled = true; - ASSERT_TRUE(txn); - ASSERT_TRUE(txn->lockState()->isW()); - ASSERT_TRUE(txn->writesAreReplicated()); - ASSERT_FALSE(documentValidationDisabled(txn)); + ASSERT_TRUE(opCtx); + ASSERT_TRUE(opCtx->lockState()->isW()); + ASSERT_TRUE(opCtx->writesAreReplicated()); + ASSERT_FALSE(documentValidationDisabled(opCtx)); ASSERT_BSONOBJ_EQ(op, theOperation); return Status::OK(); }; - ASSERT_TRUE(_txn->writesAreReplicated()); - ASSERT_FALSE(documentValidationDisabled(_txn.get())); - ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps)); + ASSERT_TRUE(_opCtx->writesAreReplicated()); + ASSERT_FALSE(documentValidationDisabled(_opCtx.get())); + ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, applyOp, applyCmd, _incOps)); ASSERT_TRUE(applyCmdCalled); ASSERT_EQUALS(1U, _opsApplied); } @@ -448,7 +449,7 @@ TEST_F(SyncTailTest, SyncApplyCommandThrowsException) { << "ns" << "test.t"); int applyCmdCalled = 0; - SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn, + SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx, Database* db, const BSONObj& theOperation, bool 
inSteadyStateReplication, @@ -457,14 +458,14 @@ TEST_F(SyncTailTest, SyncApplyCommandThrowsException) { return Status::OK(); }; SyncTail::ApplyCommandInLockFn applyCmd = - [&](OperationContext* txn, const BSONObj& theOperation, bool inSteadyStateReplication) { + [&](OperationContext* opCtx, const BSONObj& theOperation, bool inSteadyStateReplication) { applyCmdCalled++; if (applyCmdCalled < 5) { throw WriteConflictException(); } return Status::OK(); }; - ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps)); + ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, applyOp, applyCmd, _incOps)); ASSERT_EQUALS(5, applyCmdCalled); ASSERT_EQUALS(1U, _opsApplied); } @@ -479,14 +480,14 @@ TEST_F(SyncTailTest, MultiApplyReturnsBadValueOnNullOperationContext) { TEST_F(SyncTailTest, MultiApplyReturnsBadValueOnNullWriterPool) { auto op = makeCreateCollectionOplogEntry({Timestamp(Seconds(1), 0), 1LL}); - auto status = multiApply(_txn.get(), nullptr, {op}, noopApplyOperationFn).getStatus(); + auto status = multiApply(_opCtx.get(), nullptr, {op}, noopApplyOperationFn).getStatus(); ASSERT_EQUALS(ErrorCodes::BadValue, status); ASSERT_STRING_CONTAINS(status.reason(), "invalid worker pool"); } TEST_F(SyncTailTest, MultiApplyReturnsEmptyArrayOperationWhenNoOperationsAreGiven) { auto writerPool = SyncTail::makeWriterPool(); - auto status = multiApply(_txn.get(), writerPool.get(), {}, noopApplyOperationFn).getStatus(); + auto status = multiApply(_opCtx.get(), writerPool.get(), {}, noopApplyOperationFn).getStatus(); ASSERT_EQUALS(ErrorCodes::EmptyArrayOperation, status); ASSERT_STRING_CONTAINS(status.reason(), "no operations provided to multiApply"); } @@ -495,12 +496,13 @@ TEST_F(SyncTailTest, MultiApplyReturnsBadValueOnNullApplyOperation) { auto writerPool = SyncTail::makeWriterPool(); MultiApplier::ApplyOperationFn nullApplyOperationFn; auto op = makeCreateCollectionOplogEntry({Timestamp(Seconds(1), 0), 1LL}); - auto status = multiApply(_txn.get(), 
writerPool.get(), {op}, nullApplyOperationFn).getStatus(); + auto status = + multiApply(_opCtx.get(), writerPool.get(), {op}, nullApplyOperationFn).getStatus(); ASSERT_EQUALS(ErrorCodes::BadValue, status); ASSERT_STRING_CONTAINS(status.reason(), "invalid apply operation function"); } -bool _testOplogEntryIsForCappedCollection(OperationContext* txn, +bool _testOplogEntryIsForCappedCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) { auto writerPool = SyncTail::makeWriterPool(); @@ -512,13 +514,13 @@ bool _testOplogEntryIsForCappedCollection(OperationContext* txn, } return Status::OK(); }; - createCollection(txn, nss, options); + createCollection(opCtx, nss, options); auto op = makeInsertDocumentOplogEntry({Timestamp(Seconds(1), 0), 1LL}, nss, BSON("a" << 1)); ASSERT_FALSE(op.isForCappedCollection); auto lastOpTime = - unittest::assertGet(multiApply(txn, writerPool.get(), {op}, applyOperationFn)); + unittest::assertGet(multiApply(opCtx, writerPool.get(), {op}, applyOperationFn)); ASSERT_EQUALS(op.getOpTime(), lastOpTime); ASSERT_EQUALS(1U, operationsApplied.size()); @@ -532,14 +534,14 @@ TEST_F( SyncTailTest, MultiApplyDoesNotSetOplogEntryIsForCappedCollectionWhenProcessingNonCappedCollectionInsertOperation) { NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName()); - ASSERT_FALSE(_testOplogEntryIsForCappedCollection(_txn.get(), nss, CollectionOptions())); + ASSERT_FALSE(_testOplogEntryIsForCappedCollection(_opCtx.get(), nss, CollectionOptions())); } TEST_F(SyncTailTest, MultiApplySetsOplogEntryIsForCappedCollectionWhenProcessingCappedCollectionInsertOperation) { NamespaceString nss("local." 
+ _agent.getSuiteName() + "_" + _agent.getTestName()); ASSERT_TRUE( - _testOplogEntryIsForCappedCollection(_txn.get(), nss, createOplogCollectionOptions())); + _testOplogEntryIsForCappedCollection(_opCtx.get(), nss, createOplogCollectionOptions())); } TEST_F(SyncTailTest, MultiApplyAssignsOperationsToWriterThreadsBasedOnNamespaceHash) { @@ -569,7 +571,7 @@ TEST_F(SyncTailTest, MultiApplyAssignsOperationsToWriterThreadsBasedOnNamespaceH NamespaceString nssForInsert; std::vector<BSONObj> operationsWrittenToOplog; _storageInterface->insertDocumentsFn = [&mutex, &nssForInsert, &operationsWrittenToOplog]( - OperationContext* txn, const NamespaceString& nss, const std::vector<BSONObj>& docs) { + OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs) { stdx::lock_guard<stdx::mutex> lock(mutex); nssForInsert = nss; operationsWrittenToOplog = docs; @@ -577,7 +579,7 @@ TEST_F(SyncTailTest, MultiApplyAssignsOperationsToWriterThreadsBasedOnNamespaceH }; auto lastOpTime = - unittest::assertGet(multiApply(_txn.get(), &writerPool, {op1, op2}, applyOperationFn)); + unittest::assertGet(multiApply(_opCtx.get(), &writerPool, {op1, op2}, applyOperationFn)); ASSERT_EQUALS(op2.getOpTime(), lastOpTime); // Each writer thread should be given exactly one operation to apply. @@ -606,28 +608,28 @@ TEST_F(SyncTailTest, MultiApplyAssignsOperationsToWriterThreadsBasedOnNamespaceH TEST_F(SyncTailTest, MultiSyncApplyUsesSyncApplyToApplyOperation) { NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName()); auto op = makeCreateCollectionOplogEntry({Timestamp(Seconds(1), 0), 1LL}, nss); - _txn.reset(); + _opCtx.reset(); MultiApplier::OperationPtrs ops = {&op}; multiSyncApply(&ops, nullptr); // Collection should be created after SyncTail::syncApply() processes operation. 
- _txn = cc().makeOperationContext(); - ASSERT_TRUE(AutoGetCollectionForRead(_txn.get(), nss).getCollection()); + _opCtx = cc().makeOperationContext(); + ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection()); } TEST_F(SyncTailTest, MultiSyncApplyDisablesDocumentValidationWhileApplyingOperations) { NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName()); - auto syncApply = [](OperationContext* txn, const BSONObj&, bool convertUpdatesToUpserts) { - ASSERT_FALSE(txn->writesAreReplicated()); - ASSERT_FALSE(txn->lockState()->shouldConflictWithSecondaryBatchApplication()); - ASSERT_TRUE(documentValidationDisabled(txn)); + auto syncApply = [](OperationContext* opCtx, const BSONObj&, bool convertUpdatesToUpserts) { + ASSERT_FALSE(opCtx->writesAreReplicated()); + ASSERT_FALSE(opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()); + ASSERT_TRUE(documentValidationDisabled(opCtx)); ASSERT_TRUE(convertUpdatesToUpserts); return Status::OK(); }; auto op = makeUpdateDocumentOplogEntry( {Timestamp(Seconds(1), 0), 1LL}, nss, BSON("_id" << 0), BSON("_id" << 0 << "x" << 2)); MultiApplier::OperationPtrs ops = {&op}; - ASSERT_OK(multiSyncApply_noAbort(_txn.get(), &ops, syncApply)); + ASSERT_OK(multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply)); } TEST_F(SyncTailTest, MultiSyncApplyPassesThroughSyncApplyErrorAfterFailingToApplyOperation) { @@ -640,7 +642,8 @@ TEST_F(SyncTailTest, MultiSyncApplyPassesThroughSyncApplyErrorAfterFailingToAppl return {ErrorCodes::OperationFailed, ""}; }; MultiApplier::OperationPtrs ops = {&op}; - ASSERT_EQUALS(ErrorCodes::OperationFailed, multiSyncApply_noAbort(_txn.get(), &ops, syncApply)); + ASSERT_EQUALS(ErrorCodes::OperationFailed, + multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply)); } TEST_F(SyncTailTest, MultiSyncApplyPassesThroughSyncApplyException) { @@ -654,7 +657,8 @@ TEST_F(SyncTailTest, MultiSyncApplyPassesThroughSyncApplyException) { MONGO_UNREACHABLE; }; 
MultiApplier::OperationPtrs ops = {&op}; - ASSERT_EQUALS(ErrorCodes::OperationFailed, multiSyncApply_noAbort(_txn.get(), &ops, syncApply)); + ASSERT_EQUALS(ErrorCodes::OperationFailed, + multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply)); } TEST_F(SyncTailTest, MultiSyncApplySortsOperationsStablyByNamespaceBeforeApplying) { @@ -677,7 +681,7 @@ TEST_F(SyncTailTest, MultiSyncApplySortsOperationsStablyByNamespaceBeforeApplyin return Status::OK(); }; MultiApplier::OperationPtrs ops = {&op4, &op1, &op3, &op2}; - ASSERT_OK(multiSyncApply_noAbort(_txn.get(), &ops, syncApply)); + ASSERT_OK(multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply)); ASSERT_EQUALS(4U, operationsApplied.size()); ASSERT_EQUALS(op1, operationsApplied[0]); ASSERT_EQUALS(op2, operationsApplied[1]); @@ -707,7 +711,7 @@ TEST_F(SyncTailTest, MultiSyncApplyGroupsInsertOperationByNamespaceBeforeApplyin MultiApplier::OperationPtrs ops = { &createOp1, &createOp2, &insertOp1a, &insertOp2a, &insertOp1b, &insertOp2b}; - ASSERT_OK(multiSyncApply_noAbort(_txn.get(), &ops, syncApply)); + ASSERT_OK(multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply)); ASSERT_EQUALS(4U, operationsApplied.size()); ASSERT_EQUALS(createOp1, operationsApplied[0]); @@ -761,7 +765,7 @@ TEST_F(SyncTailTest, MultiSyncApplyUsesLimitWhenGroupingInsertOperation) { for (auto&& op : operationsToApply) { ops.push_back(&op); } - ASSERT_OK(multiSyncApply_noAbort(_txn.get(), &ops, syncApply)); + ASSERT_OK(multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply)); // multiSyncApply should combine operations as follows: // {create}, {grouped_insert}, {insert_(limit+1)} @@ -820,7 +824,7 @@ TEST_F(SyncTailTest, MultiSyncApplyFallsBackOnApplyingInsertsIndividuallyWhenGro for (auto&& op : operationsToApply) { ops.push_back(&op); } - ASSERT_OK(multiSyncApply_noAbort(_txn.get(), &ops, syncApply)); + ASSERT_OK(multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply)); // On failing to apply the grouped insert operation, multiSyncApply should apply the 
operations // as given in "operationsToApply": @@ -845,7 +849,7 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyDisablesDocumentValidationWhileApplyin {Timestamp(Seconds(1), 0), 1LL}, nss, BSON("_id" << 0), BSON("_id" << 0 << "x" << 2)); MultiApplier::OperationPtrs ops = {&op}; AtomicUInt32 fetchCount(0); - ASSERT_OK(multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount)); + ASSERT_OK(multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount)); ASSERT_EQUALS(fetchCount.load(), 1U); } @@ -858,11 +862,11 @@ TEST_F(SyncTailTest, {Timestamp(Seconds(1), 0), 1LL}, nss, BSON("_id" << 0), BSON("_id" << 0 << "x" << 2)); MultiApplier::OperationPtrs ops = {&op}; AtomicUInt32 fetchCount(0); - ASSERT_OK(multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount)); + ASSERT_OK(multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount)); // Since the missing document is not found on the sync source, the collection referenced by // the failed operation should not be automatically created. 
- ASSERT_FALSE(AutoGetCollectionForRead(_txn.get(), nss).getCollection()); + ASSERT_FALSE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection()); ASSERT_EQUALS(fetchCount.load(), 1U); } @@ -880,10 +884,10 @@ TEST_F(SyncTailTest, MultiInitialSyncApplySkipsDocumentOnNamespaceNotFound) { auto op3 = makeInsertDocumentOplogEntry({Timestamp(Seconds(4), 0), 1LL}, nss, doc3); MultiApplier::OperationPtrs ops = {&op0, &op1, &op2, &op3}; AtomicUInt32 fetchCount(0); - ASSERT_OK(multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount)); + ASSERT_OK(multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount)); ASSERT_EQUALS(fetchCount.load(), 0U); - OplogInterfaceLocal collectionReader(_txn.get(), nss.ns()); + OplogInterfaceLocal collectionReader(_opCtx.get(), nss.ns()); auto iter = collectionReader.makeIterator(); ASSERT_BSONOBJ_EQ(doc3, unittest::assertGet(iter->next()).first); ASSERT_BSONOBJ_EQ(doc1, unittest::assertGet(iter->next()).first); @@ -898,13 +902,13 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyRetriesFailedUpdateIfDocumentIsAvailab {Timestamp(Seconds(1), 0), 1LL}, nss, BSON("_id" << 0), updatedDocument); MultiApplier::OperationPtrs ops = {&op}; AtomicUInt32 fetchCount(0); - ASSERT_OK(multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount)); + ASSERT_OK(multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount)); ASSERT_EQUALS(fetchCount.load(), 1U); // The collection referenced by "ns" in the failed operation is automatically created to hold // the missing document fetched from the sync source. We verify the contents of the collection // with the OplogInterfaceLocal class. 
- OplogInterfaceLocal collectionReader(_txn.get(), nss.ns()); + OplogInterfaceLocal collectionReader(_opCtx.get(), nss.ns()); auto iter = collectionReader.makeIterator(); ASSERT_BSONOBJ_EQ(updatedDocument, unittest::assertGet(iter->next()).first); ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus()); @@ -920,7 +924,7 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyPassesThroughSyncApplyErrorAfterFailin MultiApplier::OperationPtrs ops = {&op}; AtomicUInt32 fetchCount(0); ASSERT_EQUALS(ErrorCodes::BadValue, - multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount)); + multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount)); ASSERT_EQUALS(fetchCount.load(), 1U); } @@ -929,12 +933,13 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyPassesThroughShouldSyncTailRetryError) NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName()); auto op = makeUpdateDocumentOplogEntry( {Timestamp(Seconds(1), 0), 1LL}, nss, BSON("_id" << 0), BSON("_id" << 0 << "x" << 2)); - ASSERT_THROWS_CODE( - syncTail.shouldRetry(_txn.get(), op.raw), mongo::UserException, ErrorCodes::FailedToParse); + ASSERT_THROWS_CODE(syncTail.shouldRetry(_opCtx.get(), op.raw), + mongo::UserException, + ErrorCodes::FailedToParse); MultiApplier::OperationPtrs ops = {&op}; AtomicUInt32 fetchCount(0); ASSERT_EQUALS(ErrorCodes::FailedToParse, - multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount)); + multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount)); ASSERT_EQUALS(fetchCount.load(), 1U); } @@ -970,7 +975,7 @@ Status IdempotencyTest::runOps(std::initializer_list<OplogEntry> ops) { opsPtrs.push_back(&op); } AtomicUInt32 fetchCount(0); - return multiInitialSyncApply_noAbort(_txn.get(), &opsPtrs, &syncTail, &fetchCount); + return multiInitialSyncApply_noAbort(_opCtx.get(), &opsPtrs, &syncTail, &fetchCount); } OplogEntry IdempotencyTest::createCollection() { @@ -1002,21 +1007,21 @@ OplogEntry 
IdempotencyTest::dropIndex(const std::string& indexName) { } std::string IdempotencyTest::validate() { - auto collection = AutoGetCollectionForRead(_txn.get(), nss).getCollection(); + auto collection = AutoGetCollectionForRead(_opCtx.get(), nss).getCollection(); if (!collection) { return "CollectionNotFound"; } ValidateResults validateResults; BSONObjBuilder bob; - Lock::DBLock lk(_txn->lockState(), nss.db(), MODE_IS); - Lock::CollectionLock lock(_txn->lockState(), nss.ns(), MODE_IS); - ASSERT_OK(collection->validate(_txn.get(), kValidateFull, &validateResults, &bob)); + Lock::DBLock lk(_opCtx->lockState(), nss.db(), MODE_IS); + Lock::CollectionLock lock(_opCtx->lockState(), nss.ns(), MODE_IS); + ASSERT_OK(collection->validate(_opCtx.get(), kValidateFull, &validateResults, &bob)); ASSERT_TRUE(validateResults.valid); - IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(_txn.get()); + IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(_opCtx.get()); ASSERT_TRUE(desc); - auto exec = InternalPlanner::indexScan(_txn.get(), + auto exec = InternalPlanner::indexScan(_opCtx.get(), collection, desc, BSONObj(), @@ -1041,7 +1046,7 @@ std::string IdempotencyTest::validate() { } TEST_F(IdempotencyTest, Geo2dsphereIndexFailedOnUpdate) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); auto insertOp = insert(fromjson("{_id: 1, loc: 'hi'}")); auto updateOp = update(1, fromjson("{$set: {loc: [1, 2]}}")); @@ -1054,13 +1059,13 @@ TEST_F(IdempotencyTest, Geo2dsphereIndexFailedOnUpdate) { ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), 16755); } 
TEST_F(IdempotencyTest, Geo2dsphereIndexFailedOnIndexing) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); auto indexOp = buildIndex(fromjson("{loc: '2dsphere'}"), BSON("2dsphereIndexVersion" << 3)); auto dropIndexOp = dropIndex("loc_index"); @@ -1073,13 +1078,13 @@ TEST_F(IdempotencyTest, Geo2dsphereIndexFailedOnIndexing) { ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), 16755); } TEST_F(IdempotencyTest, Geo2dIndex) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); auto insertOp = insert(fromjson("{_id: 1, loc: [1]}")); auto updateOp = update(1, fromjson("{$set: {loc: [1, 2]}}")); @@ -1092,13 +1097,13 @@ TEST_F(IdempotencyTest, Geo2dIndex) { ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), 13068); } TEST_F(IdempotencyTest, UniqueKeyIndex) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); auto insertOp = insert(fromjson("{_id: 1, x: 5}")); auto updateOp = update(1, fromjson("{$set: {x: 6}}")); @@ -1112,13 +1117,13 @@ TEST_F(IdempotencyTest, UniqueKeyIndex) { ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); 
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), ErrorCodes::DuplicateKey); } TEST_F(IdempotencyTest, ParallelArrayError) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); ASSERT_OK(runOp(insert(fromjson("{_id: 1}")))); @@ -1135,13 +1140,13 @@ TEST_F(IdempotencyTest, ParallelArrayError) { ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), ErrorCodes::CannotIndexParallelArrays); } TEST_F(IdempotencyTest, IndexKeyTooLongError) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); ASSERT_OK(runOp(insert(fromjson("{_id: 1}")))); @@ -1161,13 +1166,13 @@ TEST_F(IdempotencyTest, IndexKeyTooLongError) { ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), ErrorCodes::KeyTooLong); } TEST_F(IdempotencyTest, IndexWithDifferentOptions) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); ASSERT_OK(runOp(insert(fromjson("{_id: 1, x: 'hi'}")))); @@ -1183,13 +1188,13 @@ 
TEST_F(IdempotencyTest, IndexWithDifferentOptions) { ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), ErrorCodes::IndexOptionsConflict); } TEST_F(IdempotencyTest, TextIndexDocumentHasNonStringLanguageField) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); auto insertOp = insert(fromjson("{_id: 1, x: 'words to index', language: 1}")); @@ -1203,13 +1208,13 @@ TEST_F(IdempotencyTest, TextIndexDocumentHasNonStringLanguageField) { ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), 17261); } TEST_F(IdempotencyTest, InsertDocumentWithNonStringLanguageFieldWhenTextIndexExists) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); auto indexOp = buildIndex(fromjson("{x: 'text'}"), BSONObj()); @@ -1223,13 +1228,13 @@ TEST_F(IdempotencyTest, InsertDocumentWithNonStringLanguageFieldWhenTextIndexExi ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), 17261); } TEST_F(IdempotencyTest, TextIndexDocumentHasNonStringLanguageOverrideField) { - 
ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); auto insertOp = insert(fromjson("{_id: 1, x: 'words to index', y: 1}")); @@ -1243,13 +1248,13 @@ TEST_F(IdempotencyTest, TextIndexDocumentHasNonStringLanguageOverrideField) { ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), 17261); } TEST_F(IdempotencyTest, InsertDocumentWithNonStringLanguageOverrideFieldWhenTextIndexExists) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); auto indexOp = buildIndex(fromjson("{x: 'text'}"), fromjson("{language_override: 'y'}")); @@ -1263,13 +1268,13 @@ TEST_F(IdempotencyTest, InsertDocumentWithNonStringLanguageOverrideFieldWhenText ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), 17261); } TEST_F(IdempotencyTest, TextIndexDocumentHasUnknownLanguage) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); ASSERT_OK(runOp(createCollection())); auto insertOp = insert(fromjson("{_id: 1, x: 'words to index', language: 'bad'}")); @@ -1283,7 +1288,7 @@ TEST_F(IdempotencyTest, TextIndexDocumentHasUnknownLanguage) { ASSERT_OK(runOps(ops)); ASSERT_EQUALS(hash, validate()); - 
ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY); auto status = runOps(ops); ASSERT_EQ(status.code(), 17262); } @@ -1428,7 +1433,7 @@ TEST_F(IdempotencyTest, CollModIndexNotFound) { } TEST_F(IdempotencyTest, ResyncOnRenameCollection) { - ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING); + ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING); auto cmd = BSON("renameCollection" << nss.ns() << "to" << "test.bar" diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp index 210718bba3e..d364151a228 100644 --- a/src/mongo/db/repl/task_runner.cpp +++ b/src/mongo/db/repl/task_runner.cpp @@ -60,10 +60,10 @@ using LockGuard = stdx::lock_guard<stdx::mutex>; * next action of kCancel. */ TaskRunner::NextAction runSingleTask(const TaskRunner::Task& task, - OperationContext* txn, + OperationContext* opCtx, const Status& status) { try { - return task(txn, status); + return task(opCtx, status); } catch (...) 
{ log() << "Unhandled exception in task runner: " << redact(exceptionToStatus()); } @@ -74,7 +74,7 @@ TaskRunner::NextAction runSingleTask(const TaskRunner::Task& task, // static TaskRunner::Task TaskRunner::makeCancelTask() { - return [](OperationContext* txn, const Status& status) { return NextAction::kCancel; }; + return [](OperationContext* opCtx, const Status& status) { return NextAction::kCancel; }; } TaskRunner::TaskRunner(OldThreadPool* threadPool) @@ -132,10 +132,10 @@ void TaskRunner::join() { void TaskRunner::_runTasks() { Client* client = nullptr; - ServiceContext::UniqueOperationContext txn; + ServiceContext::UniqueOperationContext opCtx; while (Task task = _waitForNextTask()) { - if (!txn) { + if (!opCtx) { if (!client) { // We initialize cc() because ServiceContextMongoD::_newOpCtx() expects cc() // to be equal to the client used to create the operation context. @@ -145,13 +145,13 @@ void TaskRunner::_runTasks() { AuthorizationSession::get(client)->grantInternalAuthorization(); } } - txn = client->makeOperationContext(); + opCtx = client->makeOperationContext(); } - NextAction nextAction = runSingleTask(task, txn.get(), Status::OK()); + NextAction nextAction = runSingleTask(task, opCtx.get(), Status::OK()); if (nextAction != NextAction::kKeepOperationContext) { - txn.reset(); + opCtx.reset(); } if (nextAction == NextAction::kCancel) { @@ -167,7 +167,7 @@ void TaskRunner::_runTasks() { } } } - txn.reset(); + opCtx.reset(); std::list<Task> tasks; UniqueLock lk{_mutex}; @@ -221,13 +221,13 @@ Status TaskRunner::runSynchronousTask(SynchronousTask func, TaskRunner::NextActi stdx::condition_variable waitTillDoneCond; Status returnStatus{Status::OK()}; - this->schedule([&](OperationContext* txn, const Status taskStatus) { + this->schedule([&](OperationContext* opCtx, const Status taskStatus) { if (!taskStatus.isOK()) { returnStatus = taskStatus; } else { // Run supplied function. try { - returnStatus = func(txn); + returnStatus = func(opCtx); } catch (...) 
{ returnStatus = exceptionToStatus(); error() << "Exception thrown in runSynchronousTask: " << redact(returnStatus); diff --git a/src/mongo/db/repl/task_runner.h b/src/mongo/db/repl/task_runner.h index a8908660c44..b7dcf4c05d6 100644 --- a/src/mongo/db/repl/task_runner.h +++ b/src/mongo/db/repl/task_runner.h @@ -59,7 +59,7 @@ public: }; using Task = stdx::function<NextAction(OperationContext*, const Status&)>; - using SynchronousTask = stdx::function<Status(OperationContext* txn)>; + using SynchronousTask = stdx::function<Status(OperationContext* opCtx)>; /** * Returns the Status from the supplied function after running it.. diff --git a/src/mongo/db/repl/task_runner_test.cpp b/src/mongo/db/repl/task_runner_test.cpp index dedb6269083..62b64513b37 100644 --- a/src/mongo/db/repl/task_runner_test.cpp +++ b/src/mongo/db/repl/task_runner_test.cpp @@ -58,12 +58,12 @@ TEST_F(TaskRunnerTest, GetDiagnosticString) { TEST_F(TaskRunnerTest, CallbackValues) { stdx::mutex mutex; bool called = false; - OperationContext* txn = nullptr; + OperationContext* opCtx = nullptr; Status status = getDetectableErrorStatus(); auto task = [&](OperationContext* theTxn, const Status& theStatus) { stdx::lock_guard<stdx::mutex> lk(mutex); called = true; - txn = theTxn; + opCtx = theTxn; status = theStatus; return TaskRunner::NextAction::kCancel; }; @@ -73,7 +73,7 @@ TEST_F(TaskRunnerTest, CallbackValues) { stdx::lock_guard<stdx::mutex> lk(mutex); ASSERT_TRUE(called); - ASSERT(txn); + ASSERT(opCtx); ASSERT_OK(status); } @@ -149,7 +149,7 @@ TEST_F(TaskRunnerTest, RunTaskTwiceKeepOperationContext) { TEST_F(TaskRunnerTest, SkipSecondTask) { stdx::mutex mutex; int i = 0; - OperationContext* txn[2] = {nullptr, nullptr}; + OperationContext* opCtx[2] = {nullptr, nullptr}; Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()}; stdx::condition_variable condition; bool schedulingDone = false; @@ -159,7 +159,7 @@ TEST_F(TaskRunnerTest, SkipSecondTask) { if (j >= 2) { return 
TaskRunner::NextAction::kCancel; } - txn[j] = theTxn; + opCtx[j] = theTxn; status[j] = theStatus; // Wait for the test code to schedule the second task. @@ -182,16 +182,16 @@ TEST_F(TaskRunnerTest, SkipSecondTask) { stdx::lock_guard<stdx::mutex> lk(mutex); ASSERT_EQUALS(2, i); - ASSERT(txn[0]); + ASSERT(opCtx[0]); ASSERT_OK(status[0]); - ASSERT_FALSE(txn[1]); + ASSERT_FALSE(opCtx[1]); ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status[1].code()); } TEST_F(TaskRunnerTest, FirstTaskThrowsException) { stdx::mutex mutex; int i = 0; - OperationContext* txn[2] = {nullptr, nullptr}; + OperationContext* opCtx[2] = {nullptr, nullptr}; Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()}; stdx::condition_variable condition; bool schedulingDone = false; @@ -201,7 +201,7 @@ TEST_F(TaskRunnerTest, FirstTaskThrowsException) { if (j >= 2) { return TaskRunner::NextAction::kCancel; } - txn[j] = theTxn; + opCtx[j] = theTxn; status[j] = theStatus; // Wait for the test code to schedule the second task. @@ -231,9 +231,9 @@ TEST_F(TaskRunnerTest, FirstTaskThrowsException) { stdx::lock_guard<stdx::mutex> lk(mutex); ASSERT_EQUALS(2, i); - ASSERT(txn[0]); + ASSERT(opCtx[0]); ASSERT_OK(status[0]); - ASSERT_FALSE(txn[1]); + ASSERT_FALSE(opCtx[1]); ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status[1].code()); } |