| author | Maria van Keulen <maria@mongodb.com> | 2017-03-07 12:00:08 -0500 |
| --- | --- | --- |
| committer | Maria van Keulen <maria@mongodb.com> | 2017-03-07 12:00:08 -0500 |
| commit | 589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79 (patch) | |
| tree | c7a090ffdd56a91ae677e2492c61b820af44f964 /src/mongo/db/dbhelpers.cpp | |
| parent | 3cba97198638df3750e3b455e2ad57af7ee536ae (diff) | |
| download | mongo-589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79.tar.gz | |
SERVER-27938 Rename all OperationContext variables to opCtx
This commit is an automated rename of all whole-word instances of txn,
_txn, and txnPtr to opCtx, _opCtx, and opCtxPtr, respectively, in all
.cpp and .h files in src/mongo.
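A rename of this scale is typically scripted rather than done by hand. The commit message does not say which tool performed it, so the following is only an illustrative sketch of one way to do the same whole-word replacement; the script, its names, and its structure are assumptions, not the actual tool used:

```python
#!/usr/bin/env python
"""Hypothetical sketch of the whole-word rename this commit describes.

Not the actual script used for SERVER-27938; shown only to illustrate
what a "whole word" (\\b-delimited) rename over src/mongo looks like.
"""
import os
import re

# Word boundaries (\b) ensure only standalone identifiers match, so
# e.g. `txnNumber` or `getTxn` would be left untouched.
RENAMES = [
    (re.compile(r'\btxn\b'), 'opCtx'),
    (re.compile(r'\b_txn\b'), '_opCtx'),
    (re.compile(r'\btxnPtr\b'), 'opCtxPtr'),
]


def rename_in_file(path):
    """Apply every rename pattern to one file, rewriting it only on change."""
    with open(path) as f:
        text = f.read()
    new_text = text
    for pattern, replacement in RENAMES:
        new_text = pattern.sub(replacement, new_text)
    if new_text != text:
        with open(path, 'w') as f:
            f.write(new_text)


# Walk the tree named in the commit message, touching only .cpp and .h files.
for root, _dirs, files in os.walk('src/mongo'):
    for name in files:
        if name.endswith(('.cpp', '.h')):
            rename_in_file(os.path.join(root, name))
```

Note that the order of the three patterns does not matter: `\btxn\b` cannot match inside `_txn` or `txnPtr`, because the adjacent word characters (`_`, `P`) suppress the word boundary.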
Diffstat (limited to 'src/mongo/db/dbhelpers.cpp')
-rw-r--r-- | src/mongo/db/dbhelpers.cpp | 113 |
1 file changed, 61 insertions, 52 deletions
```diff
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 2dd6c2c7d1c..83f4c15e52d 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -83,7 +83,7 @@ using std::stringstream;
 
 using logger::LogComponent;
 
-void Helpers::ensureIndex(OperationContext* txn,
+void Helpers::ensureIndex(OperationContext* opCtx,
                           Collection* collection,
                           BSONObj keyPattern,
                           IndexDescriptor::IndexVersion indexVersion,
@@ -97,7 +97,7 @@ void Helpers::ensureIndex(OperationContext* txn,
     b.appendBool("unique", unique);
     BSONObj o = b.done();
 
-    MultiIndexBlock indexer(txn, collection);
+    MultiIndexBlock indexer(opCtx, collection);
 
     Status status = indexer.init(o).getStatus();
     if (status.code() == ErrorCodes::IndexAlreadyExists)
@@ -106,7 +106,7 @@ void Helpers::ensureIndex(OperationContext* txn,
 
     uassertStatusOK(indexer.insertAllDocumentsInCollection());
 
-    WriteUnitOfWork wunit(txn);
+    WriteUnitOfWork wunit(opCtx);
     indexer.commit();
     wunit.commit();
 }
@@ -114,40 +114,40 @@ void Helpers::ensureIndex(OperationContext* txn,
 /* fetch a single object from collection ns that matches query
    set your db SavedContext first
 */
-bool Helpers::findOne(OperationContext* txn,
+bool Helpers::findOne(OperationContext* opCtx,
                       Collection* collection,
                       const BSONObj& query,
                       BSONObj& result,
                       bool requireIndex) {
-    RecordId loc = findOne(txn, collection, query, requireIndex);
+    RecordId loc = findOne(opCtx, collection, query, requireIndex);
     if (loc.isNull())
         return false;
-    result = collection->docFor(txn, loc).value();
+    result = collection->docFor(opCtx, loc).value();
     return true;
 }
 
 /* fetch a single object from collection ns that matches query
    set your db SavedContext first
 */
-RecordId Helpers::findOne(OperationContext* txn,
+RecordId Helpers::findOne(OperationContext* opCtx,
                           Collection* collection,
                           const BSONObj& query,
                           bool requireIndex) {
     if (!collection)
         return RecordId();
 
-    const ExtensionsCallbackReal extensionsCallback(txn, &collection->ns());
+    const ExtensionsCallbackReal extensionsCallback(opCtx, &collection->ns());
 
     auto qr = stdx::make_unique<QueryRequest>(collection->ns());
     qr->setFilter(query);
 
-    auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+    auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
     massert(17244, "Could not canonicalize " + query.toString(), statusWithCQ.isOK());
     unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
 
     size_t options = requireIndex ? QueryPlannerParams::NO_TABLE_SCAN : QueryPlannerParams::DEFAULT;
     auto statusWithPlanExecutor =
-        getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_MANUAL, options);
+        getExecutor(opCtx, collection, std::move(cq), PlanExecutor::YIELD_MANUAL, options);
     massert(17245,
             "Could not get executor for query " + query.toString(),
             statusWithPlanExecutor.isOK());
@@ -165,7 +165,7 @@ RecordId Helpers::findOne(OperationContext* txn,
     return RecordId();
 }
 
-bool Helpers::findById(OperationContext* txn,
+bool Helpers::findById(OperationContext* opCtx,
                        Database* database,
                        const char* ns,
                        BSONObj query,
@@ -183,7 +183,7 @@ bool Helpers::findById(OperationContext* txn,
         *nsFound = true;
 
     IndexCatalog* catalog = collection->getIndexCatalog();
-    const IndexDescriptor* desc = catalog->findIdIndex(txn);
+    const IndexDescriptor* desc = catalog->findIdIndex(opCtx);
 
     if (!desc)
         return false;
@@ -191,28 +191,30 @@ bool Helpers::findById(OperationContext* txn,
     if (indexFound)
         *indexFound = 1;
 
-    RecordId loc = catalog->getIndex(desc)->findSingle(txn, query["_id"].wrap());
+    RecordId loc = catalog->getIndex(desc)->findSingle(opCtx, query["_id"].wrap());
    if (loc.isNull())
        return false;
-    result = collection->docFor(txn, loc).value();
+    result = collection->docFor(opCtx, loc).value();
     return true;
 }
 
-RecordId Helpers::findById(OperationContext* txn, Collection* collection, const BSONObj& idquery) {
+RecordId Helpers::findById(OperationContext* opCtx,
+                           Collection* collection,
+                           const BSONObj& idquery) {
     verify(collection);
     IndexCatalog* catalog = collection->getIndexCatalog();
-    const IndexDescriptor* desc = catalog->findIdIndex(txn);
+    const IndexDescriptor* desc = catalog->findIdIndex(opCtx);
     uassert(13430, "no _id index", desc);
-    return catalog->getIndex(desc)->findSingle(txn, idquery["_id"].wrap());
+    return catalog->getIndex(desc)->findSingle(opCtx, idquery["_id"].wrap());
 }
 
-bool Helpers::getSingleton(OperationContext* txn, const char* ns, BSONObj& result) {
-    AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
-    unique_ptr<PlanExecutor> exec(
-        InternalPlanner::collectionScan(txn, ns, ctx.getCollection(), PlanExecutor::YIELD_MANUAL));
+bool Helpers::getSingleton(OperationContext* opCtx, const char* ns, BSONObj& result) {
+    AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+    unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
+        opCtx, ns, ctx.getCollection(), PlanExecutor::YIELD_MANUAL));
     PlanExecutor::ExecState state = exec->getNext(&result, NULL);
 
-    CurOp::get(txn)->done();
+    CurOp::get(opCtx)->done();
 
     // Non-yielding collection scans from InternalPlanner will never error.
     invariant(PlanExecutor::ADVANCED == state || PlanExecutor::IS_EOF == state);
@@ -225,10 +227,13 @@ bool Helpers::getSingleton(OperationContext* txn, const char* ns, BSONObj& resul
     return false;
 }
 
-bool Helpers::getLast(OperationContext* txn, const char* ns, BSONObj& result) {
-    AutoGetCollectionForRead autoColl(txn, NamespaceString(ns));
-    unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
-        txn, ns, autoColl.getCollection(), PlanExecutor::YIELD_MANUAL, InternalPlanner::BACKWARD));
+bool Helpers::getLast(OperationContext* opCtx, const char* ns, BSONObj& result) {
+    AutoGetCollectionForRead autoColl(opCtx, NamespaceString(ns));
+    unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(opCtx,
+                                                                  ns,
+                                                                  autoColl.getCollection(),
+                                                                  PlanExecutor::YIELD_MANUAL,
+                                                                  InternalPlanner::BACKWARD));
     PlanExecutor::ExecState state = exec->getNext(&result, NULL);
 
     // Non-yielding collection scans from InternalPlanner will never error.
@@ -242,12 +247,15 @@ bool Helpers::getLast(OperationContext* txn, const char* ns, BSONObj& result) {
     return false;
 }
 
-void Helpers::upsert(OperationContext* txn, const string& ns, const BSONObj& o, bool fromMigrate) {
+void Helpers::upsert(OperationContext* opCtx,
+                     const string& ns,
+                     const BSONObj& o,
+                     bool fromMigrate) {
     BSONElement e = o["_id"];
     verify(e.type());
     BSONObj id = e.wrap();
 
-    OldClientContext context(txn, ns);
+    OldClientContext context(opCtx, ns);
 
     const NamespaceString requestNs(ns);
     UpdateRequest request(requestNs);
@@ -259,11 +267,11 @@ void Helpers::upsert(OperationContext* txn, const string& ns, const BSONObj& o,
     UpdateLifecycleImpl updateLifecycle(requestNs);
     request.setLifecycle(&updateLifecycle);
 
-    update(txn, context.db(), request);
+    update(opCtx, context.db(), request);
 }
 
-void Helpers::putSingleton(OperationContext* txn, const char* ns, BSONObj obj) {
-    OldClientContext context(txn, ns);
+void Helpers::putSingleton(OperationContext* opCtx, const char* ns, BSONObj obj) {
+    OldClientContext context(opCtx, ns);
 
     const NamespaceString requestNs(ns);
     UpdateRequest request(requestNs);
@@ -273,9 +281,9 @@ void Helpers::putSingleton(OperationContext* txn, const char* ns, BSONObj obj) {
     UpdateLifecycleImpl updateLifecycle(requestNs);
     request.setLifecycle(&updateLifecycle);
 
-    update(txn, context.db(), request);
+    update(opCtx, context.db(), request);
 
-    CurOp::get(txn)->done();
+    CurOp::get(opCtx)->done();
 }
 
 BSONObj Helpers::toKeyFormat(const BSONObj& o) {
@@ -294,7 +302,7 @@ BSONObj Helpers::inferKeyPattern(const BSONObj& o) {
     return kpBuilder.obj();
 }
 
-long long Helpers::removeRange(OperationContext* txn,
+long long Helpers::removeRange(OperationContext* opCtx,
                                const KeyRange& range,
                                BoundInclusion boundInclusion,
                                const WriteConcernOptions& writeConcern,
@@ -311,7 +319,7 @@ long long Helpers::removeRange(OperationContext* txn,
     BSONObj max;
 
     {
-        AutoGetCollectionForRead ctx(txn, nss);
+        AutoGetCollectionForRead ctx(opCtx, nss);
         Collection* collection = ctx.getCollection();
         if (!collection) {
             warning(LogComponent::kSharding)
@@ -324,7 +332,7 @@ long long Helpers::removeRange(OperationContext* txn,
         // Therefore, any multi-key index prefixed by shard key cannot be multikey over
         // the shard key fields.
         const IndexDescriptor* idx =
-            collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn,
+            collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx,
                                                                      range.keyPattern,
                                                                      false);  // requireSingleKey
         if (!idx) {
@@ -359,12 +367,13 @@ long long Helpers::removeRange(OperationContext* txn,
     while (1) {
         // Scoping for write lock.
         {
-            AutoGetCollection ctx(txn, nss, MODE_IX, MODE_IX);
+            AutoGetCollection ctx(opCtx, nss, MODE_IX, MODE_IX);
             Collection* collection = ctx.getCollection();
             if (!collection)
                 break;
 
-            IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn, indexName);
+            IndexDescriptor* desc =
+                collection->getIndexCatalog()->findIndexByName(opCtx, indexName);
 
             if (!desc) {
                 warning(LogComponent::kSharding) << "shard key index '" << indexName << "' on '"
@@ -373,7 +382,7 @@ long long Helpers::removeRange(OperationContext* txn,
             }
 
             unique_ptr<PlanExecutor> exec(
-                InternalPlanner::indexScan(txn,
+                InternalPlanner::indexScan(opCtx,
                                            collection,
                                            desc,
                                            min,
@@ -404,7 +413,7 @@ long long Helpers::removeRange(OperationContext* txn,
 
             verify(PlanExecutor::ADVANCED == state);
 
-            WriteUnitOfWork wuow(txn);
+            WriteUnitOfWork wuow(opCtx);
 
             if (onlyRemoveOrphanedDocs) {
                 // Do a final check in the write lock to make absolutely sure that our
@@ -413,12 +422,12 @@ long long Helpers::removeRange(OperationContext* txn,
 
                 // We should never be able to turn off the sharding state once enabled, but
                 // in the future we might want to.
-                verify(ShardingState::get(txn)->enabled());
+                verify(ShardingState::get(opCtx)->enabled());
 
                 bool docIsOrphan;
 
                 // In write lock, so will be the most up-to-date version
-                auto metadataNow = CollectionShardingState::get(txn, nss.ns())->getMetadata();
+                auto metadataNow = CollectionShardingState::get(opCtx, nss.ns())->getMetadata();
                 if (metadataNow) {
                     ShardKeyPattern kp(metadataNow->getKeyPattern());
                     BSONObj key = kp.extractShardKeyFromDoc(obj);
@@ -437,7 +446,7 @@ long long Helpers::removeRange(OperationContext* txn,
                 }
             }
 
-            if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss)) {
+            if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss)) {
                 warning() << "stepped down from primary while deleting chunk; "
                           << "orphaning data in " << nss.ns() << " in range [" << redact(min)
                           << ", " << redact(max) << ")";
@@ -448,7 +457,7 @@ long long Helpers::removeRange(OperationContext* txn,
                 callback->goingToDelete(obj);
 
             OpDebug* const nullOpDebug = nullptr;
-            collection->deleteDocument(txn, rloc, nullOpDebug, fromMigrate);
+            collection->deleteDocument(opCtx, rloc, nullOpDebug, fromMigrate);
             wuow.commit();
             numDeleted++;
         }
@@ -459,8 +468,8 @@ long long Helpers::removeRange(OperationContext* txn,
         if (writeConcern.shouldWaitForOtherNodes() && numDeleted > 0) {
             repl::ReplicationCoordinator::StatusAndDuration replStatus =
                 repl::getGlobalReplicationCoordinator()->awaitReplication(
-                    txn,
-                    repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
+                    opCtx,
+                    repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
                     writeConcern);
             if (replStatus.status.code() == ErrorCodes::ExceededTimeLimit) {
                 warning(LogComponent::kSharding) << "replication to secondaries for removeRange at "
@@ -484,13 +493,13 @@ long long Helpers::removeRange(OperationContext* txn,
     return numDeleted;
 }
 
-void Helpers::emptyCollection(OperationContext* txn, const char* ns) {
-    OldClientContext context(txn, ns);
-    bool shouldReplicateWrites = txn->writesAreReplicated();
-    txn->setReplicatedWrites(false);
-    ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
+void Helpers::emptyCollection(OperationContext* opCtx, const char* ns) {
+    OldClientContext context(opCtx, ns);
+    bool shouldReplicateWrites = opCtx->writesAreReplicated();
+    opCtx->setReplicatedWrites(false);
+    ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
     Collection* collection = context.db() ? context.db()->getCollection(ns) : nullptr;
-    deleteObjects(txn, collection, ns, BSONObj(), PlanExecutor::YIELD_MANUAL, false);
+    deleteObjects(opCtx, collection, ns, BSONObj(), PlanExecutor::YIELD_MANUAL, false);
 }
 
 Helpers::RemoveSaver::RemoveSaver(const string& a, const string& b, const string& why) {
```