Diffstat (limited to 'src/mongo/db/catalog')
 src/mongo/db/catalog/apply_ops.cpp                 |  72
 src/mongo/db/catalog/apply_ops.h                   |   2
 src/mongo/db/catalog/capped_utils.cpp              |  86
 src/mongo/db/catalog/capped_utils.h                |   6
 src/mongo/db/catalog/coll_mod.cpp                  |  46
 src/mongo/db/catalog/coll_mod.h                    |   4
 src/mongo/db/catalog/collection.cpp                | 284
 src/mongo/db/catalog/collection.h                  |  62
 src/mongo/db/catalog/collection_catalog_entry.h    |  32
 src/mongo/db/catalog/collection_compact.cpp        |  28
 src/mongo/db/catalog/collection_info_cache.cpp     |  46
 src/mongo/db/catalog/collection_info_cache.h       |  16
 src/mongo/db/catalog/create_collection.cpp         |  18
 src/mongo/db/catalog/create_collection.h           |   2
 src/mongo/db/catalog/cursor_manager.cpp            |  53
 src/mongo/db/catalog/cursor_manager.h              |  12
 src/mongo/db/catalog/database.cpp                  | 158
 src/mongo/db/catalog/database.h                    |  38
 src/mongo/db/catalog/database_catalog_entry.h      |   6
 src/mongo/db/catalog/database_holder.cpp           |  28
 src/mongo/db/catalog/database_holder.h             |   8
 src/mongo/db/catalog/document_validation.h         |  14
 src/mongo/db/catalog/drop_collection.cpp           |  25
 src/mongo/db/catalog/drop_collection.h             |   2
 src/mongo/db/catalog/drop_database.cpp             |  26
 src/mongo/db/catalog/drop_database.h               |   2
 src/mongo/db/catalog/drop_indexes.cpp              |  37
 src/mongo/db/catalog/drop_indexes.h                |   2
 src/mongo/db/catalog/head_manager.h                |   4
 src/mongo/db/catalog/index_catalog.cpp             | 296
 src/mongo/db/catalog/index_catalog.h               |  87
 src/mongo/db/catalog/index_catalog_entry.cpp       |  71
 src/mongo/db/catalog/index_catalog_entry.h         |  18
 src/mongo/db/catalog/index_create.cpp              |  63
 src/mongo/db/catalog/index_create.h                |   4
 src/mongo/db/catalog/index_key_validate.cpp        |   4
 src/mongo/db/catalog/index_key_validate.h          |   2
 src/mongo/db/catalog/index_spec_validate_test.cpp  |  12
 src/mongo/db/catalog/rename_collection.cpp         |  86
 src/mongo/db/catalog/rename_collection.h           |   2
 40 files changed, 892 insertions(+), 872 deletions(-)
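
Every hunk below makes the same mechanical change: the OperationContext* parameter (and the members and locals derived from it) is renamed from txn to opCtx, so the name describes what the object is rather than suggesting a storage transaction. A minimal before/after sketch of the pattern, using the applyOps signature from the first file:

    // Before: the OperationContext* parameter was conventionally named "txn".
    Status applyOps(OperationContext* txn,
                    const std::string& dbName,
                    const BSONObj& applyOpCmd,
                    BSONObjBuilder* result);

    // After: renamed to "opCtx". Call sites, members (_txn -> _opCtx), and
    // accessor chains (txn->lockState(), txn->recoveryUnit()) follow suit.
    Status applyOps(OperationContext* opCtx,
                    const std::string& dbName,
                    const BSONObj& applyOpCmd,
                    BSONObjBuilder* result);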
diff --git a/src/mongo/db/catalog/apply_ops.cpp b/src/mongo/db/catalog/apply_ops.cpp
index 12729f3fa31..5e4e00aefb0 100644
--- a/src/mongo/db/catalog/apply_ops.cpp
+++ b/src/mongo/db/catalog/apply_ops.cpp
@@ -89,12 +89,12 @@ bool canBeAtomic(const BSONObj& applyOpCmd) {
return true;
}
-Status _applyOps(OperationContext* txn,
+Status _applyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd,
BSONObjBuilder* result,
int* numApplied) {
- dassert(txn->lockState()->isLockHeldForMode(
+ dassert(opCtx->lockState()->isLockHeldForMode(
ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL), MODE_X));
BSONObj ops = applyOpCmd.firstElement().Obj();
@@ -107,10 +107,10 @@ Status _applyOps(OperationContext* txn,
BSONArrayBuilder ab;
const bool alwaysUpsert =
applyOpCmd.hasField("alwaysUpsert") ? applyOpCmd["alwaysUpsert"].trueValue() : true;
- const bool haveWrappingWUOW = txn->lockState()->inAWriteUnitOfWork();
+ const bool haveWrappingWUOW = opCtx->lockState()->inAWriteUnitOfWork();
{
- repl::UnreplicatedWritesBlock uwb(txn);
+ repl::UnreplicatedWritesBlock uwb(opCtx);
while (i.more()) {
BSONElement e = i.next();
@@ -132,18 +132,18 @@ Status _applyOps(OperationContext* txn,
if (haveWrappingWUOW) {
invariant(*opType != 'c');
- if (!dbHolder().get(txn, ns)) {
+ if (!dbHolder().get(opCtx, ns)) {
throw DBException(
"cannot create a database in atomic applyOps mode; will retry without "
"atomicity",
ErrorCodes::NamespaceNotFound);
}
- OldClientContext ctx(txn, ns);
- status = repl::applyOperation_inlock(txn, ctx.db(), opObj, alwaysUpsert);
+ OldClientContext ctx(opCtx, ns);
+ status = repl::applyOperation_inlock(opCtx, ctx.db(), opObj, alwaysUpsert);
if (!status.isOK())
return status;
- logOpForDbHash(txn, ns.c_str());
+ logOpForDbHash(opCtx, ns.c_str());
} else {
try {
// Run operations under a nested lock as a hack to prevent yielding.
@@ -156,25 +156,25 @@ Status _applyOps(OperationContext* txn,
//
// We do not have a wrapping WriteUnitOfWork so it is possible for a journal
// commit to happen with a subset of ops applied.
- Lock::GlobalWrite globalWriteLockDisallowTempRelease(txn->lockState());
+ Lock::GlobalWrite globalWriteLockDisallowTempRelease(opCtx->lockState());
// Ensures that yielding will not happen (see the comment above).
DEV {
Locker::LockSnapshot lockSnapshot;
- invariant(!txn->lockState()->saveLockStateAndUnlock(&lockSnapshot));
+ invariant(!opCtx->lockState()->saveLockStateAndUnlock(&lockSnapshot));
};
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
if (*opType == 'c') {
- status = repl::applyCommand_inlock(txn, opObj, true);
+ status = repl::applyCommand_inlock(opCtx, opObj, true);
} else {
- OldClientContext ctx(txn, ns);
+ OldClientContext ctx(opCtx, ns);
status =
- repl::applyOperation_inlock(txn, ctx.db(), opObj, alwaysUpsert);
+ repl::applyOperation_inlock(opCtx, ctx.db(), opObj, alwaysUpsert);
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "applyOps", ns);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "applyOps", ns);
} catch (const DBException& ex) {
ab.append(false);
result->append("applied", ++(*numApplied));
@@ -185,8 +185,8 @@ Status _applyOps(OperationContext* txn,
result->append("results", ab.arr());
return Status(ErrorCodes::UnknownError, ex.what());
}
- WriteUnitOfWork wuow(txn);
- logOpForDbHash(txn, ns.c_str());
+ WriteUnitOfWork wuow(opCtx);
+ logOpForDbHash(opCtx, ns.c_str());
wuow.commit();
}
@@ -203,7 +203,7 @@ Status _applyOps(OperationContext* txn,
result->append("results", ab.arr());
} // set replicatedWrites back to original value
- if (txn->writesAreReplicated()) {
+ if (opCtx->writesAreReplicated()) {
// We want this applied atomically on slaves
// so we re-wrap without the pre-condition for speed
@@ -227,7 +227,7 @@ Status _applyOps(OperationContext* txn,
auto opObserver = getGlobalServiceContext()->getOpObserver();
invariant(opObserver);
if (haveWrappingWUOW) {
- opObserver->onApplyOps(txn, tempNS, cmdRewritten);
+ opObserver->onApplyOps(opCtx, tempNS, cmdRewritten);
} else {
// When executing applyOps outside of a wrapping WriteUnitOfWork, always logOp the
// command regardless of whether the individual ops succeeded and rely on any
@@ -235,14 +235,14 @@ Status _applyOps(OperationContext* txn,
// has always done and is part of its "correct" behavior.
while (true) {
try {
- WriteUnitOfWork wunit(txn);
- opObserver->onApplyOps(txn, tempNS, cmdRewritten);
+ WriteUnitOfWork wunit(opCtx);
+ opObserver->onApplyOps(opCtx, tempNS, cmdRewritten);
wunit.commit();
break;
} catch (const WriteConflictException& wce) {
LOG(2) << "WriteConflictException while logging applyOps command, retrying.";
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
continue;
}
}
@@ -256,8 +256,8 @@ Status _applyOps(OperationContext* txn,
return Status::OK();
}
-Status preconditionOK(OperationContext* txn, const BSONObj& applyOpCmd, BSONObjBuilder* result) {
- dassert(txn->lockState()->isLockHeldForMode(
+Status preconditionOK(OperationContext* opCtx, const BSONObj& applyOpCmd, BSONObjBuilder* result) {
+ dassert(opCtx->lockState()->isLockHeldForMode(
ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL), MODE_X));
if (applyOpCmd["preCondition"].type() == Array) {
@@ -274,11 +274,11 @@ Status preconditionOK(OperationContext* txn, const BSONObj& applyOpCmd, BSONObjB
return {ErrorCodes::InvalidNamespace, "invalid ns: " + nss.ns()};
}
- DBDirectClient db(txn);
+ DBDirectClient db(opCtx);
BSONObj realres = db.findOne(nss.ns(), preCondition["q"].Obj());
// Get collection default collation.
- Database* database = dbHolder().get(txn, nss.db());
+ Database* database = dbHolder().get(opCtx, nss.db());
if (!database) {
return {ErrorCodes::NamespaceNotFound,
"database in ns does not exist: " + nss.ns()};
@@ -305,43 +305,43 @@ Status preconditionOK(OperationContext* txn, const BSONObj& applyOpCmd, BSONObjB
}
} // namespace
-Status applyOps(OperationContext* txn,
+Status applyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd,
BSONObjBuilder* result) {
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
+ ScopedTransaction scopedXact(opCtx, MODE_X);
+ Lock::GlobalWrite globalWriteLock(opCtx->lockState());
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(txn, dbName);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(opCtx, dbName);
if (userInitiatedWritesAndNotPrimary)
return Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while applying ops to database " << dbName);
- Status preconditionStatus = preconditionOK(txn, applyOpCmd, result);
+ Status preconditionStatus = preconditionOK(opCtx, applyOpCmd, result);
if (!preconditionStatus.isOK()) {
return preconditionStatus;
}
int numApplied = 0;
if (!canBeAtomic(applyOpCmd))
- return _applyOps(txn, dbName, applyOpCmd, result, &numApplied);
+ return _applyOps(opCtx, dbName, applyOpCmd, result, &numApplied);
// Perform write ops atomically
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
numApplied = 0;
- uassertStatusOK(_applyOps(txn, dbName, applyOpCmd, result, &numApplied));
+ uassertStatusOK(_applyOps(opCtx, dbName, applyOpCmd, result, &numApplied));
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "applyOps", dbName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "applyOps", dbName);
} catch (const DBException& ex) {
if (ex.getCode() == ErrorCodes::NamespaceNotFound) {
// Retry in non-atomic mode, since MMAP cannot implicitly create a new database
// within an active WriteUnitOfWork.
- return _applyOps(txn, dbName, applyOpCmd, result, &numApplied);
+ return _applyOps(opCtx, dbName, applyOpCmd, result, &numApplied);
}
BSONArrayBuilder ab;
++numApplied;
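
The hunks above thread the rename through the MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END macros, which bracket work that may throw WriteConflictException. A rough standalone sketch of the idiom those macros implement, pieced together from the calls visible in this diff (the helper name is illustrative, not the actual macro expansion):

    // Run "work" until it finishes without a write conflict, abandoning the
    // storage snapshot and backing off between attempts.
    template <typename Work>
    void retryOnWriteConflict(OperationContext* opCtx,
                              const char* opName,
                              const std::string& ns,
                              Work work) {
        int attempts = 0;
        while (true) {
            try {
                work();  // typically: WriteUnitOfWork wuow(opCtx); ... wuow.commit();
                return;
            } catch (const WriteConflictException& wce) {
                CurOp::get(opCtx)->debug().writeConflicts++;
                wce.logAndBackoff(++attempts, opName, ns);  // expects 1 on first call
                opCtx->recoveryUnit()->abandonSnapshot();
            }
        }
    }

cloneCollectionAsCapped in capped_utils.cpp below performs the same dance by hand, because it must save and restore its PlanExecutor around the call to abandonSnapshot().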
diff --git a/src/mongo/db/catalog/apply_ops.h b/src/mongo/db/catalog/apply_ops.h
index 588d3bb370b..3a742891573 100644
--- a/src/mongo/db/catalog/apply_ops.h
+++ b/src/mongo/db/catalog/apply_ops.h
@@ -37,7 +37,7 @@ class OperationContext;
* Applies ops contained in "applyOpCmd" and populates fields in "result" to be returned to the
* user.
*/
-Status applyOps(OperationContext* txn,
+Status applyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd,
BSONObjBuilder* result);
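
For reference, the command object this entry point consumes is an applyOps array of oplog-format operations; alwaysUpsert and preCondition are the optional fields handled in apply_ops.cpp above. A hypothetical invocation (namespace and document are illustrative):

    BSONObj cmd = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i"
                                                     << "ns" << "test.coll"
                                                     << "o" << BSON("_id" << 1)))
                                  << "alwaysUpsert" << true);
    BSONObjBuilder result;
    // applyOps acquires the global write lock itself (see apply_ops.cpp above).
    Status status = applyOps(opCtx, "test", cmd, &result);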
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index d6b11fe50dc..62b2eae8fbf 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -52,12 +52,12 @@
#include "mongo/util/scopeguard.h"
namespace mongo {
-Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName) {
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
+Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionName) {
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, collectionName.db(), MODE_X);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, collectionName);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, collectionName);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -71,7 +71,7 @@ Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName)
Collection* collection = db->getCollection(collectionName);
uassert(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "emptycapped not supported on view: " << collectionName.ns(),
- collection || !db->getViewCatalog()->lookup(txn, collectionName.ns()));
+ collection || !db->getViewCatalog()->lookup(opCtx, collectionName.ns()));
massert(28584, "no such collection", collection);
if (collectionName.isSystem() && !collectionName.isSystemDotProfile()) {
@@ -96,21 +96,21 @@ Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName)
BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
- Status status = collection->truncate(txn);
+ Status status = collection->truncate(opCtx);
if (!status.isOK()) {
return status;
}
- getGlobalServiceContext()->getOpObserver()->onEmptyCapped(txn, collection->ns());
+ getGlobalServiceContext()->getOpObserver()->onEmptyCapped(opCtx, collection->ns());
wuow.commit();
return Status::OK();
}
-Status cloneCollectionAsCapped(OperationContext* txn,
+Status cloneCollectionAsCapped(OperationContext* opCtx,
Database* db,
const std::string& shortFrom,
const std::string& shortTo,
@@ -121,7 +121,7 @@ Status cloneCollectionAsCapped(OperationContext* txn,
Collection* fromCollection = db->getCollection(fromNs);
if (!fromCollection) {
- if (db->getViewCatalog()->lookup(txn, fromNs)) {
+ if (db->getViewCatalog()->lookup(opCtx, fromNs)) {
return Status(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "cloneCollectionAsCapped not supported for views: "
<< fromNs);
@@ -136,8 +136,8 @@ Status cloneCollectionAsCapped(OperationContext* txn,
// create new collection
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
const auto fromOptions =
- fromCollection->getCatalogEntry()->getCollectionOptions(txn).toBSON();
- OldClientContext ctx(txn, toNs);
+ fromCollection->getCatalogEntry()->getCollectionOptions(opCtx).toBSON();
+ OldClientContext ctx(opCtx, toNs);
BSONObjBuilder spec;
spec.appendBool("capped", true);
spec.append("size", size);
@@ -145,13 +145,13 @@ Status cloneCollectionAsCapped(OperationContext* txn,
spec.appendBool("temp", true);
spec.appendElementsUnique(fromOptions);
- WriteUnitOfWork wunit(txn);
- Status status = userCreateNS(txn, ctx.db(), toNs, spec.done());
+ WriteUnitOfWork wunit(opCtx);
+ Status status = userCreateNS(opCtx, ctx.db(), toNs, spec.done());
if (!status.isOK())
return status;
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "cloneCollectionAsCapped", fromNs);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "cloneCollectionAsCapped", fromNs);
Collection* toCollection = db->getCollection(toNs);
invariant(toCollection); // we created above
@@ -161,12 +161,12 @@ Status cloneCollectionAsCapped(OperationContext* txn,
long long allocatedSpaceGuess =
std::max(static_cast<long long>(size * 2),
- static_cast<long long>(toCollection->getRecordStore()->storageSize(txn) * 2));
+ static_cast<long long>(toCollection->getRecordStore()->storageSize(opCtx) * 2));
- long long excessSize = fromCollection->dataSize(txn) - allocatedSpaceGuess;
+ long long excessSize = fromCollection->dataSize(opCtx) - allocatedSpaceGuess;
std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- txn, fromNs, fromCollection, PlanExecutor::YIELD_MANUAL, InternalPlanner::FORWARD));
+ opCtx, fromNs, fromCollection, PlanExecutor::YIELD_MANUAL, InternalPlanner::FORWARD));
exec->setYieldPolicy(PlanExecutor::WRITE_CONFLICT_RETRY_ONLY, fromCollection);
@@ -174,7 +174,7 @@ Status cloneCollectionAsCapped(OperationContext* txn,
RecordId loc;
PlanExecutor::ExecState state = PlanExecutor::FAILURE; // suppress uninitialized warnings
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(opCtx);
int retries = 0; // non-zero when retrying our last document.
while (true) {
@@ -205,30 +205,30 @@ Status cloneCollectionAsCapped(OperationContext* txn,
try {
// Make sure we are working with the latest version of the document.
- if (objToClone.snapshotId() != txn->recoveryUnit()->getSnapshotId() &&
- !fromCollection->findDoc(txn, loc, &objToClone)) {
+ if (objToClone.snapshotId() != opCtx->recoveryUnit()->getSnapshotId() &&
+ !fromCollection->findDoc(opCtx, loc, &objToClone)) {
// doc was deleted so don't clone it.
retries = 0;
continue;
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
OpDebug* const nullOpDebug = nullptr;
toCollection->insertDocument(
- txn, objToClone.value(), nullOpDebug, true, txn->writesAreReplicated());
+ opCtx, objToClone.value(), nullOpDebug, true, opCtx->writesAreReplicated());
wunit.commit();
// Go to the next document
retries = 0;
} catch (const WriteConflictException& wce) {
- CurOp::get(txn)->debug().writeConflicts++;
+ CurOp::get(opCtx)->debug().writeConflicts++;
retries++; // logAndBackoff expects this to be 1 on first call.
wce.logAndBackoff(retries, "cloneCollectionAsCapped", fromNs);
// Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
// around call to abandonSnapshot.
exec->saveState();
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
exec->restoreState(); // Handles any WCEs internally.
}
}
@@ -236,15 +236,17 @@ Status cloneCollectionAsCapped(OperationContext* txn,
invariant(false); // unreachable
}
-Status convertToCapped(OperationContext* txn, const NamespaceString& collectionName, double size) {
+Status convertToCapped(OperationContext* opCtx,
+ const NamespaceString& collectionName,
+ double size) {
StringData dbname = collectionName.db();
StringData shortSource = collectionName.coll();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, collectionName.db(), MODE_X);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, collectionName);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, collectionName);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -264,18 +266,18 @@ Status convertToCapped(OperationContext* txn, const NamespaceString& collectionN
std::string longTmpName = str::stream() << dbname << "." << shortTmpName;
if (db->getCollection(longTmpName)) {
- WriteUnitOfWork wunit(txn);
- Status status = db->dropCollection(txn, longTmpName);
+ WriteUnitOfWork wunit(opCtx);
+ Status status = db->dropCollection(opCtx, longTmpName);
if (!status.isOK())
return status;
}
- const bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
+ const bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
Status status =
- cloneCollectionAsCapped(txn, db, shortSource.toString(), shortTmpName, size, true);
+ cloneCollectionAsCapped(opCtx, db, shortSource.toString(), shortTmpName, size, true);
if (!status.isOK()) {
return status;
@@ -284,18 +286,18 @@ Status convertToCapped(OperationContext* txn, const NamespaceString& collectionN
verify(db->getCollection(longTmpName));
{
- WriteUnitOfWork wunit(txn);
- status = db->dropCollection(txn, collectionName.ns());
- txn->setReplicatedWrites(shouldReplicateWrites);
+ WriteUnitOfWork wunit(opCtx);
+ status = db->dropCollection(opCtx, collectionName.ns());
+ opCtx->setReplicatedWrites(shouldReplicateWrites);
if (!status.isOK())
return status;
- status = db->renameCollection(txn, longTmpName, collectionName.ns(), false);
+ status = db->renameCollection(opCtx, longTmpName, collectionName.ns(), false);
if (!status.isOK())
return status;
getGlobalServiceContext()->getOpObserver()->onConvertToCapped(
- txn, NamespaceString(collectionName), size);
+ opCtx, NamespaceString(collectionName), size);
wunit.commit();
}
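
Taken together, the hunks above leave convertToCapped's shape intact: clone the source into a temporary capped collection with replication disabled, then drop the original and rename the temp into place inside one WriteUnitOfWork, logging a single onConvertToCapped op. A condensed sketch of that flow (error handling elided; makeTempCollName is a hypothetical stand-in for the temp-name construction):

    Status convertToCappedSketch(OperationContext* opCtx,
                                 const NamespaceString& nss,
                                 double size) {
        ScopedTransaction transaction(opCtx, MODE_IX);
        AutoGetDb autoDb(opCtx, nss.db(), MODE_X);  // exclusive database lock
        Database* db = autoDb.getDb();
        const std::string shortTmp = makeTempCollName(nss.coll());  // hypothetical
        const std::string longTmp = nss.db().toString() + "." + shortTmp;

        // 1) Clone into a temporary capped collection with replication off;
        //    the previous setting is restored on scope exit.
        const bool wasReplicating = opCtx->writesAreReplicated();
        opCtx->setReplicatedWrites(false);
        ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, wasReplicating);
        cloneCollectionAsCapped(opCtx, db, nss.coll().toString(), shortTmp, size, true);

        // 2) Swap the temp collection into place and log one replicated op.
        WriteUnitOfWork wunit(opCtx);
        opCtx->setReplicatedWrites(wasReplicating);
        db->dropCollection(opCtx, nss.ns());
        db->renameCollection(opCtx, longTmp, nss.ns(), false);
        getGlobalServiceContext()->getOpObserver()->onConvertToCapped(opCtx, nss, size);
        wunit.commit();
        return Status::OK();
    }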
diff --git a/src/mongo/db/catalog/capped_utils.h b/src/mongo/db/catalog/capped_utils.h
index f7be6dc427e..9ae6a41c505 100644
--- a/src/mongo/db/catalog/capped_utils.h
+++ b/src/mongo/db/catalog/capped_utils.h
@@ -36,12 +36,12 @@ class OperationContext;
/**
* Drops all documents contained in the capped collection, "collectionName".
*/
-Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName);
+Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionName);
/**
* Clones the collection "shortFrom" to the capped collection "shortTo" with a size of "size".
*/
-Status cloneCollectionAsCapped(OperationContext* txn,
+Status cloneCollectionAsCapped(OperationContext* opCtx,
Database* db,
const std::string& shortFrom,
const std::string& shortTo,
@@ -51,5 +51,5 @@ Status cloneCollectionAsCapped(OperationContext* txn,
/**
* Converts the collection "collectionName" to a capped collection with a size of "size".
*/
-Status convertToCapped(OperationContext* txn, const NamespaceString& collectionName, double size);
+Status convertToCapped(OperationContext* opCtx, const NamespaceString& collectionName, double size);
} // namespace mongo
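
A hypothetical call to the simplest of these helpers (the namespace is illustrative; emptyCapped takes its own locks and WriteUnitOfWork internally, per capped_utils.cpp above):

    Status status = emptyCapped(opCtx, NamespaceString("test.cappedColl"));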
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 61f2abc1e3f..fdbfd26677b 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -60,7 +60,7 @@ struct CollModRequest {
BSONElement noPadding = {};
};
-StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
+StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
const NamespaceString& nss,
Collection* coll,
const BSONObj& cmdObj) {
@@ -117,7 +117,7 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
}
if (!indexName.empty()) {
- cmr.idx = coll->getIndexCatalog()->findIndexByName(txn, indexName);
+ cmr.idx = coll->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!cmr.idx) {
return Status(ErrorCodes::IndexNotFound,
str::stream() << "cannot find index " << indexName << " for ns "
@@ -125,7 +125,8 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
}
} else {
std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(txn, keyPattern, false, &indexes);
+ coll->getIndexCatalog()->findIndexesByKeyPattern(
+ opCtx, keyPattern, false, &indexes);
if (indexes.size() > 1) {
return Status(ErrorCodes::AmbiguousIndexKeyPattern,
@@ -214,20 +215,20 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
return {std::move(cmr)};
}
-Status collMod(OperationContext* txn,
+Status collMod(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
StringData dbName = nss.db();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbName, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, dbName, MODE_X);
Database* const db = autoDb.getDb();
Collection* coll = db ? db->getCollection(nss) : nullptr;
// May also modify a view instead of a collection.
boost::optional<ViewDefinition> view;
if (db && !coll) {
- const auto sharedView = db->getViewCatalog()->lookup(txn, nss.ns());
+ const auto sharedView = db->getViewCatalog()->lookup(opCtx, nss.ns());
if (sharedView) {
// We copy the ViewDefinition as it is modified below to represent the requested state.
view = {*sharedView};
@@ -243,10 +244,10 @@ Status collMod(OperationContext* txn,
return Status(ErrorCodes::NamespaceNotFound, "ns does not exist");
}
- OldClientContext ctx(txn, nss.ns());
+ OldClientContext ctx(opCtx, nss.ns());
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -254,14 +255,14 @@ Status collMod(OperationContext* txn,
<< nss.ns());
}
- auto statusW = parseCollModRequest(txn, nss, coll, cmdObj);
+ auto statusW = parseCollModRequest(opCtx, nss, coll, cmdObj);
if (!statusW.isOK()) {
return statusW.getStatus();
}
CollModRequest cmr = statusW.getValue();
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
if (view) {
if (!cmr.viewPipeLine.eoo())
@@ -276,7 +277,8 @@ Status collMod(OperationContext* txn,
for (auto& item : view->pipeline()) {
pipeline.append(item);
}
- auto errorStatus = catalog->modifyView(txn, nss, view->viewOn(), BSONArray(pipeline.obj()));
+ auto errorStatus =
+ catalog->modifyView(opCtx, nss, view->viewOn(), BSONArray(pipeline.obj()));
if (!errorStatus.isOK()) {
return errorStatus;
}
@@ -289,21 +291,21 @@ Status collMod(OperationContext* txn,
result->appendAs(oldExpireSecs, "expireAfterSeconds_old");
// Change the value of "expireAfterSeconds" on disk.
coll->getCatalogEntry()->updateTTLSetting(
- txn, cmr.idx->indexName(), newExpireSecs.safeNumberLong());
+ opCtx, cmr.idx->indexName(), newExpireSecs.safeNumberLong());
// Notify the index catalog that the definition of this index changed.
- cmr.idx = coll->getIndexCatalog()->refreshEntry(txn, cmr.idx);
+ cmr.idx = coll->getIndexCatalog()->refreshEntry(opCtx, cmr.idx);
result->appendAs(newExpireSecs, "expireAfterSeconds_new");
}
}
if (!cmr.collValidator.eoo())
- coll->setValidator(txn, cmr.collValidator.Obj());
+ coll->setValidator(opCtx, cmr.collValidator.Obj());
if (!cmr.collValidationAction.empty())
- coll->setValidationAction(txn, cmr.collValidationAction);
+ coll->setValidationAction(opCtx, cmr.collValidationAction);
if (!cmr.collValidationLevel.empty())
- coll->setValidationLevel(txn, cmr.collValidationLevel);
+ coll->setValidationLevel(opCtx, cmr.collValidationLevel);
auto setCollectionOption = [&](BSONElement& COElement) {
typedef CollectionOptions CO;
@@ -315,7 +317,7 @@ Status collMod(OperationContext* txn,
CollectionCatalogEntry* cce = coll->getCatalogEntry();
- const int oldFlags = cce->getCollectionOptions(txn).flags;
+ const int oldFlags = cce->getCollectionOptions(opCtx).flags;
const bool oldSetting = oldFlags & flag;
const bool newSetting = COElement.trueValue();
@@ -327,9 +329,9 @@ Status collMod(OperationContext* txn,
// NOTE we do this unconditionally to ensure that we note that the user has
// explicitly set flags, even if they are just setting the default.
- cce->updateFlags(txn, newFlags);
+ cce->updateFlags(opCtx, newFlags);
- const CollectionOptions newOptions = cce->getCollectionOptions(txn);
+ const CollectionOptions newOptions = cce->getCollectionOptions(opCtx);
invariant(newOptions.flags == newFlags);
invariant(newOptions.flagsSet);
};
@@ -345,7 +347,7 @@ Status collMod(OperationContext* txn,
// Only observe non-view collMods, as view operations are observed as operations on the
// system.views collection.
getGlobalServiceContext()->getOpObserver()->onCollMod(
- txn, (dbName.toString() + ".$cmd").c_str(), cmdObj);
+ opCtx, (dbName.toString() + ".$cmd").c_str(), cmdObj);
}
wunit.commit();
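
As parsed above, a TTL change arrives as an "index" subdocument that names the target index (by name or keyPattern) together with the new expireAfterSeconds; the old and new values are echoed back in the result. A hypothetical invocation (index name and TTL value are illustrative):

    BSONObj cmd = BSON("collMod" << nss.coll()
                                 << "index" << BSON("name" << "createdAt_1"
                                                    << "expireAfterSeconds" << 3600));
    BSONObjBuilder result;
    Status status = collMod(opCtx, nss, cmd, &result);
    // On success, result carries expireAfterSeconds_old and expireAfterSeconds_new.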
diff --git a/src/mongo/db/catalog/coll_mod.h b/src/mongo/db/catalog/coll_mod.h
index bb86bbc50d1..9442fc7d82e 100644
--- a/src/mongo/db/catalog/coll_mod.h
+++ b/src/mongo/db/catalog/coll_mod.h
@@ -38,7 +38,7 @@ class OperationContext;
struct CollModRequest;
-StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
+StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
const NamespaceString& nss,
Collection* coll,
const BSONObj& cmdObj);
@@ -46,7 +46,7 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
/**
* Performs the collection modification described in "cmdObj" on the collection "ns".
*/
-Status collMod(OperationContext* txn,
+Status collMod(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& cmdObj,
BSONObjBuilder* result);
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 1edad781f0c..8ef63ac7ad7 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -104,7 +104,7 @@ Status checkValidatorForBannedExpressions(const BSONObj& validator) {
// Uses the collator factory to convert the BSON representation of a collator to a
// CollatorInterface. Returns null if the BSONObj is empty. We expect the stored collation to be
// valid, since it gets validated on collection create.
-std::unique_ptr<CollatorInterface> parseCollation(OperationContext* txn,
+std::unique_ptr<CollatorInterface> parseCollation(OperationContext* opCtx,
const NamespaceString& nss,
BSONObj collationSpec) {
if (collationSpec.isEmpty()) {
@@ -112,7 +112,7 @@ std::unique_ptr<CollatorInterface> parseCollation(OperationContext* txn,
}
auto collator =
- CollatorFactoryInterface::get(txn->getServiceContext())->makeFromBSON(collationSpec);
+ CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collationSpec);
// If the collection's default collator has a version not currently supported by our ICU
// integration, shut down the server. Errors other than IncompatibleCollationVersion should not
@@ -209,7 +209,7 @@ bool CappedInsertNotifier::isDead() {
// ----
-Collection::Collection(OperationContext* txn,
+Collection::Collection(OperationContext* opCtx,
StringData fullNS,
CollectionCatalogEntry* details,
RecordStore* recordStore,
@@ -221,23 +221,23 @@ Collection::Collection(OperationContext* txn,
_needCappedLock(supportsDocLocking() && _recordStore->isCapped() && _ns.db() != "local"),
_infoCache(this),
_indexCatalog(this),
- _collator(parseCollation(txn, _ns, _details->getCollectionOptions(txn).collation)),
- _validatorDoc(_details->getCollectionOptions(txn).validator.getOwned()),
+ _collator(parseCollation(opCtx, _ns, _details->getCollectionOptions(opCtx).collation)),
+ _validatorDoc(_details->getCollectionOptions(opCtx).validator.getOwned()),
_validator(uassertStatusOK(parseValidator(_validatorDoc))),
_validationAction(uassertStatusOK(
- parseValidationAction(_details->getCollectionOptions(txn).validationAction))),
+ parseValidationAction(_details->getCollectionOptions(opCtx).validationAction))),
_validationLevel(uassertStatusOK(
- parseValidationLevel(_details->getCollectionOptions(txn).validationLevel))),
+ parseValidationLevel(_details->getCollectionOptions(opCtx).validationLevel))),
_cursorManager(fullNS),
_cappedNotifier(_recordStore->isCapped() ? new CappedInsertNotifier() : nullptr),
_mustTakeCappedLockOnInsert(isCapped() && !_ns.isSystemDotProfile() && !_ns.isOplog()) {
_magic = 1357924;
- _indexCatalog.init(txn);
+ _indexCatalog.init(opCtx);
if (isCapped())
_recordStore->setCappedCallback(this);
- _infoCache.init(txn);
+ _infoCache.init(opCtx);
}
Collection::~Collection() {
@@ -265,45 +265,45 @@ bool Collection::requiresIdIndex() const {
return true;
}
-std::unique_ptr<SeekableRecordCursor> Collection::getCursor(OperationContext* txn,
+std::unique_ptr<SeekableRecordCursor> Collection::getCursor(OperationContext* opCtx,
bool forward) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
invariant(ok());
- return _recordStore->getCursor(txn, forward);
+ return _recordStore->getCursor(opCtx, forward);
}
-vector<std::unique_ptr<RecordCursor>> Collection::getManyCursors(OperationContext* txn) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+vector<std::unique_ptr<RecordCursor>> Collection::getManyCursors(OperationContext* opCtx) const {
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- return _recordStore->getManyCursors(txn);
+ return _recordStore->getManyCursors(opCtx);
}
-Snapshotted<BSONObj> Collection::docFor(OperationContext* txn, const RecordId& loc) const {
- return Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(),
- _recordStore->dataFor(txn, loc).releaseToBson());
+Snapshotted<BSONObj> Collection::docFor(OperationContext* opCtx, const RecordId& loc) const {
+ return Snapshotted<BSONObj>(opCtx->recoveryUnit()->getSnapshotId(),
+ _recordStore->dataFor(opCtx, loc).releaseToBson());
}
-bool Collection::findDoc(OperationContext* txn,
+bool Collection::findDoc(OperationContext* opCtx,
const RecordId& loc,
Snapshotted<BSONObj>* out) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
RecordData rd;
- if (!_recordStore->findRecord(txn, loc, &rd))
+ if (!_recordStore->findRecord(opCtx, loc, &rd))
return false;
- *out = Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(), rd.releaseToBson());
+ *out = Snapshotted<BSONObj>(opCtx->recoveryUnit()->getSnapshotId(), rd.releaseToBson());
return true;
}
-Status Collection::checkValidation(OperationContext* txn, const BSONObj& document) const {
+Status Collection::checkValidation(OperationContext* opCtx, const BSONObj& document) const {
if (!_validator)
return Status::OK();
if (_validationLevel == OFF)
return Status::OK();
- if (documentValidationDisabled(txn))
+ if (documentValidationDisabled(opCtx))
return Status::OK();
if (_validator->matchesBSON(document))
@@ -349,10 +349,10 @@ StatusWithMatchExpression Collection::parseValidator(const BSONObj& validator) c
return statusWithMatcher;
}
-Status Collection::insertDocumentsForOplog(OperationContext* txn,
+Status Collection::insertDocumentsForOplog(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
// Since this is only for the OpLog, we can assume these for simplicity.
// This also means that we do not need to forward this object to the OpObserver, which is good
@@ -361,17 +361,17 @@ Status Collection::insertDocumentsForOplog(OperationContext* txn,
invariant(!_indexCatalog.haveAnyIndexes());
invariant(!_mustTakeCappedLockOnInsert);
- Status status = _recordStore->insertRecordsWithDocWriter(txn, docs, nDocs);
+ Status status = _recordStore->insertRecordsWithDocWriter(opCtx, docs, nDocs);
if (!status.isOK())
return status;
- txn->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
+ opCtx->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
return status;
}
-Status Collection::insertDocuments(OperationContext* txn,
+Status Collection::insertDocuments(OperationContext* opCtx,
const vector<BSONObj>::const_iterator begin,
const vector<BSONObj>::const_iterator end,
OpDebug* opDebug,
@@ -392,7 +392,7 @@ Status Collection::insertDocuments(OperationContext* txn,
}
// Should really be done in the collection object at creation and updated on index create.
- const bool hasIdIndex = _indexCatalog.findIdIndex(txn);
+ const bool hasIdIndex = _indexCatalog.findIdIndex(opCtx);
for (auto it = begin; it != end; it++) {
if (hasIdIndex && (*it)["_id"].eoo()) {
@@ -402,39 +402,39 @@ Status Collection::insertDocuments(OperationContext* txn,
<< _ns.ns());
}
- auto status = checkValidation(txn, *it);
+ auto status = checkValidation(opCtx, *it);
if (!status.isOK())
return status;
}
- const SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
+ const SnapshotId sid = opCtx->recoveryUnit()->getSnapshotId();
if (_mustTakeCappedLockOnInsert)
- synchronizeOnCappedInFlightResource(txn->lockState(), _ns);
+ synchronizeOnCappedInFlightResource(opCtx->lockState(), _ns);
- Status status = _insertDocuments(txn, begin, end, enforceQuota, opDebug);
+ Status status = _insertDocuments(opCtx, begin, end, enforceQuota, opDebug);
if (!status.isOK())
return status;
- invariant(sid == txn->recoveryUnit()->getSnapshotId());
+ invariant(sid == opCtx->recoveryUnit()->getSnapshotId());
- getGlobalServiceContext()->getOpObserver()->onInserts(txn, ns(), begin, end, fromMigrate);
+ getGlobalServiceContext()->getOpObserver()->onInserts(opCtx, ns(), begin, end, fromMigrate);
- txn->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
+ opCtx->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
return Status::OK();
}
-Status Collection::insertDocument(OperationContext* txn,
+Status Collection::insertDocument(OperationContext* opCtx,
const BSONObj& docToInsert,
OpDebug* opDebug,
bool enforceQuota,
bool fromMigrate) {
vector<BSONObj> docs;
docs.push_back(docToInsert);
- return insertDocuments(txn, docs.begin(), docs.end(), opDebug, enforceQuota, fromMigrate);
+ return insertDocuments(opCtx, docs.begin(), docs.end(), opDebug, enforceQuota, fromMigrate);
}
-Status Collection::insertDocument(OperationContext* txn,
+Status Collection::insertDocument(OperationContext* opCtx,
const BSONObj& doc,
const std::vector<MultiIndexBlock*>& indexBlocks,
bool enforceQuota) {
@@ -453,18 +453,18 @@ Status Collection::insertDocument(OperationContext* txn,
}
{
- auto status = checkValidation(txn, doc);
+ auto status = checkValidation(opCtx, doc);
if (!status.isOK())
return status;
}
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
if (_mustTakeCappedLockOnInsert)
- synchronizeOnCappedInFlightResource(txn->lockState(), _ns);
+ synchronizeOnCappedInFlightResource(opCtx->lockState(), _ns);
- StatusWith<RecordId> loc =
- _recordStore->insertRecord(txn, doc.objdata(), doc.objsize(), _enforceQuota(enforceQuota));
+ StatusWith<RecordId> loc = _recordStore->insertRecord(
+ opCtx, doc.objdata(), doc.objsize(), _enforceQuota(enforceQuota));
if (!loc.isOK())
return loc.getStatus();
@@ -480,19 +480,19 @@ Status Collection::insertDocument(OperationContext* txn,
docs.push_back(doc);
getGlobalServiceContext()->getOpObserver()->onInserts(
- txn, ns(), docs.begin(), docs.end(), false);
+ opCtx, ns(), docs.begin(), docs.end(), false);
- txn->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
+ opCtx->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
return loc.getStatus();
}
-Status Collection::_insertDocuments(OperationContext* txn,
+Status Collection::_insertDocuments(OperationContext* opCtx,
const vector<BSONObj>::const_iterator begin,
const vector<BSONObj>::const_iterator end,
bool enforceQuota,
OpDebug* opDebug) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
const size_t count = std::distance(begin, end);
if (isCapped() && _indexCatalog.haveAnyIndexes() && count > 1) {
@@ -509,7 +509,7 @@ Status Collection::_insertDocuments(OperationContext* txn,
// prevents the primary from executing with more concurrency than secondaries.
// See SERVER-21646.
Lock::ResourceLock heldUntilEndOfWUOW{
- txn->lockState(), ResourceId(RESOURCE_METADATA, _ns.ns()), MODE_X};
+ opCtx->lockState(), ResourceId(RESOURCE_METADATA, _ns.ns()), MODE_X};
}
std::vector<Record> records;
@@ -518,7 +518,7 @@ Status Collection::_insertDocuments(OperationContext* txn,
Record record = {RecordId(), RecordData(it->objdata(), it->objsize())};
records.push_back(record);
}
- Status status = _recordStore->insertRecords(txn, &records, _enforceQuota(enforceQuota));
+ Status status = _recordStore->insertRecords(opCtx, &records, _enforceQuota(enforceQuota));
if (!status.isOK())
return status;
@@ -535,7 +535,7 @@ Status Collection::_insertDocuments(OperationContext* txn,
}
int64_t keysInserted;
- status = _indexCatalog.indexRecords(txn, bsonRecords, &keysInserted);
+ status = _indexCatalog.indexRecords(opCtx, bsonRecords, &keysInserted);
if (opDebug) {
opDebug->keysInserted += keysInserted;
}
@@ -551,15 +551,15 @@ void Collection::notifyCappedWaitersIfNeeded() {
_cappedNotifier->notifyAll();
}
-Status Collection::aboutToDeleteCapped(OperationContext* txn,
+Status Collection::aboutToDeleteCapped(OperationContext* opCtx,
const RecordId& loc,
RecordData data) {
/* check if any cursors point to us. if so, advance them. */
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
+ _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_DELETION);
BSONObj doc = data.releaseToBson();
int64_t* const nullKeysDeleted = nullptr;
- _indexCatalog.unindexRecord(txn, doc, loc, false, nullKeysDeleted);
+ _indexCatalog.unindexRecord(opCtx, doc, loc, false, nullKeysDeleted);
// We are not capturing and reporting to OpDebug the 'keysDeleted' by unindexRecord(). It is
// questionable whether reporting will add diagnostic value to users and may instead be
@@ -571,37 +571,37 @@ Status Collection::aboutToDeleteCapped(OperationContext* txn,
}
void Collection::deleteDocument(
- OperationContext* txn, const RecordId& loc, OpDebug* opDebug, bool fromMigrate, bool noWarn) {
+ OperationContext* opCtx, const RecordId& loc, OpDebug* opDebug, bool fromMigrate, bool noWarn) {
if (isCapped()) {
log() << "failing remove on a capped ns " << _ns;
uasserted(10089, "cannot remove from a capped collection");
return;
}
- Snapshotted<BSONObj> doc = docFor(txn, loc);
+ Snapshotted<BSONObj> doc = docFor(opCtx, loc);
auto deleteState =
- getGlobalServiceContext()->getOpObserver()->aboutToDelete(txn, ns(), doc.value());
+ getGlobalServiceContext()->getOpObserver()->aboutToDelete(opCtx, ns(), doc.value());
/* check if any cursors point to us. if so, advance them. */
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
+ _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_DELETION);
int64_t keysDeleted;
- _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn, &keysDeleted);
+ _indexCatalog.unindexRecord(opCtx, doc.value(), loc, noWarn, &keysDeleted);
if (opDebug) {
opDebug->keysDeleted += keysDeleted;
}
- _recordStore->deleteRecord(txn, loc);
+ _recordStore->deleteRecord(opCtx, loc);
getGlobalServiceContext()->getOpObserver()->onDelete(
- txn, ns(), std::move(deleteState), fromMigrate);
+ opCtx, ns(), std::move(deleteState), fromMigrate);
}
Counter64 moveCounter;
ServerStatusMetricField<Counter64> moveCounterDisplay("record.moves", &moveCounter);
-StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
+StatusWith<RecordId> Collection::updateDocument(OperationContext* opCtx,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
@@ -610,13 +610,13 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
OpDebug* opDebug,
OplogUpdateEntryArgs* args) {
{
- auto status = checkValidation(txn, newDoc);
+ auto status = checkValidation(opCtx, newDoc);
if (!status.isOK()) {
if (_validationLevel == STRICT_V) {
return status;
}
// moderate means we have to check the old doc
- auto oldDocStatus = checkValidation(txn, oldDoc.value());
+ auto oldDocStatus = checkValidation(opCtx, oldDoc.value());
if (oldDocStatus.isOK()) {
// transitioning from good -> bad is not ok
return status;
@@ -625,8 +625,8 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
}
}
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant(oldDoc.snapshotId() == txn->recoveryUnit()->getSnapshotId());
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(oldDoc.snapshotId() == opCtx->recoveryUnit()->getSnapshotId());
invariant(newDoc.isOwned());
if (_needCappedLock) {
@@ -634,10 +634,10 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
// prevents the primary from executing with more concurrency than secondaries.
// See SERVER-21646.
Lock::ResourceLock heldUntilEndOfWUOW{
- txn->lockState(), ResourceId(RESOURCE_METADATA, _ns.ns()), MODE_X};
+ opCtx->lockState(), ResourceId(RESOURCE_METADATA, _ns.ns()), MODE_X};
}
- SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
+ SnapshotId sid = opCtx->recoveryUnit()->getSnapshotId();
BSONElement oldId = oldDoc.value()["_id"];
if (!oldId.eoo() && SimpleBSONElementComparator::kInstance.evaluate(oldId != newDoc["_id"]))
@@ -664,17 +664,17 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
// newDoc.
OwnedPointerMap<IndexDescriptor*, UpdateTicket> updateTickets;
if (indexesAffected) {
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(opCtx, true);
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexCatalogEntry* entry = ii.catalogEntry(descriptor);
IndexAccessMethod* iam = ii.accessMethod(descriptor);
InsertDeleteOptions options;
- IndexCatalog::prepareInsertDeleteOptions(txn, descriptor, &options);
+ IndexCatalog::prepareInsertDeleteOptions(opCtx, descriptor, &options);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
- Status ret = iam->validateUpdate(txn,
+ Status ret = iam->validateUpdate(opCtx,
oldDoc.value(),
newDoc,
oldLocation,
@@ -688,18 +688,18 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
}
Status updateStatus = _recordStore->updateRecord(
- txn, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);
+ opCtx, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);
if (updateStatus == ErrorCodes::NeedsDocumentMove) {
return _updateDocumentWithMove(
- txn, oldLocation, oldDoc, newDoc, enforceQuota, opDebug, args, sid);
+ opCtx, oldLocation, oldDoc, newDoc, enforceQuota, opDebug, args, sid);
} else if (!updateStatus.isOK()) {
return updateStatus;
}
// Object did not move. We update each index with each respective UpdateTicket.
if (indexesAffected) {
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(opCtx, true);
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = ii.accessMethod(descriptor);
@@ -707,7 +707,7 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
int64_t keysInserted;
int64_t keysDeleted;
Status ret = iam->update(
- txn, *updateTickets.mutableMap()[descriptor], &keysInserted, &keysDeleted);
+ opCtx, *updateTickets.mutableMap()[descriptor], &keysInserted, &keysDeleted);
if (!ret.isOK())
return StatusWith<RecordId>(ret);
if (opDebug) {
@@ -717,15 +717,15 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
}
}
- invariant(sid == txn->recoveryUnit()->getSnapshotId());
+ invariant(sid == opCtx->recoveryUnit()->getSnapshotId());
args->updatedDoc = newDoc;
- getGlobalServiceContext()->getOpObserver()->onUpdate(txn, *args);
+ getGlobalServiceContext()->getOpObserver()->onUpdate(opCtx, *args);
return {oldLocation};
}
-StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* txn,
+StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* opCtx,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
@@ -735,21 +735,21 @@ StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* txn,
const SnapshotId& sid) {
// Insert new record.
StatusWith<RecordId> newLocation = _recordStore->insertRecord(
- txn, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota));
+ opCtx, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota));
if (!newLocation.isOK()) {
return newLocation;
}
invariant(newLocation.getValue() != oldLocation);
- _cursorManager.invalidateDocument(txn, oldLocation, INVALIDATION_DELETION);
+ _cursorManager.invalidateDocument(opCtx, oldLocation, INVALIDATION_DELETION);
// Remove indexes for old record.
int64_t keysDeleted;
- _indexCatalog.unindexRecord(txn, oldDoc.value(), oldLocation, true, &keysDeleted);
+ _indexCatalog.unindexRecord(opCtx, oldDoc.value(), oldLocation, true, &keysDeleted);
// Remove old record.
- _recordStore->deleteRecord(txn, oldLocation);
+ _recordStore->deleteRecord(opCtx, oldLocation);
std::vector<BsonRecord> bsonRecords;
BsonRecord bsonRecord = {newLocation.getValue(), &newDoc};
@@ -757,15 +757,15 @@ StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* txn,
// Add indexes for new record.
int64_t keysInserted;
- Status status = _indexCatalog.indexRecords(txn, bsonRecords, &keysInserted);
+ Status status = _indexCatalog.indexRecords(opCtx, bsonRecords, &keysInserted);
if (!status.isOK()) {
return StatusWith<RecordId>(status);
}
- invariant(sid == txn->recoveryUnit()->getSnapshotId());
+ invariant(sid == opCtx->recoveryUnit()->getSnapshotId());
args->updatedDoc = newDoc;
- getGlobalServiceContext()->getOpObserver()->onUpdate(txn, *args);
+ getGlobalServiceContext()->getOpObserver()->onUpdate(opCtx, *args);
moveCounter.increment();
if (opDebug) {
@@ -777,9 +777,9 @@ StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* txn,
return newLocation;
}
-Status Collection::recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) {
+Status Collection::recordStoreGoingToUpdateInPlace(OperationContext* opCtx, const RecordId& loc) {
// Broadcast the mutation so that query results stay correct.
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
+ _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_MUTATION);
return Status::OK();
}
@@ -792,26 +792,26 @@ bool Collection::updateWithDamagesSupported() const {
}
StatusWith<RecordData> Collection::updateDocumentWithDamages(
- OperationContext* txn,
+ OperationContext* opCtx,
const RecordId& loc,
const Snapshotted<RecordData>& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages,
OplogUpdateEntryArgs* args) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant(oldRec.snapshotId() == txn->recoveryUnit()->getSnapshotId());
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(oldRec.snapshotId() == opCtx->recoveryUnit()->getSnapshotId());
invariant(updateWithDamagesSupported());
// Broadcast the mutation so that query results stay correct.
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
+ _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_MUTATION);
auto newRecStatus =
- _recordStore->updateWithDamages(txn, loc, oldRec.value(), damageSource, damages);
+ _recordStore->updateWithDamages(opCtx, loc, oldRec.value(), damageSource, damages);
if (newRecStatus.isOK()) {
args->updatedDoc = newRecStatus.getValue().toBson();
- getGlobalServiceContext()->getOpObserver()->onUpdate(txn, *args);
+ getGlobalServiceContext()->getOpObserver()->onUpdate(opCtx, *args);
}
return newRecStatus;
}
@@ -841,12 +841,12 @@ std::shared_ptr<CappedInsertNotifier> Collection::getCappedInsertNotifier() cons
return _cappedNotifier;
}
-uint64_t Collection::numRecords(OperationContext* txn) const {
- return _recordStore->numRecords(txn);
+uint64_t Collection::numRecords(OperationContext* opCtx) const {
+ return _recordStore->numRecords(opCtx);
}
-uint64_t Collection::dataSize(OperationContext* txn) const {
- return _recordStore->dataSize(txn);
+uint64_t Collection::dataSize(OperationContext* opCtx) const {
+ return _recordStore->dataSize(opCtx);
}
uint64_t Collection::getIndexSize(OperationContext* opCtx, BSONObjBuilder* details, int scale) {
@@ -878,15 +878,15 @@ uint64_t Collection::getIndexSize(OperationContext* opCtx, BSONObjBuilder* detai
* 3) truncate record store
* 4) re-write indexes
*/
-Status Collection::truncate(OperationContext* txn) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+Status Collection::truncate(OperationContext* opCtx) {
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
BackgroundOperation::assertNoBgOpInProgForNs(ns());
- invariant(_indexCatalog.numIndexesInProgress(txn) == 0);
+ invariant(_indexCatalog.numIndexesInProgress(opCtx) == 0);
// 1) store index specs
vector<BSONObj> indexSpecs;
{
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(opCtx, false);
while (ii.more()) {
const IndexDescriptor* idx = ii.next();
indexSpecs.push_back(idx->infoObj().getOwned());
@@ -894,19 +894,19 @@ Status Collection::truncate(OperationContext* txn) {
}
// 2) drop indexes
- Status status = _indexCatalog.dropAllIndexes(txn, true);
+ Status status = _indexCatalog.dropAllIndexes(opCtx, true);
if (!status.isOK())
return status;
_cursorManager.invalidateAll(false, "collection truncated");
// 3) truncate record store
- status = _recordStore->truncate(txn);
+ status = _recordStore->truncate(opCtx);
if (!status.isOK())
return status;
// 4) re-create indexes
for (size_t i = 0; i < indexSpecs.size(); i++) {
- status = _indexCatalog.createIndexOnEmptyCollection(txn, indexSpecs[i]).getStatus();
+ status = _indexCatalog.createIndexOnEmptyCollection(opCtx, indexSpecs[i]).getStatus();
if (!status.isOK())
return status;
}
@@ -914,18 +914,18 @@ Status Collection::truncate(OperationContext* txn) {
return Status::OK();
}
-void Collection::cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+void Collection::cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
invariant(isCapped());
BackgroundOperation::assertNoBgOpInProgForNs(ns());
- invariant(_indexCatalog.numIndexesInProgress(txn) == 0);
+ invariant(_indexCatalog.numIndexesInProgress(opCtx) == 0);
_cursorManager.invalidateAll(false, "capped collection truncated");
- _recordStore->cappedTruncateAfter(txn, end, inclusive);
+ _recordStore->cappedTruncateAfter(opCtx, end, inclusive);
}
-Status Collection::setValidator(OperationContext* txn, BSONObj validatorDoc) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+Status Collection::setValidator(OperationContext* opCtx, BSONObj validatorDoc) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
// Make owned early so that the parsed match expression refers to the owned object.
if (!validatorDoc.isOwned())
@@ -935,7 +935,7 @@ Status Collection::setValidator(OperationContext* txn, BSONObj validatorDoc) {
if (!statusWithMatcher.isOK())
return statusWithMatcher.getStatus();
- _details->updateValidator(txn, validatorDoc, getValidationLevel(), getValidationAction());
+ _details->updateValidator(opCtx, validatorDoc, getValidationLevel(), getValidationAction());
_validator = std::move(statusWithMatcher.getValue());
_validatorDoc = std::move(validatorDoc);
@@ -994,8 +994,8 @@ StringData Collection::getValidationAction() const {
MONGO_UNREACHABLE;
}
-Status Collection::setValidationLevel(OperationContext* txn, StringData newLevel) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+Status Collection::setValidationLevel(OperationContext* opCtx, StringData newLevel) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
StatusWith<ValidationLevel> status = parseValidationLevel(newLevel);
if (!status.isOK()) {
@@ -1004,13 +1004,13 @@ Status Collection::setValidationLevel(OperationContext* txn, StringData newLevel
_validationLevel = status.getValue();
- _details->updateValidator(txn, _validatorDoc, getValidationLevel(), getValidationAction());
+ _details->updateValidator(opCtx, _validatorDoc, getValidationLevel(), getValidationAction());
return Status::OK();
}
-Status Collection::setValidationAction(OperationContext* txn, StringData newAction) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+Status Collection::setValidationAction(OperationContext* opCtx, StringData newAction) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
StatusWith<ValidationAction> status = parseValidationAction(newAction);
if (!status.isOK()) {
@@ -1019,7 +1019,7 @@ Status Collection::setValidationAction(OperationContext* txn, StringData newActi
_validationAction = status.getValue();
- _details->updateValidator(txn, _validatorDoc, getValidationLevel(), getValidationAction());
+ _details->updateValidator(opCtx, _validatorDoc, getValidationLevel(), getValidationAction());
return Status::OK();
}
@@ -1037,11 +1037,11 @@ using ValidateResultsMap = std::map<std::string, ValidateResults>;
class RecordStoreValidateAdaptor : public ValidateAdaptor {
public:
- RecordStoreValidateAdaptor(OperationContext* txn,
+ RecordStoreValidateAdaptor(OperationContext* opCtx,
ValidateCmdLevel level,
IndexCatalog* ic,
ValidateResultsMap* irm)
- : _txn(txn), _level(level), _indexCatalog(ic), _indexNsResultsMap(irm) {
+ : _opCtx(opCtx), _level(level), _indexCatalog(ic), _indexNsResultsMap(irm) {
_ikc = std::unique_ptr<IndexKeyCountTable>(new IndexKeyCountTable());
}
@@ -1068,7 +1068,7 @@ public:
return status;
}
- IndexCatalog::IndexIterator i = _indexCatalog->getIndexIterator(_txn, false);
+ IndexCatalog::IndexIterator i = _indexCatalog->getIndexIterator(_opCtx, false);
while (i.more()) {
const IndexDescriptor* descriptor = i.next();
@@ -1097,7 +1097,7 @@ public:
&documentKeySet,
multikeyPaths);
- if (!descriptor->isMultikey(_txn) && documentKeySet.size() > 1) {
+ if (!descriptor->isMultikey(_opCtx) && documentKeySet.size() > 1) {
string msg = str::stream() << "Index " << descriptor->indexName()
<< " is not multi-key but has more than one"
<< " key in document " << recordId;
@@ -1158,7 +1158,7 @@ public:
BSONObj prevIndexEntryKey;
bool isFirstEntry = true;
- std::unique_ptr<SortedDataInterface::Cursor> cursor = iam->newCursor(_txn, true);
+ std::unique_ptr<SortedDataInterface::Cursor> cursor = iam->newCursor(_opCtx, true);
// Seeking to BSONObj() is equivalent to seeking to the first entry of an index.
for (auto indexEntry = cursor->seek(BSONObj(), true); indexEntry;
indexEntry = cursor->next()) {
@@ -1206,7 +1206,7 @@ public:
}
}
- if (results.valid && !idx->isMultikey(_txn) && totalKeys > numRecs) {
+ if (results.valid && !idx->isMultikey(_opCtx) && totalKeys > numRecs) {
string err = str::stream()
<< "index " << idx->indexName() << " is not multi-key, but has more entries ("
<< numIndexedKeys << ") than documents in the index (" << numRecs - numLongKeys
@@ -1249,7 +1249,7 @@ private:
uint32_t _indexKeyCountTableNumEntries = 0;
bool _hasDocWithoutIndexEntry = false;
- OperationContext* _txn; // Not owned.
+ OperationContext* _opCtx; // Not owned.
ValidateCmdLevel _level;
IndexCatalog* _indexCatalog; // Not owned.
ValidateResultsMap* _indexNsResultsMap; // Not owned.
@@ -1264,30 +1264,30 @@ private:
};
} // namespace
-Status Collection::validate(OperationContext* txn,
+Status Collection::validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateResults* results,
BSONObjBuilder* output) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
try {
ValidateResultsMap indexNsResultsMap;
std::unique_ptr<RecordStoreValidateAdaptor> indexValidator(
- new RecordStoreValidateAdaptor(txn, level, &_indexCatalog, &indexNsResultsMap));
+ new RecordStoreValidateAdaptor(opCtx, level, &_indexCatalog, &indexNsResultsMap));
BSONObjBuilder keysPerIndex; // not using subObjStart to be exception safe
- IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(opCtx, false);
// Validate Indexes.
while (i.more()) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
const IndexDescriptor* descriptor = i.next();
log(LogComponent::kIndex) << "validating index " << descriptor->indexNamespace()
<< endl;
IndexAccessMethod* iam = _indexCatalog.getIndex(descriptor);
ValidateResults curIndexResults;
int64_t numKeys;
- iam->validate(txn, &numKeys, &curIndexResults);
+ iam->validate(opCtx, &numKeys, &curIndexResults);
keysPerIndex.appendNumber(descriptor->indexNamespace(),
static_cast<long long>(numKeys));
@@ -1302,7 +1302,8 @@ Status Collection::validate(OperationContext* txn,
// Validate RecordStore and, if `level == kValidateFull`, cross validate indexes and
// RecordStore.
if (results->valid) {
- auto status = _recordStore->validate(txn, level, indexValidator.get(), results, output);
+ auto status =
+ _recordStore->validate(opCtx, level, indexValidator.get(), results, output);
// RecordStore::validate always returns Status::OK(). Errors are reported through
// `results`.
dassert(status.isOK());
@@ -1323,14 +1324,14 @@ Status Collection::validate(OperationContext* txn,
// Validate index key count.
if (results->valid) {
- IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(opCtx, false);
while (i.more()) {
IndexDescriptor* descriptor = i.next();
ValidateResults& curIndexResults = indexNsResultsMap[descriptor->indexNamespace()];
if (curIndexResults.valid) {
indexValidator->validateIndexKeyCount(
- descriptor, _recordStore->numRecords(txn), curIndexResults);
+ descriptor, _recordStore->numRecords(opCtx), curIndexResults);
}
}
}
@@ -1365,7 +1366,7 @@ Status Collection::validate(OperationContext* txn,
results->errors.insert(results->errors.end(), vr.errors.begin(), vr.errors.end());
}
- output->append("nIndexes", _indexCatalog.numIndexesReady(txn));
+ output->append("nIndexes", _indexCatalog.numIndexesReady(opCtx));
output->append("keysPerIndex", keysPerIndex.done());
if (indexDetails.get()) {
output->append("indexDetails", indexDetails->done());
@@ -1382,13 +1383,13 @@ Status Collection::validate(OperationContext* txn,
return Status::OK();
}
-Status Collection::touch(OperationContext* txn,
+Status Collection::touch(OperationContext* opCtx,
bool touchData,
bool touchIndexes,
BSONObjBuilder* output) const {
if (touchData) {
BSONObjBuilder b;
- Status status = _recordStore->touch(txn, &b);
+ Status status = _recordStore->touch(opCtx, &b);
if (!status.isOK())
return status;
output->append("data", b.obj());
@@ -1396,17 +1397,18 @@ Status Collection::touch(OperationContext* txn,
if (touchIndexes) {
Timer t;
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(opCtx, false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
const IndexAccessMethod* iam = _indexCatalog.getIndex(desc);
- Status status = iam->touch(txn);
+ Status status = iam->touch(opCtx);
if (!status.isOK())
return status;
}
- output->append("indexes",
- BSON("num" << _indexCatalog.numIndexesTotal(txn) << "millis" << t.millis()));
+ output->append(
+ "indexes",
+ BSON("num" << _indexCatalog.numIndexesTotal(opCtx) << "millis" << t.millis()));
}
return Status::OK();
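
The pattern repeated throughout this patch: every catalog method takes the operation context as its first parameter (now named opCtx rather than txn) and asserts the lock mode it requires before touching shared state. Below is a minimal, self-contained sketch of that convention using toy stand-ins for MongoDB's OperationContext and Locker types (the real ones live in db/operation_context.h and are far richer; the linear lock ordering here is a simplification):

    #include <cassert>
    #include <string>

    // Toy stand-ins; not MongoDB's actual types.
    enum LockMode { MODE_IS, MODE_IX, MODE_X };

    struct Locker {
        LockMode held = MODE_IS;
        bool isCollectionLockedForMode(const std::string&, LockMode m) const {
            return held >= m;  // toy ordering: IS < IX < X
        }
    };

    struct OperationContext {
        Locker* lockState() { return &_locker; }
        Locker _locker;
    };

    // The convention shown in cappedTruncateAfter() above: assert the
    // required lock mode on entry, then do the work against storage.
    void cappedTruncateAfterSketch(OperationContext* opCtx, const std::string& ns) {
        assert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_IX));
        // ... truncate the record store, invalidate cursors ...
    }

    int main() {
        OperationContext opCtx;
        opCtx._locker.held = MODE_IX;
        cappedTruncateAfterSketch(&opCtx, "test.capped");
    }
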
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 6e9b1c56b1b..0d04abc5c76 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -178,7 +178,7 @@ private:
*/
class Collection final : CappedCallback, UpdateNotifier {
public:
- Collection(OperationContext* txn,
+ Collection(OperationContext* opCtx,
StringData fullNS,
CollectionCatalogEntry* details, // does not own
RecordStore* recordStore, // does not own
@@ -228,22 +228,22 @@ public:
bool requiresIdIndex() const;
- Snapshotted<BSONObj> docFor(OperationContext* txn, const RecordId& loc) const;
+ Snapshotted<BSONObj> docFor(OperationContext* opCtx, const RecordId& loc) const;
/**
     * @param out - set to the document's contents if 'loc' exists; otherwise left empty.
* @return true iff loc exists
*/
- bool findDoc(OperationContext* txn, const RecordId& loc, Snapshotted<BSONObj>* out) const;
+ bool findDoc(OperationContext* opCtx, const RecordId& loc, Snapshotted<BSONObj>* out) const;
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward = true) const;
/**
* Returns many cursors that partition the Collection into many disjoint sets. Iterating
* all returned cursors is equivalent to iterating the full collection.
*/
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const;
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* opCtx) const;
/**
* Deletes the document with the given RecordId from the collection.
@@ -257,7 +257,7 @@ public:
     * 'noWarn' - if true, suppress logging of any error raised while unindexing
     * the record.
*/
- void deleteDocument(OperationContext* txn,
+ void deleteDocument(OperationContext* opCtx,
const RecordId& loc,
OpDebug* opDebug,
bool fromMigrate = false,
@@ -270,7 +270,7 @@ public:
*
* 'opDebug' Optional argument. When not null, will be used to record operation statistics.
*/
- Status insertDocuments(OperationContext* txn,
+ Status insertDocuments(OperationContext* opCtx,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end,
OpDebug* opDebug,
@@ -284,7 +284,7 @@ public:
* 'opDebug' Optional argument. When not null, will be used to record operation statistics.
* 'enforceQuota' If false, quotas will be ignored.
*/
- Status insertDocument(OperationContext* txn,
+ Status insertDocument(OperationContext* opCtx,
const BSONObj& doc,
OpDebug* opDebug,
bool enforceQuota,
@@ -294,7 +294,7 @@ public:
* Callers must ensure no document validation is performed for this collection when calling
* this method.
*/
- Status insertDocumentsForOplog(OperationContext* txn,
+ Status insertDocumentsForOplog(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs);
@@ -303,7 +303,7 @@ public:
*
* NOTE: It is up to caller to commit the indexes.
*/
- Status insertDocument(OperationContext* txn,
+ Status insertDocument(OperationContext* opCtx,
const BSONObj& doc,
const std::vector<MultiIndexBlock*>& indexBlocks,
bool enforceQuota);
@@ -317,7 +317,7 @@ public:
* 'opDebug' Optional argument. When not null, will be used to record operation statistics.
* @return the post update location of the doc (may or may not be the same as oldLocation)
*/
- StatusWith<RecordId> updateDocument(OperationContext* txn,
+ StatusWith<RecordId> updateDocument(OperationContext* opCtx,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
@@ -335,7 +335,7 @@ public:
* success.
* @return the contents of the updated record.
*/
- StatusWith<RecordData> updateDocumentWithDamages(OperationContext* txn,
+ StatusWith<RecordData> updateDocumentWithDamages(OperationContext* opCtx,
const RecordId& loc,
const Snapshotted<RecordData>& oldRec,
const char* damageSource,
@@ -344,21 +344,21 @@ public:
// -----------
- StatusWith<CompactStats> compact(OperationContext* txn, const CompactOptions* options);
+ StatusWith<CompactStats> compact(OperationContext* opCtx, const CompactOptions* options);
/**
     * Removes all documents as fast as possible.
     * Indexes and other collection characteristics remain unchanged.
*/
- Status truncate(OperationContext* txn);
+ Status truncate(OperationContext* opCtx);
/**
     * @return OK if validate ran successfully.
     *             OK is returned even if corruption is found;
     *             details will be in 'results'.
*/
- Status validate(OperationContext* txn,
+ Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateResults* results,
BSONObjBuilder* output);
@@ -366,7 +366,7 @@ public:
/**
* forces data into cache
*/
- Status touch(OperationContext* txn,
+ Status touch(OperationContext* opCtx,
bool touchData,
bool touchIndexes,
BSONObjBuilder* output) const;
@@ -377,7 +377,7 @@ public:
* function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true
*/
- void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive);
+ void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive);
enum ValidationAction { WARN, ERROR_V };
enum ValidationLevel { OFF, MODERATE, STRICT_V };
@@ -395,10 +395,10 @@ public:
* An empty validator removes all validation.
* Requires an exclusive lock on the collection.
*/
- Status setValidator(OperationContext* txn, BSONObj validator);
+ Status setValidator(OperationContext* opCtx, BSONObj validator);
- Status setValidationLevel(OperationContext* txn, StringData newLevel);
- Status setValidationAction(OperationContext* txn, StringData newAction);
+ Status setValidationLevel(OperationContext* opCtx, StringData newLevel);
+ Status setValidationAction(OperationContext* opCtx, StringData newAction);
StringData getValidationLevel() const;
StringData getValidationAction() const;
@@ -419,15 +419,15 @@ public:
*/
std::shared_ptr<CappedInsertNotifier> getCappedInsertNotifier() const;
- uint64_t numRecords(OperationContext* txn) const;
+ uint64_t numRecords(OperationContext* opCtx) const;
- uint64_t dataSize(OperationContext* txn) const;
+ uint64_t dataSize(OperationContext* opCtx) const;
- int averageObjectSize(OperationContext* txn) const {
- uint64_t n = numRecords(txn);
+ int averageObjectSize(OperationContext* opCtx) const {
+ uint64_t n = numRecords(opCtx);
if (n == 0)
return 5;
- return static_cast<int>(dataSize(txn) / n);
+ return static_cast<int>(dataSize(opCtx) / n);
}
uint64_t getIndexSize(OperationContext* opCtx, BSONObjBuilder* details = NULL, int scale = 1);
@@ -459,20 +459,20 @@ private:
/**
* Returns a non-ok Status if document does not pass this collection's validator.
*/
- Status checkValidation(OperationContext* txn, const BSONObj& document) const;
+ Status checkValidation(OperationContext* opCtx, const BSONObj& document) const;
- Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc);
+ Status recordStoreGoingToUpdateInPlace(OperationContext* opCtx, const RecordId& loc);
- Status aboutToDeleteCapped(OperationContext* txn, const RecordId& loc, RecordData data);
+ Status aboutToDeleteCapped(OperationContext* opCtx, const RecordId& loc, RecordData data);
/**
     * Same semantics as insertDocument, but skips:
     *  - some user error checks
     *  - padding adjustment
*/
- Status _insertDocument(OperationContext* txn, const BSONObj& doc, bool enforceQuota);
+ Status _insertDocument(OperationContext* opCtx, const BSONObj& doc, bool enforceQuota);
- Status _insertDocuments(OperationContext* txn,
+ Status _insertDocuments(OperationContext* opCtx,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end,
bool enforceQuota,
@@ -482,7 +482,7 @@ private:
/**
* Perform update when document move will be required.
*/
- StatusWith<RecordId> _updateDocumentWithMove(OperationContext* txn,
+ StatusWith<RecordId> _updateDocumentWithMove(OperationContext* opCtx,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
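
The averageObjectSize() member declared above is plain integer arithmetic: dataSize / numRecords, with 5 used as a placeholder for an empty collection. A standalone restatement (a hypothetical free function, not the member itself) to check the math:

    #include <cstdint>
    #include <iostream>

    // Same arithmetic as the inline member above, with the counters passed
    // in directly so it can be exercised in isolation.
    int averageObjectSize(uint64_t numRecords, uint64_t dataSize) {
        if (numRecords == 0)
            return 5;  // placeholder the header returns for empty collections
        return static_cast<int>(dataSize / numRecords);
    }

    int main() {
        std::cout << averageObjectSize(0, 0) << "\n";           // 5
        std::cout << averageObjectSize(1000, 5242880) << "\n";  // 5242 (integer division)
    }
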
diff --git a/src/mongo/db/catalog/collection_catalog_entry.h b/src/mongo/db/catalog/collection_catalog_entry.h
index 879459839d3..16e7002db05 100644
--- a/src/mongo/db/catalog/collection_catalog_entry.h
+++ b/src/mongo/db/catalog/collection_catalog_entry.h
@@ -52,17 +52,17 @@ public:
// ------- indexes ----------
- virtual CollectionOptions getCollectionOptions(OperationContext* txn) const = 0;
+ virtual CollectionOptions getCollectionOptions(OperationContext* opCtx) const = 0;
- virtual int getTotalIndexCount(OperationContext* txn) const = 0;
+ virtual int getTotalIndexCount(OperationContext* opCtx) const = 0;
- virtual int getCompletedIndexCount(OperationContext* txn) const = 0;
+ virtual int getCompletedIndexCount(OperationContext* opCtx) const = 0;
virtual int getMaxAllowedIndexes() const = 0;
- virtual void getAllIndexes(OperationContext* txn, std::vector<std::string>* names) const = 0;
+ virtual void getAllIndexes(OperationContext* opCtx, std::vector<std::string>* names) const = 0;
- virtual BSONObj getIndexSpec(OperationContext* txn, StringData idxName) const = 0;
+ virtual BSONObj getIndexSpec(OperationContext* opCtx, StringData idxName) const = 0;
/**
* Returns true if the index identified by 'indexName' is multikey, and returns false otherwise.
@@ -75,7 +75,7 @@ public:
* multikey information, then 'multikeyPaths' is initialized as a vector with size equal to the
* number of elements in the index key pattern of empty sets.
*/
- virtual bool isIndexMultikey(OperationContext* txn,
+ virtual bool isIndexMultikey(OperationContext* opCtx,
StringData indexName,
MultikeyPaths* multikeyPaths) const = 0;
@@ -88,29 +88,29 @@ public:
*
* This function returns true if the index metadata has changed, and returns false otherwise.
*/
- virtual bool setIndexIsMultikey(OperationContext* txn,
+ virtual bool setIndexIsMultikey(OperationContext* opCtx,
StringData indexName,
const MultikeyPaths& multikeyPaths) = 0;
- virtual RecordId getIndexHead(OperationContext* txn, StringData indexName) const = 0;
+ virtual RecordId getIndexHead(OperationContext* opCtx, StringData indexName) const = 0;
- virtual void setIndexHead(OperationContext* txn,
+ virtual void setIndexHead(OperationContext* opCtx,
StringData indexName,
const RecordId& newHead) = 0;
- virtual bool isIndexReady(OperationContext* txn, StringData indexName) const = 0;
+ virtual bool isIndexReady(OperationContext* opCtx, StringData indexName) const = 0;
- virtual Status removeIndex(OperationContext* txn, StringData indexName) = 0;
+ virtual Status removeIndex(OperationContext* opCtx, StringData indexName) = 0;
- virtual Status prepareForIndexBuild(OperationContext* txn, const IndexDescriptor* spec) = 0;
+ virtual Status prepareForIndexBuild(OperationContext* opCtx, const IndexDescriptor* spec) = 0;
- virtual void indexBuildSuccess(OperationContext* txn, StringData indexName) = 0;
+ virtual void indexBuildSuccess(OperationContext* opCtx, StringData indexName) = 0;
/* Updates the expireAfterSeconds field of the given index to the value in newExpireSecs.
* The specified index must already contain an expireAfterSeconds field, and the value in
* that field and newExpireSecs must both be numeric.
*/
- virtual void updateTTLSetting(OperationContext* txn,
+ virtual void updateTTLSetting(OperationContext* opCtx,
StringData idxName,
long long newExpireSeconds) = 0;
@@ -118,14 +118,14 @@ public:
* Sets the flags field of CollectionOptions to newValue.
* Subsequent calls to getCollectionOptions should have flags==newValue and flagsSet==true.
*/
- virtual void updateFlags(OperationContext* txn, int newValue) = 0;
+ virtual void updateFlags(OperationContext* opCtx, int newValue) = 0;
/**
* Updates the validator for this collection.
*
* An empty validator removes all validation.
*/
- virtual void updateValidator(OperationContext* txn,
+ virtual void updateValidator(OperationContext* opCtx,
const BSONObj& validator,
StringData validationLevel,
StringData validationAction) = 0;
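
The isIndexMultikey()/setIndexIsMultikey() comments above describe MultikeyPaths as one entry per element of the index key pattern, each entry a set of path-component positions (an empty set when no multikey metadata exists for that element). A sketch of that shape, assuming the alias below matches the intent of the comments:

    #include <cstddef>
    #include <set>
    #include <vector>

    // One entry per element of the index key pattern; each entry holds the
    // 0-based positions of path components that cause multikeyness.
    using MultikeyPaths = std::vector<std::set<std::size_t>>;

    int main() {
        // Hypothetical index {"a.b": 1, "c": 1} where only the "a" component
        // of the first key-pattern element ever traverses an array:
        MultikeyPaths paths{{0}, {}};
        // With no multikey metadata the catalog reports {{}, {}} instead:
        // a vector of empty sets, one per key-pattern element.
        return paths.size() == 2 ? 0 : 1;
    }
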
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index 4c64eb8f239..69fd76bcfe9 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -107,11 +107,11 @@ private:
}
-StatusWith<CompactStats> Collection::compact(OperationContext* txn,
+StatusWith<CompactStats> Collection::compact(OperationContext* opCtx,
const CompactOptions* compactOptions) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(opCtx);
if (!_recordStore->compactSupported())
return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
@@ -121,18 +121,18 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
if (_recordStore->compactsInPlace()) {
CompactStats stats;
- Status status = _recordStore->compact(txn, NULL, compactOptions, &stats);
+ Status status = _recordStore->compact(opCtx, NULL, compactOptions, &stats);
if (!status.isOK())
return StatusWith<CompactStats>(status);
// Compact all indexes (not including unfinished indexes)
- IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
+ IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(opCtx, false));
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* index = _indexCatalog.getIndex(descriptor);
LOG(1) << "compacting index: " << descriptor->toString();
- Status status = index->compact(txn);
+ Status status = index->compact(opCtx);
if (!status.isOK()) {
error() << "failed to compact index: " << descriptor->toString();
return status;
@@ -142,13 +142,13 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
return StatusWith<CompactStats>(stats);
}
- if (_indexCatalog.numIndexesInProgress(txn))
+ if (_indexCatalog.numIndexesInProgress(opCtx))
return StatusWith<CompactStats>(ErrorCodes::BadValue,
"cannot compact when indexes in progress");
vector<BSONObj> indexSpecs;
{
- IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
+ IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(opCtx, false));
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
@@ -170,14 +170,14 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
}
// Give a chance to be interrupted *before* we drop all indexes.
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
{
        // Note that the drop-indexes call also invalidates all client cursors for the
        // namespace, which is important and wanted here.
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
log() << "compact dropping indexes";
- Status status = _indexCatalog.dropAllIndexes(txn, true);
+ Status status = _indexCatalog.dropAllIndexes(opCtx, true);
if (!status.isOK()) {
return StatusWith<CompactStats>(status);
}
@@ -186,7 +186,7 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
CompactStats stats;
- MultiIndexBlock indexer(txn, this);
+ MultiIndexBlock indexer(opCtx, this);
indexer.allowInterruption();
indexer.ignoreUniqueConstraint(); // in compact we should be doing no checking
@@ -196,7 +196,7 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
MyCompactAdaptor adaptor(this, &indexer);
- status = _recordStore->compact(txn, &adaptor, compactOptions, &stats);
+ status = _recordStore->compact(opCtx, &adaptor, compactOptions, &stats);
if (!status.isOK())
return StatusWith<CompactStats>(status);
@@ -206,7 +206,7 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
return StatusWith<CompactStats>(status);
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
wunit.commit();
}
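
compact() above leans on WriteUnitOfWork's RAII contract twice: the index drop and the final indexer.commit() each happen inside a unit that rolls back unless commit() runs before destruction. A toy model of that contract (not MongoDB's class, which coordinates with the RecoveryUnit):

    #include <functional>
    #include <iostream>

    // Toy model of the WriteUnitOfWork contract: work done inside the unit
    // is rolled back unless commit() is called before destruction.
    class ToyWriteUnitOfWork {
    public:
        explicit ToyWriteUnitOfWork(std::function<void()> rollback)
            : _rollback(std::move(rollback)) {}
        ~ToyWriteUnitOfWork() {
            if (!_committed)
                _rollback();  // an exception or early return lands here
        }
        void commit() { _committed = true; }

    private:
        std::function<void()> _rollback;
        bool _committed = false;
    };

    int main() {
        ToyWriteUnitOfWork wunit([] { std::cout << "rolled back\n"; });
        // ... drop indexes, compact the record store ...
        wunit.commit();  // omit this and the destructor rolls back
    }
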
diff --git a/src/mongo/db/catalog/collection_info_cache.cpp b/src/mongo/db/catalog/collection_info_cache.cpp
index d5431bdb0eb..b601cbd948e 100644
--- a/src/mongo/db/catalog/collection_info_cache.cpp
+++ b/src/mongo/db/catalog/collection_info_cache.cpp
@@ -64,20 +64,20 @@ CollectionInfoCache::~CollectionInfoCache() {
}
}
-const UpdateIndexData& CollectionInfoCache::getIndexKeys(OperationContext* txn) const {
+const UpdateIndexData& CollectionInfoCache::getIndexKeys(OperationContext* opCtx) const {
// This requires "some" lock, and MODE_IS is an expression for that, for now.
- dassert(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_IS));
invariant(_keysComputed);
return _indexedPaths;
}
-void CollectionInfoCache::computeIndexKeys(OperationContext* txn) {
+void CollectionInfoCache::computeIndexKeys(OperationContext* opCtx) {
_indexedPaths.clear();
bool hadTTLIndex = _hasTTLIndex;
_hasTTLIndex = false;
- IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(txn, true);
+ IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(opCtx, true);
while (i.more()) {
IndexDescriptor* descriptor = i.next();
@@ -140,13 +140,13 @@ void CollectionInfoCache::computeIndexKeys(OperationContext* txn) {
_keysComputed = true;
}
-void CollectionInfoCache::notifyOfQuery(OperationContext* txn,
+void CollectionInfoCache::notifyOfQuery(OperationContext* opCtx,
const std::set<std::string>& indexesUsed) {
// Record indexes used to fulfill query.
for (auto it = indexesUsed.begin(); it != indexesUsed.end(); ++it) {
// This index should still exist, since the PlanExecutor would have been killed if the
// index was dropped (and we would not get here).
- dassert(NULL != _collection->getIndexCatalog()->findIndexByName(txn, *it));
+ dassert(NULL != _collection->getIndexCatalog()->findIndexByName(opCtx, *it));
_indexUsageTracker.recordIndexAccess(*it);
}
@@ -167,21 +167,21 @@ QuerySettings* CollectionInfoCache::getQuerySettings() const {
return _querySettings.get();
}
-void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* txn) {
+void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* opCtx) {
std::vector<IndexEntry> indexEntries;
// TODO We shouldn't need to include unfinished indexes, but we must here because the index
// catalog may be in an inconsistent state. SERVER-18346.
const bool includeUnfinishedIndexes = true;
IndexCatalog::IndexIterator ii =
- _collection->getIndexCatalog()->getIndexIterator(txn, includeUnfinishedIndexes);
+ _collection->getIndexCatalog()->getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
const IndexCatalogEntry* ice = ii.catalogEntry(desc);
indexEntries.emplace_back(desc->keyPattern(),
desc->getAccessMethodName(),
- desc->isMultikey(txn),
- ice->getMultikeyPaths(txn),
+ desc->isMultikey(opCtx),
+ ice->getMultikeyPaths(opCtx),
desc->isSparse(),
desc->unique(),
desc->indexName(),
@@ -193,45 +193,45 @@ void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* txn) {
_planCache->notifyOfIndexEntries(indexEntries);
}
-void CollectionInfoCache::init(OperationContext* txn) {
+void CollectionInfoCache::init(OperationContext* opCtx) {
// Requires exclusive collection lock.
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
const bool includeUnfinishedIndexes = false;
IndexCatalog::IndexIterator ii =
- _collection->getIndexCatalog()->getIndexIterator(txn, includeUnfinishedIndexes);
+ _collection->getIndexCatalog()->getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
_indexUsageTracker.registerIndex(desc->indexName(), desc->keyPattern());
}
- rebuildIndexData(txn);
+ rebuildIndexData(opCtx);
}
-void CollectionInfoCache::addedIndex(OperationContext* txn, const IndexDescriptor* desc) {
+void CollectionInfoCache::addedIndex(OperationContext* opCtx, const IndexDescriptor* desc) {
// Requires exclusive collection lock.
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
invariant(desc);
- rebuildIndexData(txn);
+ rebuildIndexData(opCtx);
_indexUsageTracker.registerIndex(desc->indexName(), desc->keyPattern());
}
-void CollectionInfoCache::droppedIndex(OperationContext* txn, StringData indexName) {
+void CollectionInfoCache::droppedIndex(OperationContext* opCtx, StringData indexName) {
// Requires exclusive collection lock.
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
- rebuildIndexData(txn);
+ rebuildIndexData(opCtx);
_indexUsageTracker.unregisterIndex(indexName);
}
-void CollectionInfoCache::rebuildIndexData(OperationContext* txn) {
+void CollectionInfoCache::rebuildIndexData(OperationContext* opCtx) {
clearQueryCache();
_keysComputed = false;
- computeIndexKeys(txn);
- updatePlanCacheIndexEntries(txn);
+ computeIndexKeys(opCtx);
+ updatePlanCacheIndexEntries(opCtx);
}
CollectionIndexUsageMap CollectionInfoCache::getIndexUsageStats() const {
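
init(), addedIndex(), and droppedIndex() above all funnel into rebuildIndexData(), which fixes a single invalidation order: clear cached plans first, then recompute index keys and plan-cache entries. A toy restatement of that ordering (hypothetical class, not the real CollectionInfoCache):

    #include <iostream>

    // Stale plans are discarded before derived index state is recomputed,
    // so new index data is never paired with plans built against old indexes.
    struct ToyInfoCache {
        bool keysComputed = false;

        void clearQueryCache() { std::cout << "plan cache cleared\n"; }
        void computeIndexKeys() { keysComputed = true; }
        void updatePlanCacheIndexEntries() { std::cout << "plan cache repopulated\n"; }

        void rebuildIndexData() {
            clearQueryCache();
            keysComputed = false;
            computeIndexKeys();
            updatePlanCacheIndexEntries();
        }
    };

    int main() {
        ToyInfoCache cache;
        cache.rebuildIndexData();  // called from init(), addedIndex(), droppedIndex()
    }
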
diff --git a/src/mongo/db/catalog/collection_info_cache.h b/src/mongo/db/catalog/collection_info_cache.h
index ea832633cd3..cfe783748d1 100644
--- a/src/mongo/db/catalog/collection_info_cache.h
+++ b/src/mongo/db/catalog/collection_info_cache.h
@@ -64,7 +64,7 @@ public:
    /* Get the set of index keys for this namespace. Handy to quickly check whether a
       given field is indexed (note it might be a secondary component of a compound index).
    */
- const UpdateIndexData& getIndexKeys(OperationContext* txn) const;
+ const UpdateIndexData& getIndexKeys(OperationContext* opCtx) const;
/**
* Returns cached index usage statistics for this collection. The map returned will contain
@@ -78,7 +78,7 @@ public:
/**
* Builds internal cache state based on the current state of the Collection's IndexCatalog
*/
- void init(OperationContext* txn);
+ void init(OperationContext* opCtx);
/**
* Register a newly-created index with the cache. Must be called whenever an index is
@@ -86,7 +86,7 @@ public:
*
* Must be called under exclusive collection lock.
*/
- void addedIndex(OperationContext* txn, const IndexDescriptor* desc);
+ void addedIndex(OperationContext* opCtx, const IndexDescriptor* desc);
/**
* Deregister a newly-dropped index with the cache. Must be called whenever an index is
@@ -94,7 +94,7 @@ public:
*
* Must be called under exclusive collection lock.
*/
- void droppedIndex(OperationContext* txn, StringData indexName);
+ void droppedIndex(OperationContext* opCtx, StringData indexName);
/**
* Removes all cached query plans.
@@ -105,7 +105,7 @@ public:
* Signal to the cache that a query operation has completed. 'indexesUsed' should list the
* set of indexes used by the winning plan, if any.
*/
- void notifyOfQuery(OperationContext* txn, const std::set<std::string>& indexesUsed);
+ void notifyOfQuery(OperationContext* opCtx, const std::set<std::string>& indexesUsed);
private:
Collection* _collection; // not owned
@@ -124,14 +124,14 @@ private:
// Tracks index usage statistics for this collection.
CollectionIndexUsageTracker _indexUsageTracker;
- void computeIndexKeys(OperationContext* txn);
- void updatePlanCacheIndexEntries(OperationContext* txn);
+ void computeIndexKeys(OperationContext* opCtx);
+ void updatePlanCacheIndexEntries(OperationContext* opCtx);
/**
* Rebuilds cached information that is dependent on index composition. Must be called
* when index composition changes.
*/
- void rebuildIndexData(OperationContext* txn);
+ void rebuildIndexData(OperationContext* opCtx);
bool _hasTTLIndex = false;
};
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index abb1e1d8c16..2007aba76d6 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -40,7 +40,7 @@
#include "mongo/db/repl/replication_coordinator_global.h"
namespace mongo {
-Status createCollection(OperationContext* txn,
+Status createCollection(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
const BSONObj& idIndex) {
@@ -73,27 +73,27 @@ Status createCollection(OperationContext* txn,
options.hasField("$nExtents"));
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbName, MODE_X);
- OldClientContext ctx(txn, nss.ns());
- if (txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss)) {
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), dbName, MODE_X);
+ OldClientContext ctx(opCtx, nss.ns());
+ if (opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss)) {
return Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating collection " << nss.ns());
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// Create collection.
const bool createDefaultIndexes = true;
- status = userCreateNS(txn, ctx.db(), nss.ns(), options, createDefaultIndexes, idIndex);
+ status = userCreateNS(opCtx, ctx.db(), nss.ns(), options, createDefaultIndexes, idIndex);
if (!status.isOK()) {
return status;
}
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "create", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "create", nss.ns());
return Status::OK();
}
} // namespace mongo
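
createCollection() above wraps its body in MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END. Conceptually those macros re-run the body whenever a write conflict escapes it; the sketch below captures that shape with a hypothetical exception type standing in for WriteConflictException (the real macros also log and back off between attempts):

    #include <iostream>
    #include <stdexcept>

    // Hypothetical stand-in for the real WriteConflictException.
    struct ToyWriteConflict : std::runtime_error {
        ToyWriteConflict() : std::runtime_error("write conflict") {}
    };

    // Roughly the shape of the retry-loop macros: keep re-running the body
    // until it completes without a write conflict.
    template <typename Body>
    void writeConflictRetry(const char* opName, const char* ns, Body body) {
        for (int attempt = 0;; ++attempt) {
            try {
                body();
                return;
            } catch (const ToyWriteConflict&) {
                std::cout << opName << " on " << ns << ": retry " << attempt << "\n";
            }
        }
    }

    int main() {
        int failuresLeft = 2;
        writeConflictRetry("create", "test.coll", [&] {
            if (failuresLeft-- > 0)
                throw ToyWriteConflict();  // simulate a conflicting writer
        });
    }
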
diff --git a/src/mongo/db/catalog/create_collection.h b/src/mongo/db/catalog/create_collection.h
index 5f503a692a2..73dd82bebd8 100644
--- a/src/mongo/db/catalog/create_collection.h
+++ b/src/mongo/db/catalog/create_collection.h
@@ -40,7 +40,7 @@ class OperationContext;
* _id index according to 'idIndex', if it is non-empty. When 'idIndex' is empty, creates the
* default _id index.
*/
-Status createCollection(OperationContext* txn,
+Status createCollection(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
const BSONObj& idIndex = BSONObj());
diff --git a/src/mongo/db/catalog/cursor_manager.cpp b/src/mongo/db/catalog/cursor_manager.cpp
index f6f1cf21474..2c9f9b2d9d8 100644
--- a/src/mongo/db/catalog/cursor_manager.cpp
+++ b/src/mongo/db/catalog/cursor_manager.cpp
@@ -107,11 +107,11 @@ public:
/**
* works globally
*/
- bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth);
+ bool eraseCursor(OperationContext* opCtx, CursorId id, bool checkAuth);
void appendStats(BSONObjBuilder& builder);
- std::size_t timeoutCursors(OperationContext* txn, int millisSinceLastCall);
+ std::size_t timeoutCursors(OperationContext* opCtx, int millisSinceLastCall);
int64_t nextSeed();
@@ -178,7 +178,7 @@ void GlobalCursorIdCache::destroyed(unsigned id, const std::string& ns) {
_idToNS.erase(id);
}
-bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
+bool GlobalCursorIdCache::eraseCursor(OperationContext* opCtx, CursorId id, bool checkAuth) {
// Figure out what the namespace of this cursor is.
std::string ns;
if (globalCursorManager->ownsCursorId(id)) {
@@ -206,17 +206,17 @@ bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool c
// Check if we are authorized to erase this cursor.
if (checkAuth) {
- AuthorizationSession* as = AuthorizationSession::get(txn->getClient());
+ AuthorizationSession* as = AuthorizationSession::get(opCtx->getClient());
Status authorizationStatus = as->checkAuthForKillCursors(nss, id);
if (!authorizationStatus.isOK()) {
- audit::logKillCursorsAuthzCheck(txn->getClient(), nss, id, ErrorCodes::Unauthorized);
+ audit::logKillCursorsAuthzCheck(opCtx->getClient(), nss, id, ErrorCodes::Unauthorized);
return false;
}
}
// If this cursor is owned by the global cursor manager, ask it to erase the cursor for us.
if (globalCursorManager->ownsCursorId(id)) {
- Status eraseStatus = globalCursorManager->eraseCursor(txn, id, checkAuth);
+ Status eraseStatus = globalCursorManager->eraseCursor(opCtx, id, checkAuth);
massert(28697,
eraseStatus.reason(),
eraseStatus.code() == ErrorCodes::OK ||
@@ -226,15 +226,16 @@ bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool c
// If not, then the cursor must be owned by a collection. Erase the cursor under the
// collection lock (to prevent the collection from going away during the erase).
- AutoGetCollectionForRead ctx(txn, nss);
+ AutoGetCollectionForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
if (checkAuth)
- audit::logKillCursorsAuthzCheck(txn->getClient(), nss, id, ErrorCodes::CursorNotFound);
+ audit::logKillCursorsAuthzCheck(
+ opCtx->getClient(), nss, id, ErrorCodes::CursorNotFound);
return false;
}
- Status eraseStatus = collection->getCursorManager()->eraseCursor(txn, id, checkAuth);
+ Status eraseStatus = collection->getCursorManager()->eraseCursor(opCtx, id, checkAuth);
massert(16089,
eraseStatus.reason(),
eraseStatus.code() == ErrorCodes::OK ||
@@ -242,7 +243,7 @@ bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool c
return eraseStatus.isOK();
}
-std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* txn, int millisSinceLastCall) {
+std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* opCtx, int millisSinceLastCall) {
size_t totalTimedOut = 0;
// Time out the cursors from the global cursor manager.
@@ -265,7 +266,7 @@ std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* txn, int milli
// For each collection, time out its cursors under the collection lock (to prevent the
// collection from going away during the erase).
for (unsigned i = 0; i < todo.size(); i++) {
- AutoGetCollectionOrViewForRead ctx(txn, NamespaceString(todo[i]));
+ AutoGetCollectionOrViewForRead ctx(opCtx, NamespaceString(todo[i]));
if (!ctx.getDb()) {
continue;
}
@@ -287,26 +288,26 @@ CursorManager* CursorManager::getGlobalCursorManager() {
return globalCursorManager.get();
}
-std::size_t CursorManager::timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall) {
- return globalCursorIdCache->timeoutCursors(txn, millisSinceLastCall);
+std::size_t CursorManager::timeoutCursorsGlobal(OperationContext* opCtx, int millisSinceLastCall) {
+ return globalCursorIdCache->timeoutCursors(opCtx, millisSinceLastCall);
}
-int CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, int n, const char* _ids) {
+int CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* opCtx, int n, const char* _ids) {
ConstDataCursor ids(_ids);
int numDeleted = 0;
for (int i = 0; i < n; i++) {
- if (eraseCursorGlobalIfAuthorized(txn, ids.readAndAdvance<LittleEndian<int64_t>>()))
+ if (eraseCursorGlobalIfAuthorized(opCtx, ids.readAndAdvance<LittleEndian<int64_t>>()))
numDeleted++;
if (globalInShutdownDeprecated())
break;
}
return numDeleted;
}
-bool CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id) {
- return globalCursorIdCache->eraseCursor(txn, id, true);
+bool CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* opCtx, CursorId id) {
+ return globalCursorIdCache->eraseCursor(opCtx, id, true);
}
-bool CursorManager::eraseCursorGlobal(OperationContext* txn, CursorId id) {
- return globalCursorIdCache->eraseCursor(txn, id, false);
+bool CursorManager::eraseCursorGlobal(OperationContext* opCtx, CursorId id) {
+ return globalCursorIdCache->eraseCursor(opCtx, id, false);
}
@@ -397,7 +398,7 @@ void CursorManager::invalidateAll(bool collectionGoingAway, const std::string& r
}
}
-void CursorManager::invalidateDocument(OperationContext* txn,
+void CursorManager::invalidateDocument(OperationContext* opCtx,
const RecordId& dl,
InvalidationType type) {
if (supportsDocLocking()) {
@@ -411,13 +412,13 @@ void CursorManager::invalidateDocument(OperationContext* txn,
for (ExecSet::iterator it = _nonCachedExecutors.begin(); it != _nonCachedExecutors.end();
++it) {
PlanExecutor* exec = *it;
- exec->invalidate(txn, dl, type);
+ exec->invalidate(opCtx, dl, type);
}
for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
PlanExecutor* exec = i->second->getExecutor();
if (exec) {
- exec->invalidate(txn, dl, type);
+ exec->invalidate(opCtx, dl, type);
}
}
}
@@ -543,7 +544,7 @@ void CursorManager::deregisterCursor(ClientCursor* cc) {
_deregisterCursor_inlock(cc);
}
-Status CursorManager::eraseCursor(OperationContext* txn, CursorId id, bool shouldAudit) {
+Status CursorManager::eraseCursor(OperationContext* opCtx, CursorId id, bool shouldAudit) {
ClientCursor* cursor;
{
@@ -553,7 +554,7 @@ Status CursorManager::eraseCursor(OperationContext* txn, CursorId id, bool shoul
if (it == _cursors.end()) {
if (shouldAudit) {
audit::logKillCursorsAuthzCheck(
- txn->getClient(), _nss, id, ErrorCodes::CursorNotFound);
+ opCtx->getClient(), _nss, id, ErrorCodes::CursorNotFound);
}
return {ErrorCodes::CursorNotFound, str::stream() << "Cursor id not found: " << id};
}
@@ -563,14 +564,14 @@ Status CursorManager::eraseCursor(OperationContext* txn, CursorId id, bool shoul
if (cursor->_isPinned) {
if (shouldAudit) {
audit::logKillCursorsAuthzCheck(
- txn->getClient(), _nss, id, ErrorCodes::OperationFailed);
+ opCtx->getClient(), _nss, id, ErrorCodes::OperationFailed);
}
return {ErrorCodes::OperationFailed,
str::stream() << "Cannot kill pinned cursor: " << id};
}
if (shouldAudit) {
- audit::logKillCursorsAuthzCheck(txn->getClient(), _nss, id, ErrorCodes::OK);
+ audit::logKillCursorsAuthzCheck(opCtx->getClient(), _nss, id, ErrorCodes::OK);
}
cursor->kill();
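
eraseCursor() above resolves to exactly one of three audited outcomes: CursorNotFound when the id is unknown, OperationFailed when the cursor is pinned, and OK once the cursor is killed. A toy model of that dispatch, with plain strings standing in for ErrorCodes and Status:

    #include <iostream>
    #include <map>
    #include <string>

    struct ToyCursor {
        bool pinned = false;
    };

    // Toy model of the three outcomes eraseCursor() audits above.
    std::string eraseCursor(std::map<long long, ToyCursor>& cursors, long long id) {
        auto it = cursors.find(id);
        if (it == cursors.end())
            return "CursorNotFound";   // unknown id
        if (it->second.pinned)
            return "OperationFailed";  // pinned cursors cannot be killed
        cursors.erase(it);             // stands in for cursor->kill()
        return "OK";
    }

    int main() {
        std::map<long long, ToyCursor> cursors{{1, {false}}, {2, {true}}};
        std::cout << eraseCursor(cursors, 3) << "\n";  // CursorNotFound
        std::cout << eraseCursor(cursors, 2) << "\n";  // OperationFailed
        std::cout << eraseCursor(cursors, 1) << "\n";  // OK
    }
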
diff --git a/src/mongo/db/catalog/cursor_manager.h b/src/mongo/db/catalog/cursor_manager.h
index 36c861f7a5c..ad4289d4f38 100644
--- a/src/mongo/db/catalog/cursor_manager.h
+++ b/src/mongo/db/catalog/cursor_manager.h
@@ -99,7 +99,7 @@ public:
     * Broadcast a document invalidation to all relevant PlanExecutor(s). invalidateDocument
     * must be called *before* the provided RecordId is deleted or mutated.
*/
- void invalidateDocument(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ void invalidateDocument(OperationContext* opCtx, const RecordId& dl, InvalidationType type);
/**
* Destroys cursors that have been inactive for too long.
@@ -151,7 +151,7 @@ public:
*
* If 'shouldAudit' is true, will perform audit logging.
*/
- Status eraseCursor(OperationContext* txn, CursorId id, bool shouldAudit);
+ Status eraseCursor(OperationContext* opCtx, CursorId id, bool shouldAudit);
/**
* Returns true if the space of cursor ids that cursor manager is responsible for includes
@@ -172,17 +172,17 @@ public:
static CursorManager* getGlobalCursorManager();
- static int eraseCursorGlobalIfAuthorized(OperationContext* txn, int n, const char* ids);
+ static int eraseCursorGlobalIfAuthorized(OperationContext* opCtx, int n, const char* ids);
- static bool eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id);
+ static bool eraseCursorGlobalIfAuthorized(OperationContext* opCtx, CursorId id);
- static bool eraseCursorGlobal(OperationContext* txn, CursorId id);
+ static bool eraseCursorGlobal(OperationContext* opCtx, CursorId id);
/**
* Deletes inactive cursors from the global cursor manager and from all per-collection cursor
* managers. Returns the number of cursors that were timed out.
*/
- static std::size_t timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall);
+ static std::size_t timeoutCursorsGlobal(OperationContext* opCtx, int millisSinceLastCall);
private:
friend class ClientCursorPin;
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index d9abaef39ca..b6c11f4c51f 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -85,8 +85,8 @@ void massertNamespaceNotIndex(StringData ns, StringData caller) {
class Database::AddCollectionChange : public RecoveryUnit::Change {
public:
- AddCollectionChange(OperationContext* txn, Database* db, StringData ns)
- : _txn(txn), _db(db), _ns(ns.toString()) {}
+ AddCollectionChange(OperationContext* opCtx, Database* db, StringData ns)
+ : _opCtx(opCtx), _db(db), _ns(ns.toString()) {}
virtual void commit() {
CollectionMap::const_iterator it = _db->_collections.find(_ns);
@@ -94,8 +94,8 @@ public:
return;
// Ban reading from this collection on committed reads on snapshots before now.
- auto replCoord = repl::ReplicationCoordinator::get(_txn);
- auto snapshotName = replCoord->reserveSnapshotName(_txn);
+ auto replCoord = repl::ReplicationCoordinator::get(_opCtx);
+ auto snapshotName = replCoord->reserveSnapshotName(_opCtx);
replCoord->forceSnapshotCreation(); // Ensures a newer snapshot gets created even if idle.
it->second->setMinimumVisibleSnapshot(snapshotName);
}
@@ -109,7 +109,7 @@ public:
_db->_collections.erase(it);
}
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
Database* const _db;
const std::string _ns;
};
@@ -138,11 +138,11 @@ Database::~Database() {
delete i->second;
}
-void Database::close(OperationContext* txn) {
+void Database::close(OperationContext* opCtx) {
    // XXX? - Do we need to close the database under the global lock, or is a DB lock sufficient?
- invariant(txn->lockState()->isW());
+ invariant(opCtx->lockState()->isW());
// oplog caches some things, dirty its caches
- repl::oplogCheckCloseDatabase(txn, this);
+ repl::oplogCheckCloseDatabase(opCtx, this);
if (BackgroundOperation::inProgForDb(_name)) {
log() << "warning: bg op in prog during close db? " << _name;
@@ -181,7 +181,7 @@ Status Database::validateDBName(StringData dbname) {
return Status::OK();
}
-Collection* Database::_getOrCreateCollectionInstance(OperationContext* txn, StringData fullns) {
+Collection* Database::_getOrCreateCollectionInstance(OperationContext* opCtx, StringData fullns) {
Collection* collection = getCollection(fullns);
if (collection) {
return collection;
@@ -194,11 +194,11 @@ Collection* Database::_getOrCreateCollectionInstance(OperationContext* txn, Stri
invariant(rs.get()); // if cce exists, so should this
// Not registering AddCollectionChange since this is for collections that already exist.
- Collection* c = new Collection(txn, fullns, cce.release(), rs.release(), _dbEntry);
+ Collection* c = new Collection(opCtx, fullns, cce.release(), rs.release(), _dbEntry);
return c;
}
-Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry)
+Database::Database(OperationContext* opCtx, StringData name, DatabaseCatalogEntry* dbEntry)
: _name(name.toString()),
_dbEntry(dbEntry),
_profileName(_name + ".system.profile"),
@@ -218,14 +218,14 @@ Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry*
_dbEntry->getCollectionNamespaces(&collections);
for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
const string ns = *it;
- _collections[ns] = _getOrCreateCollectionInstance(txn, ns);
+ _collections[ns] = _getOrCreateCollectionInstance(opCtx, ns);
}
// At construction time of the viewCatalog, the _collections map wasn't initialized yet, so no
// system.views collection would be found. Now we're sufficiently initialized, signal a version
// change. Also force a reload, so if there are problems with the catalog contents as might be
// caused by incorrect mongod versions or similar, they are found right away.
_views.invalidate();
- Status reloadStatus = _views.reloadIfNeeded(txn);
+ Status reloadStatus = _views.reloadIfNeeded(opCtx);
if (!reloadStatus.isOK()) {
warning() << "Unable to parse views: " << redact(reloadStatus)
<< "; remove any invalid views from the " << _viewsName
@@ -233,8 +233,8 @@ Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry*
}
}
-void Database::clearTmpCollections(OperationContext* txn) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+void Database::clearTmpCollections(OperationContext* opCtx) {
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
list<string> collections;
_dbEntry->getCollectionNamespaces(&collections);
@@ -245,12 +245,12 @@ void Database::clearTmpCollections(OperationContext* txn) {
CollectionCatalogEntry* coll = _dbEntry->getCollectionCatalogEntry(ns);
- CollectionOptions options = coll->getCollectionOptions(txn);
+ CollectionOptions options = coll->getCollectionOptions(opCtx);
if (!options.temp)
continue;
try {
- WriteUnitOfWork wunit(txn);
- Status status = dropCollection(txn, ns);
+ WriteUnitOfWork wunit(opCtx);
+ Status status = dropCollection(opCtx, ns);
if (!status.isOK()) {
warning() << "could not drop temp collection '" << ns << "': " << redact(status);
continue;
@@ -260,12 +260,12 @@ void Database::clearTmpCollections(OperationContext* txn) {
} catch (const WriteConflictException& exp) {
warning() << "could not drop temp collection '" << ns << "' due to "
"WriteConflictException";
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
}
}
}
-Status Database::setProfilingLevel(OperationContext* txn, int newLevel) {
+Status Database::setProfilingLevel(OperationContext* opCtx, int newLevel) {
if (_profile == newLevel) {
return Status::OK();
}
@@ -279,7 +279,7 @@ Status Database::setProfilingLevel(OperationContext* txn, int newLevel) {
return Status(ErrorCodes::BadValue, "profiling level has to be >=0 and <= 2");
}
- Status status = createProfileCollection(txn, this);
+ Status status = createProfileCollection(opCtx, this);
if (!status.isOK()) {
return status;
}
@@ -336,13 +336,13 @@ void Database::getStats(OperationContext* opCtx, BSONObjBuilder* output, double
_dbEntry->appendExtraStats(opCtx, output, scale);
}
-Status Database::dropView(OperationContext* txn, StringData fullns) {
- Status status = _views.dropView(txn, NamespaceString(fullns));
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(fullns);
+Status Database::dropView(OperationContext* opCtx, StringData fullns) {
+ Status status = _views.dropView(opCtx, NamespaceString(fullns));
+ Top::get(opCtx->getClient()->getServiceContext()).collectionDropped(fullns);
return status;
}
-Status Database::dropCollection(OperationContext* txn, StringData fullns) {
+Status Database::dropCollection(OperationContext* opCtx, StringData fullns) {
if (!getCollection(fullns)) {
// Collection doesn't exist so don't bother validating if it can be dropped.
return Status::OK();
@@ -364,11 +364,12 @@ Status Database::dropCollection(OperationContext* txn, StringData fullns) {
}
}
- return dropCollectionEvenIfSystem(txn, nss);
+ return dropCollectionEvenIfSystem(opCtx, nss);
}
-Status Database::dropCollectionEvenIfSystem(OperationContext* txn, const NamespaceString& fullns) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+Status Database::dropCollectionEvenIfSystem(OperationContext* opCtx,
+ const NamespaceString& fullns) {
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
LOG(1) << "dropCollection: " << fullns;
@@ -383,23 +384,23 @@ Status Database::dropCollectionEvenIfSystem(OperationContext* txn, const Namespa
audit::logDropCollection(&cc(), fullns.toString());
- Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
+ Status s = collection->getIndexCatalog()->dropAllIndexes(opCtx, true);
if (!s.isOK()) {
warning() << "could not drop collection, trying to drop indexes" << fullns << " because of "
<< redact(s.toString());
return s;
}
- verify(collection->_details->getTotalIndexCount(txn) == 0);
+ verify(collection->_details->getTotalIndexCount(opCtx) == 0);
LOG(1) << "\t dropIndexes done";
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(fullns.toString());
+ Top::get(opCtx->getClient()->getServiceContext()).collectionDropped(fullns.toString());
// We want to destroy the Collection object before telling the StorageEngine to destroy the
// RecordStore.
- _clearCollectionCache(txn, fullns.toString(), "collection dropped");
+ _clearCollectionCache(opCtx, fullns.toString(), "collection dropped");
- s = _dbEntry->dropCollection(txn, fullns.toString());
+ s = _dbEntry->dropCollection(opCtx, fullns.toString());
if (!s.isOK())
return s;
@@ -416,12 +417,12 @@ Status Database::dropCollectionEvenIfSystem(OperationContext* txn, const Namespa
}
}
- getGlobalServiceContext()->getOpObserver()->onDropCollection(txn, fullns);
+ getGlobalServiceContext()->getOpObserver()->onDropCollection(opCtx, fullns);
return Status::OK();
}
-void Database::_clearCollectionCache(OperationContext* txn,
+void Database::_clearCollectionCache(OperationContext* opCtx,
StringData fullns,
const std::string& reason) {
verify(_name == nsToDatabaseSubstring(fullns));
@@ -430,7 +431,7 @@ void Database::_clearCollectionCache(OperationContext* txn,
return;
// Takes ownership of the collection
- txn->recoveryUnit()->registerChange(new RemoveCollectionChange(this, it->second));
+ opCtx->recoveryUnit()->registerChange(new RemoveCollectionChange(this, it->second));
it->second->_cursorManager.invalidateAll(false, reason);
_collections.erase(it);
@@ -446,12 +447,12 @@ Collection* Database::getCollection(StringData ns) const {
return NULL;
}
-Status Database::renameCollection(OperationContext* txn,
+Status Database::renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) {
audit::logRenameCollection(&cc(), fromNS, toNS);
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
BackgroundOperation::assertNoBgOpInProgForNs(fromNS);
BackgroundOperation::assertNoBgOpInProgForNs(toNS);
@@ -462,28 +463,28 @@ Status Database::renameCollection(OperationContext* txn,
string clearCacheReason = str::stream() << "renamed collection '" << fromNS << "' to '"
<< toNS << "'";
- IndexCatalog::IndexIterator ii = coll->getIndexCatalog()->getIndexIterator(txn, true);
+ IndexCatalog::IndexIterator ii = coll->getIndexCatalog()->getIndexIterator(opCtx, true);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
- _clearCollectionCache(txn, desc->indexNamespace(), clearCacheReason);
+ _clearCollectionCache(opCtx, desc->indexNamespace(), clearCacheReason);
}
- _clearCollectionCache(txn, fromNS, clearCacheReason);
- _clearCollectionCache(txn, toNS, clearCacheReason);
+ _clearCollectionCache(opCtx, fromNS, clearCacheReason);
+ _clearCollectionCache(opCtx, toNS, clearCacheReason);
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(fromNS.toString());
+ Top::get(opCtx->getClient()->getServiceContext()).collectionDropped(fromNS.toString());
}
- txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, toNS));
- Status s = _dbEntry->renameCollection(txn, fromNS, toNS, stayTemp);
- _collections[toNS] = _getOrCreateCollectionInstance(txn, toNS);
+ opCtx->recoveryUnit()->registerChange(new AddCollectionChange(opCtx, this, toNS));
+ Status s = _dbEntry->renameCollection(opCtx, fromNS, toNS, stayTemp);
+ _collections[toNS] = _getOrCreateCollectionInstance(opCtx, toNS);
return s;
}
-Collection* Database::getOrCreateCollection(OperationContext* txn, StringData ns) {
+Collection* Database::getOrCreateCollection(OperationContext* opCtx, StringData ns) {
Collection* c = getCollection(ns);
if (!c) {
- c = createCollection(txn, ns);
+ c = createCollection(opCtx, ns);
}
return c;
}
@@ -509,10 +510,10 @@ void Database::_checkCanCreateCollection(const NamespaceString& nss,
uassert(28838, "cannot create a non-capped oplog collection", options.capped || !nss.isOplog());
}
-Status Database::createView(OperationContext* txn,
+Status Database::createView(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
invariant(options.isView());
NamespaceString nss(ns);
@@ -524,27 +525,27 @@ Status Database::createView(OperationContext* txn,
return Status(ErrorCodes::InvalidNamespace,
str::stream() << "invalid namespace name for a view: " + nss.toString());
- return _views.createView(txn, nss, viewOnNss, BSONArray(options.pipeline), options.collation);
+ return _views.createView(opCtx, nss, viewOnNss, BSONArray(options.pipeline), options.collation);
}
-Collection* Database::createCollection(OperationContext* txn,
+Collection* Database::createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool createIdIndex,
const BSONObj& idIndex) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
invariant(!options.isView());
NamespaceString nss(ns);
_checkCanCreateCollection(nss, options);
audit::logCreateCollection(&cc(), ns);
- Status status = _dbEntry->createCollection(txn, ns, options, true /*allocateDefaultSpace*/);
+ Status status = _dbEntry->createCollection(opCtx, ns, options, true /*allocateDefaultSpace*/);
massertNoTraceStatusOK(status);
- txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, ns));
- Collection* collection = _getOrCreateCollectionInstance(txn, ns);
+ opCtx->recoveryUnit()->registerChange(new AddCollectionChange(opCtx, this, ns));
+ Collection* collection = _getOrCreateCollectionInstance(opCtx, ns);
invariant(collection);
_collections[ns] = collection;
@@ -558,19 +559,19 @@ Collection* Database::createCollection(OperationContext* txn,
serverGlobalParams.featureCompatibility.version.load();
IndexCatalog* ic = collection->getIndexCatalog();
fullIdIndexSpec = uassertStatusOK(ic->createIndexOnEmptyCollection(
- txn,
+ opCtx,
!idIndex.isEmpty() ? idIndex
: ic->getDefaultIdIndexSpec(featureCompatibilityVersion)));
}
}
if (nss.isSystem()) {
- authindex::createSystemIndexes(txn, collection);
+ authindex::createSystemIndexes(opCtx, collection);
}
}
getGlobalServiceContext()->getOpObserver()->onCreateCollection(
- txn, nss, options, fullIdIndexSpec);
+ opCtx, nss, options, fullIdIndexSpec);
return collection;
}
@@ -579,9 +580,9 @@ const DatabaseCatalogEntry* Database::getDatabaseCatalogEntry() const {
return _dbEntry;
}
-void dropAllDatabasesExceptLocal(OperationContext* txn) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+void dropAllDatabasesExceptLocal(OperationContext* opCtx) {
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
vector<string> n;
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
@@ -595,47 +596,47 @@ void dropAllDatabasesExceptLocal(OperationContext* txn) {
for (vector<string>::iterator i = n.begin(); i != n.end(); i++) {
if (*i != "local") {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- Database* db = dbHolder().get(txn, *i);
+ Database* db = dbHolder().get(opCtx, *i);
// This is needed since dropDatabase can't be rolled back.
- // This is safe be replaced by "invariant(db);dropDatabase(txn, db);" once fixed
+        // This can safely be replaced by "invariant(db); dropDatabase(opCtx, db);" once fixed.
if (db == nullptr) {
log() << "database disappeared after listDatabases but before drop: " << *i;
} else {
- Database::dropDatabase(txn, db);
+ Database::dropDatabase(opCtx, db);
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropAllDatabasesExceptLocal", *i);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropAllDatabasesExceptLocal", *i);
}
}
}
-void Database::dropDatabase(OperationContext* txn, Database* db) {
+void Database::dropDatabase(OperationContext* opCtx, Database* db) {
invariant(db);
// Store the name so we have it for after the db object is deleted
const string name = db->name();
LOG(1) << "dropDatabase " << name;
- invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name, MODE_X));
BackgroundOperation::assertNoBgOpInProgForDb(name);
- audit::logDropDatabase(txn->getClient(), name);
+ audit::logDropDatabase(opCtx->getClient(), name);
for (auto&& coll : *db) {
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(coll->ns().ns(), true);
+ Top::get(opCtx->getClient()->getServiceContext()).collectionDropped(coll->ns().ns(), true);
}
- dbHolder().close(txn, name);
+ dbHolder().close(opCtx, name);
db = NULL; // db is now deleted
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- getGlobalServiceContext()->getGlobalStorageEngine()->dropDatabase(txn, name);
+ getGlobalServiceContext()->getGlobalStorageEngine()->dropDatabase(opCtx, name);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropDatabase", name);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropDatabase", name);
}
-Status userCreateNS(OperationContext* txn,
+Status userCreateNS(OperationContext* opCtx,
Database* db,
StringData ns,
BSONObj options,
@@ -654,7 +655,7 @@ Status userCreateNS(OperationContext* txn,
return Status(ErrorCodes::NamespaceExists,
str::stream() << "a collection '" << ns.toString() << "' already exists");
- if (db->getViewCatalog()->lookup(txn, ns))
+ if (db->getViewCatalog()->lookup(opCtx, ns))
return Status(ErrorCodes::NamespaceExists,
str::stream() << "a view '" << ns.toString() << "' already exists");
@@ -665,7 +666,7 @@ Status userCreateNS(OperationContext* txn,
// Validate the collation, if there is one.
if (!collectionOptions.collation.isEmpty()) {
- auto collator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collectionOptions.collation);
if (!collator.isOK()) {
return collator.getStatus();
@@ -703,9 +704,10 @@ Status userCreateNS(OperationContext* txn,
}
if (collectionOptions.isView()) {
- uassertStatusOK(db->createView(txn, ns, collectionOptions));
+ uassertStatusOK(db->createView(opCtx, ns, collectionOptions));
} else {
- invariant(db->createCollection(txn, ns, collectionOptions, createDefaultIndexes, idIndex));
+ invariant(
+ db->createCollection(opCtx, ns, collectionOptions, createDefaultIndexes, idIndex));
}
return Status::OK();
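Database::dropDatabase above asserts the database X lock and, per the comment, cannot be rolled back, which is why dropAllDatabasesExceptLocal takes the global write lock before entering the retry loop. A minimal sketch of dropping a single database the same way, assuming an OperationContext* opCtx with no locks held (the database name "test" is illustrative):

    ScopedTransaction transaction(opCtx, MODE_X);
    Lock::GlobalWrite lk(opCtx->lockState());
    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        if (Database* db = dbHolder().get(opCtx, "test")) {
            Database::dropDatabase(opCtx, db);  // irreversible; no WriteUnitOfWork here
        }
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropDatabase", "test");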
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index 2b405710b78..de8e65aa3ce 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -106,7 +106,7 @@ public:
CollectionMap::const_iterator _it;
};
- Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry);
+ Database(OperationContext* opCtx, StringData name, DatabaseCatalogEntry* dbEntry);
// must call close first
~Database();
@@ -120,21 +120,21 @@ public:
}
// closes files and other cleanup see below.
- void close(OperationContext* txn);
+ void close(OperationContext* opCtx);
const std::string& name() const {
return _name;
}
- void clearTmpCollections(OperationContext* txn);
+ void clearTmpCollections(OperationContext* opCtx);
/**
* Sets a new profiling level for the database and returns the outcome.
*
- * @param txn Operation context to use for creating the profiling collection.
+ * @param opCtx Operation context to use for creating the profiling collection.
* @param newLevel New profiling level to use.
*/
- Status setProfilingLevel(OperationContext* txn, int newLevel);
+ Status setProfilingLevel(OperationContext* opCtx, int newLevel);
int getProfilingLevel() const {
return _profile;
@@ -151,18 +151,20 @@ public:
* dropCollection() will refuse to drop system collections. Use dropCollectionEvenIfSystem() if
* that is required.
*/
- Status dropCollection(OperationContext* txn, StringData fullns);
- Status dropCollectionEvenIfSystem(OperationContext* txn, const NamespaceString& fullns);
+ Status dropCollection(OperationContext* opCtx, StringData fullns);
+ Status dropCollectionEvenIfSystem(OperationContext* opCtx, const NamespaceString& fullns);
- Status dropView(OperationContext* txn, StringData fullns);
+ Status dropView(OperationContext* opCtx, StringData fullns);
- Collection* createCollection(OperationContext* txn,
+ Collection* createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options = CollectionOptions(),
bool createDefaultIndexes = true,
const BSONObj& idIndex = BSONObj());
- Status createView(OperationContext* txn, StringData viewName, const CollectionOptions& options);
+ Status createView(OperationContext* opCtx,
+ StringData viewName,
+ const CollectionOptions& options);
/**
* @param ns - this is fully qualified, which is maybe not ideal ???
@@ -181,9 +183,9 @@ public:
return &_views;
}
- Collection* getOrCreateCollection(OperationContext* txn, StringData ns);
+ Collection* getOrCreateCollection(OperationContext* opCtx, StringData ns);
- Status renameCollection(OperationContext* txn,
+ Status renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp);
@@ -195,7 +197,7 @@ public:
*
* Must be called with the specified database locked in X mode.
*/
- static void dropDatabase(OperationContext* txn, Database* db);
+ static void dropDatabase(OperationContext* opCtx, Database* db);
static Status validateDBName(StringData dbname);
@@ -215,7 +217,7 @@ private:
* Note: This does not add the collection to the _collections map; that must be done
* by the caller, who takes ownership of the Collection*
*/
- Collection* _getOrCreateCollectionInstance(OperationContext* txn, StringData fullns);
+ Collection* _getOrCreateCollectionInstance(OperationContext* opCtx, StringData fullns);
/**
* Throws if there is a reason 'ns' cannot be created as a user collection.
@@ -226,7 +228,9 @@ private:
* Deregisters and invalidates all cursors on collection 'fullns'. Callers must specify
* 'reason' for why the cache is being cleared.
*/
- void _clearCollectionCache(OperationContext* txn, StringData fullns, const std::string& reason);
+ void _clearCollectionCache(OperationContext* opCtx,
+ StringData fullns,
+ const std::string& reason);
class AddCollectionChange;
class RemoveCollectionChange;
@@ -251,7 +255,7 @@ private:
friend class IndexCatalog;
};
-void dropAllDatabasesExceptLocal(OperationContext* txn);
+void dropAllDatabasesExceptLocal(OperationContext* opCtx);
/**
* Creates the namespace 'ns' in the database 'db' according to 'options'. If 'createDefaultIndexes'
@@ -259,7 +263,7 @@ void dropAllDatabasesExceptLocal(OperationContext* txn);
* collections). Creates the collection's _id index according to 'idIndex', if it is non-empty. When
* 'idIndex' is empty, creates the default _id index.
*/
-Status userCreateNS(OperationContext* txn,
+Status userCreateNS(OperationContext* opCtx,
Database* db,
StringData ns,
BSONObj options,
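As the declarations above show, createCollection supplies defaults for options, createDefaultIndexes, and idIndex, and its definition asserts the database X lock. A minimal caller sketch under those assumptions (the AutoGetDb helper usage and the namespace are illustrative):

    AutoGetDb autoDb(opCtx, "test", MODE_X);
    Database* db = autoDb.getDb();  // assumed non-null, i.e. the database exists
    WriteUnitOfWork wunit(opCtx);
    Collection* coll = db->createCollection(opCtx, "test.widgets");  // default options, _id index
    invariant(coll);
    wunit.commit();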
diff --git a/src/mongo/db/catalog/database_catalog_entry.h b/src/mongo/db/catalog/database_catalog_entry.h
index 772871cacab..714e3537acf 100644
--- a/src/mongo/db/catalog/database_catalog_entry.h
+++ b/src/mongo/db/catalog/database_catalog_entry.h
@@ -96,16 +96,16 @@ public:
virtual RecordStore* getRecordStore(StringData ns) const = 0;
// Ownership passes to caller
- virtual IndexAccessMethod* getIndex(OperationContext* txn,
+ virtual IndexAccessMethod* getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index) = 0;
- virtual Status createCollection(OperationContext* txn,
+ virtual Status createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool allocateDefaultSpace) = 0;
- virtual Status renameCollection(OperationContext* txn,
+ virtual Status renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) = 0;
diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp
index faa0e21a6d9..d3ee0e2e37f 100644
--- a/src/mongo/db/catalog/database_holder.cpp
+++ b/src/mongo/db/catalog/database_holder.cpp
@@ -81,9 +81,9 @@ DatabaseHolder& dbHolder() {
}
-Database* DatabaseHolder::get(OperationContext* txn, StringData ns) const {
+Database* DatabaseHolder::get(OperationContext* opCtx, StringData ns) const {
const StringData db = _todb(ns);
- invariant(txn->lockState()->isDbLockedForMode(db, MODE_IS));
+ invariant(opCtx->lockState()->isDbLockedForMode(db, MODE_IS));
stdx::lock_guard<SimpleMutex> lk(_m);
DBs::const_iterator it = _dbs.find(db);
@@ -110,9 +110,9 @@ std::set<std::string> DatabaseHolder::getNamesWithConflictingCasing(StringData n
return _getNamesWithConflictingCasing_inlock(name);
}
-Database* DatabaseHolder::openDb(OperationContext* txn, StringData ns, bool* justCreated) {
+Database* DatabaseHolder::openDb(OperationContext* opCtx, StringData ns, bool* justCreated) {
const StringData dbname = _todb(ns);
- invariant(txn->lockState()->isDbLockedForMode(dbname, MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(dbname, MODE_X));
if (justCreated)
*justCreated = false; // Until proven otherwise.
@@ -148,7 +148,7 @@ Database* DatabaseHolder::openDb(OperationContext* txn, StringData ns, bool* jus
// different databases for the same name.
lk.unlock();
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- DatabaseCatalogEntry* entry = storageEngine->getDatabaseCatalogEntry(txn, dbname);
+ DatabaseCatalogEntry* entry = storageEngine->getDatabaseCatalogEntry(opCtx, dbname);
if (!entry->exists()) {
audit::logCreateDatabase(&cc(), dbname);
@@ -156,7 +156,7 @@ Database* DatabaseHolder::openDb(OperationContext* txn, StringData ns, bool* jus
*justCreated = true;
}
- auto newDb = stdx::make_unique<Database>(txn, dbname, entry);
+ auto newDb = stdx::make_unique<Database>(opCtx, dbname, entry);
// Finally replace our nullptr entry with the new Database pointer.
removeDbGuard.Dismiss();
@@ -169,8 +169,8 @@ Database* DatabaseHolder::openDb(OperationContext* txn, StringData ns, bool* jus
return it->second;
}
-void DatabaseHolder::close(OperationContext* txn, StringData ns) {
- invariant(txn->lockState()->isW());
+void DatabaseHolder::close(OperationContext* opCtx, StringData ns) {
+ invariant(opCtx->lockState()->isW());
const StringData dbName = _todb(ns);
@@ -181,15 +181,15 @@ void DatabaseHolder::close(OperationContext* txn, StringData ns) {
return;
}
- it->second->close(txn);
+ it->second->close(opCtx);
delete it->second;
_dbs.erase(it);
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(txn, dbName.toString());
+ getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(opCtx, dbName.toString());
}
-bool DatabaseHolder::closeAll(OperationContext* txn, BSONObjBuilder& result, bool force) {
- invariant(txn->lockState()->isW());
+bool DatabaseHolder::closeAll(OperationContext* opCtx, BSONObjBuilder& result, bool force) {
+ invariant(opCtx->lockState()->isW());
stdx::lock_guard<SimpleMutex> lk(_m);
@@ -213,12 +213,12 @@ bool DatabaseHolder::closeAll(OperationContext* txn, BSONObjBuilder& result, boo
}
Database* db = _dbs[name];
- db->close(txn);
+ db->close(opCtx);
delete db;
_dbs.erase(name);
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(txn, name);
+ getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(opCtx, name);
bb.append(name);
}
diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h
index 8881dfd9795..fd574832d6c 100644
--- a/src/mongo/db/catalog/database_holder.h
+++ b/src/mongo/db/catalog/database_holder.h
@@ -53,7 +53,7 @@ public:
* Retrieves an already opened database or returns NULL. Must be called with the database
* locked in at least IS-mode.
*/
- Database* get(OperationContext* txn, StringData ns) const;
+ Database* get(OperationContext* opCtx, StringData ns) const;
/**
* Retrieves a database reference if it is already opened, or opens it if it hasn't been
@@ -62,12 +62,12 @@ public:
* @param justCreated Returns whether the database was newly created (true) or it already
* existed (false). Can be NULL if this information is not necessary.
*/
- Database* openDb(OperationContext* txn, StringData ns, bool* justCreated = NULL);
+ Database* openDb(OperationContext* opCtx, StringData ns, bool* justCreated = NULL);
/**
* Closes the specified database. Must be called with the database locked in X-mode.
*/
- void close(OperationContext* txn, StringData ns);
+ void close(OperationContext* opCtx, StringData ns);
/**
* Closes all opened databases. Must be called with the global lock acquired in X-mode.
@@ -75,7 +75,7 @@ public:
* @param result Populated with the names of the databases that were closed.
* @param force Force close even if operations are underway - use at shutdown
*/
- bool closeAll(OperationContext* txn, BSONObjBuilder& result, bool force);
+ bool closeAll(OperationContext* opCtx, BSONObjBuilder& result, bool force);
/**
* Returns the set of existing database names that differ only in casing.
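Putting the lock-mode contracts above together, a hedged sketch of the two common DatabaseHolder call patterns ("test" is illustrative; AutoGetDb is assumed to acquire the database lock in the requested mode):

    // Lookup only: requires at least an IS database lock; may return NULL.
    {
        AutoGetDb autoDb(opCtx, "test", MODE_IS);
        Database* db = dbHolder().get(opCtx, "test");
    }
    // Open-or-create: requires the database X lock.
    {
        AutoGetDb autoDb(opCtx, "test", MODE_X);
        bool justCreated;
        Database* db = dbHolder().openDb(opCtx, "test", &justCreated);
        invariant(db);
    }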
diff --git a/src/mongo/db/catalog/document_validation.h b/src/mongo/db/catalog/document_validation.h
index e5b0fc0555c..e92d5bd4edd 100644
--- a/src/mongo/db/catalog/document_validation.h
+++ b/src/mongo/db/catalog/document_validation.h
@@ -56,17 +56,17 @@ class DisableDocumentValidation {
MONGO_DISALLOW_COPYING(DisableDocumentValidation);
public:
- DisableDocumentValidation(OperationContext* txn)
- : _txn(txn), _initialState(documentValidationDisabled(_txn)) {
- documentValidationDisabled(_txn) = true;
+ DisableDocumentValidation(OperationContext* opCtx)
+ : _opCtx(opCtx), _initialState(documentValidationDisabled(_opCtx)) {
+ documentValidationDisabled(_opCtx) = true;
}
~DisableDocumentValidation() {
- documentValidationDisabled(_txn) = _initialState;
+ documentValidationDisabled(_opCtx) = _initialState;
}
private:
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
const bool _initialState;
};
@@ -75,9 +75,9 @@ private:
*/
class DisableDocumentValidationIfTrue {
public:
- DisableDocumentValidationIfTrue(OperationContext* txn, bool shouldDisableValidation) {
+ DisableDocumentValidationIfTrue(OperationContext* opCtx, bool shouldDisableValidation) {
if (shouldDisableValidation)
- _documentValidationDisabler.emplace(txn);
+ _documentValidationDisabler.emplace(opCtx);
}
private:
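Both classes are small RAII guards over a per-operation flag: DisableDocumentValidation saves the current state and restores it on destruction, and DisableDocumentValidationIfTrue conditionally holds one. A minimal usage sketch (the function and flag names are illustrative):

    void performWrites(OperationContext* opCtx, bool bypassDocumentValidation) {
        // Validation stays disabled exactly for this scope, even on exception.
        DisableDocumentValidationIfTrue validationDisabler(opCtx, bypassDocumentValidation);
        // ... writes issued here skip document validation iff the flag was set ...
    }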
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index ae5d0fed3d7..3699f1a76ad 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -49,7 +49,7 @@
namespace mongo {
-Status dropCollection(OperationContext* txn,
+Status dropCollection(OperationContext* opCtx,
const NamespaceString& collectionName,
BSONObjBuilder& result) {
if (!serverGlobalParams.quiet.load()) {
@@ -59,22 +59,23 @@ Status dropCollection(OperationContext* txn,
const std::string dbname = collectionName.db().toString();
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
+ ScopedTransaction transaction(opCtx, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
+ AutoGetDb autoDb(opCtx, dbname, MODE_X);
Database* const db = autoDb.getDb();
Collection* coll = db ? db->getCollection(collectionName) : nullptr;
- auto view = db && !coll ? db->getViewCatalog()->lookup(txn, collectionName.ns()) : nullptr;
+ auto view =
+ db && !coll ? db->getViewCatalog()->lookup(opCtx, collectionName.ns()) : nullptr;
if (!db || (!coll && !view)) {
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
}
const bool shardVersionCheck = true;
- OldClientContext context(txn, collectionName.ns(), shardVersionCheck);
+ OldClientContext context(opCtx, collectionName.ns(), shardVersionCheck);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, collectionName);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, collectionName);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -82,16 +83,16 @@ Status dropCollection(OperationContext* txn,
<< collectionName.ns());
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
result.append("ns", collectionName.ns());
if (coll) {
invariant(!view);
- int numIndexes = coll->getIndexCatalog()->numIndexesTotal(txn);
+ int numIndexes = coll->getIndexCatalog()->numIndexesTotal(opCtx);
BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
- Status s = db->dropCollection(txn, collectionName.ns());
+ Status s = db->dropCollection(opCtx, collectionName.ns());
if (!s.isOK()) {
return s;
@@ -100,14 +101,14 @@ Status dropCollection(OperationContext* txn,
result.append("nIndexesWas", numIndexes);
} else {
invariant(view);
- Status status = db->dropView(txn, collectionName.ns());
+ Status status = db->dropView(opCtx, collectionName.ns());
if (!status.isOK()) {
return status;
}
}
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "drop", collectionName.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "drop", collectionName.ns());
return Status::OK();
}
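dropCollection takes its own locks and write-conflict retry loop, so callers invoke it with no locks held; on success, result carries "ns" and, for a real collection (not a view), "nIndexesWas". A hedged caller sketch (the namespace is illustrative):

    BSONObjBuilder result;
    Status status = dropCollection(opCtx, NamespaceString("test.widgets"), result);
    if (status != ErrorCodes::NamespaceNotFound) {
        uassertStatusOK(status);  // e.g. NotMaster surfaces here
    }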
diff --git a/src/mongo/db/catalog/drop_collection.h b/src/mongo/db/catalog/drop_collection.h
index c62b2c376c5..f0d51b6b051 100644
--- a/src/mongo/db/catalog/drop_collection.h
+++ b/src/mongo/db/catalog/drop_collection.h
@@ -37,7 +37,7 @@ class OperationContext;
* Drops the collection "collectionName" and populates "result" with statistics about what
* was removed.
*/
-Status dropCollection(OperationContext* txn,
+Status dropCollection(OperationContext* opCtx,
const NamespaceString& collectionName,
BSONObjBuilder& result);
} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index e1a4ce63e33..4c20d20ffa9 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -48,21 +48,21 @@
namespace mongo {
-Status dropDatabase(OperationContext* txn, const std::string& dbName) {
+Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
uassert(ErrorCodes::IllegalOperation,
"Cannot drop a database in read-only mode",
!storageGlobalParams.readOnly);
// TODO (Kal): OldClientContext legacy, needs to be removed
{
- CurOp::get(txn)->ensureStarted();
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(dbName);
+ CurOp::get(opCtx)->ensureStarted();
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(dbName);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- AutoGetDb autoDB(txn, dbName, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ AutoGetDb autoDB(opCtx, dbName, MODE_X);
Database* const db = autoDB.getDb();
if (!db) {
return Status(ErrorCodes::NamespaceNotFound,
@@ -70,8 +70,8 @@ Status dropDatabase(OperationContext* txn, const std::string& dbName) {
<< " because it does not exist");
}
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(txn, dbName);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(opCtx, dbName);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -79,16 +79,16 @@ Status dropDatabase(OperationContext* txn, const std::string& dbName) {
}
log() << "dropDatabase " << dbName << " starting";
- Database::dropDatabase(txn, db);
+ Database::dropDatabase(opCtx, db);
log() << "dropDatabase " << dbName << " finished";
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
- getGlobalServiceContext()->getOpObserver()->onDropDatabase(txn, dbName + ".$cmd");
+ getGlobalServiceContext()->getOpObserver()->onDropDatabase(opCtx, dbName + ".$cmd");
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropDatabase", dbName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropDatabase", dbName);
return Status::OK();
}
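Note the ordering above: the database is dropped before the WriteUnitOfWork, since Database::dropDatabase cannot be rolled back; only the onDropDatabase oplog write is transactional. A hedged caller sketch, assuming no locks are held on entry (dropDatabase acquires the global X lock itself; "test" is illustrative):

    Status status = dropDatabase(opCtx, "test");
    if (!status.isOK() && status != ErrorCodes::NamespaceNotFound) {
        return status;  // e.g. NotMaster
    }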
diff --git a/src/mongo/db/catalog/drop_database.h b/src/mongo/db/catalog/drop_database.h
index b60e817be27..e0a0c8560e5 100644
--- a/src/mongo/db/catalog/drop_database.h
+++ b/src/mongo/db/catalog/drop_database.h
@@ -34,5 +34,5 @@ class OperationContext;
/**
* Drops the database "dbName".
*/
-Status dropDatabase(OperationContext* txn, const std::string& dbName);
+Status dropDatabase(OperationContext* opCtx, const std::string& dbName);
} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index 78739b6e4da..55c559912df 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -48,7 +48,7 @@
namespace mongo {
namespace {
-Status wrappedRun(OperationContext* txn,
+Status wrappedRun(OperationContext* opCtx,
const StringData& dbname,
const std::string& toDeleteNs,
Database* const db,
@@ -61,7 +61,7 @@ Status wrappedRun(OperationContext* txn,
// If db/collection does not exist, short circuit and return.
if (!db || !collection) {
- if (db && db->getViewCatalog()->lookup(txn, toDeleteNs)) {
+ if (db && db->getViewCatalog()->lookup(opCtx, toDeleteNs)) {
return {ErrorCodes::CommandNotSupportedOnView,
str::stream() << "Cannot drop indexes on view " << toDeleteNs};
}
@@ -69,11 +69,11 @@ Status wrappedRun(OperationContext* txn,
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
}
- OldClientContext ctx(txn, toDeleteNs);
+ OldClientContext ctx(opCtx, toDeleteNs);
BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs);
IndexCatalog* indexCatalog = collection->getIndexCatalog();
- anObjBuilder->appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn));
+ anObjBuilder->appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(opCtx));
BSONElement f = jsobj.getField("index");
@@ -81,7 +81,7 @@ Status wrappedRun(OperationContext* txn,
std::string indexToDelete = f.valuestr();
if (indexToDelete == "*") {
- Status s = indexCatalog->dropAllIndexes(txn, false);
+ Status s = indexCatalog->dropAllIndexes(opCtx, false);
if (!s.isOK()) {
return s;
}
@@ -89,7 +89,8 @@ Status wrappedRun(OperationContext* txn,
return Status::OK();
}
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn, indexToDelete);
+ IndexDescriptor* desc =
+ collection->getIndexCatalog()->findIndexByName(opCtx, indexToDelete);
if (desc == NULL) {
return Status(ErrorCodes::IndexNotFound,
str::stream() << "index not found with name [" << indexToDelete << "]");
@@ -99,7 +100,7 @@ Status wrappedRun(OperationContext* txn,
return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
}
- Status s = indexCatalog->dropIndex(txn, desc);
+ Status s = indexCatalog->dropIndex(opCtx, desc);
if (!s.isOK()) {
return s;
}
@@ -110,7 +111,7 @@ Status wrappedRun(OperationContext* txn,
if (f.type() == Object) {
std::vector<IndexDescriptor*> indexes;
collection->getIndexCatalog()->findIndexesByKeyPattern(
- txn, f.embeddedObject(), false, &indexes);
+ opCtx, f.embeddedObject(), false, &indexes);
if (indexes.empty()) {
return Status(ErrorCodes::IndexNotFound,
str::stream() << "can't find index with key: " << f.embeddedObject());
@@ -130,7 +131,7 @@ Status wrappedRun(OperationContext* txn,
return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
}
- Status s = indexCatalog->dropIndex(txn, desc);
+ Status s = indexCatalog->dropIndex(opCtx, desc);
if (!s.isOK()) {
return s;
}
@@ -142,35 +143,35 @@ Status wrappedRun(OperationContext* txn,
}
} // namespace
-Status dropIndexes(OperationContext* txn,
+Status dropIndexes(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& idxDescriptor,
BSONObjBuilder* result) {
StringData dbName = nss.db();
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbName, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, dbName, MODE_X);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss);
if (userInitiatedWritesAndNotPrimary) {
return {ErrorCodes::NotMaster,
str::stream() << "Not primary while dropping indexes in " << nss.ns()};
}
- WriteUnitOfWork wunit(txn);
- Status status = wrappedRun(txn, dbName, nss.ns(), autoDb.getDb(), idxDescriptor, result);
+ WriteUnitOfWork wunit(opCtx);
+ Status status = wrappedRun(opCtx, dbName, nss.ns(), autoDb.getDb(), idxDescriptor, result);
if (!status.isOK()) {
return status;
}
getGlobalServiceContext()->getOpObserver()->onDropIndex(
- txn, dbName.toString() + ".$cmd", idxDescriptor);
+ opCtx, dbName.toString() + ".$cmd", idxDescriptor);
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropIndexes", dbName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropIndexes", dbName);
return Status::OK();
}
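wrappedRun above reads the selector from the "index" field of idxDescriptor, accepting either a string (an index name, or "*" for every index except _id) or a document (a key pattern). A hedged caller sketch (nss and the index names are illustrative):

    BSONObjBuilder result;
    // Drop by name:
    uassertStatusOK(dropIndexes(opCtx, nss, BSON("index" << "a_1"), &result));
    // Drop by key pattern:
    uassertStatusOK(dropIndexes(opCtx, nss, BSON("index" << BSON("a" << 1)), &result));
    // Drop everything except the _id index:
    uassertStatusOK(dropIndexes(opCtx, nss, BSON("index" << "*"), &result));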
diff --git a/src/mongo/db/catalog/drop_indexes.h b/src/mongo/db/catalog/drop_indexes.h
index 931fa348019..67f4b5a64c2 100644
--- a/src/mongo/db/catalog/drop_indexes.h
+++ b/src/mongo/db/catalog/drop_indexes.h
@@ -38,7 +38,7 @@ class OperationContext;
* Drops the index from collection "ns" that matches the "idxDescriptor" and populates
* "result" with some statistics about the dropped index.
*/
-Status dropIndexes(OperationContext* txn,
+Status dropIndexes(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& idxDescriptor,
BSONObjBuilder* result);
diff --git a/src/mongo/db/catalog/head_manager.h b/src/mongo/db/catalog/head_manager.h
index 7a671ccf69f..12042e33f84 100644
--- a/src/mongo/db/catalog/head_manager.h
+++ b/src/mongo/db/catalog/head_manager.h
@@ -42,9 +42,9 @@ class HeadManager {
public:
virtual ~HeadManager() {}
- virtual const RecordId getHead(OperationContext* txn) const = 0;
+ virtual const RecordId getHead(OperationContext* opCtx) const = 0;
- virtual void setHead(OperationContext* txn, const RecordId newHead) = 0;
+ virtual void setHead(OperationContext* opCtx, const RecordId newHead) = 0;
};
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index a34baf61c28..32fb80b5587 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -99,26 +99,26 @@ IndexCatalog::~IndexCatalog() {
_magic = 123456;
}
-Status IndexCatalog::init(OperationContext* txn) {
+Status IndexCatalog::init(OperationContext* opCtx) {
vector<string> indexNames;
- _collection->getCatalogEntry()->getAllIndexes(txn, &indexNames);
+ _collection->getCatalogEntry()->getAllIndexes(opCtx, &indexNames);
for (size_t i = 0; i < indexNames.size(); i++) {
const string& indexName = indexNames[i];
- BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(txn, indexName).getOwned();
+ BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(opCtx, indexName).getOwned();
- if (!_collection->getCatalogEntry()->isIndexReady(txn, indexName)) {
+ if (!_collection->getCatalogEntry()->isIndexReady(opCtx, indexName)) {
_unfinishedIndexes.push_back(spec);
continue;
}
BSONObj keyPattern = spec.getObjectField("key");
IndexDescriptor* descriptor =
- new IndexDescriptor(_collection, _getAccessMethodName(txn, keyPattern), spec);
+ new IndexDescriptor(_collection, _getAccessMethodName(opCtx, keyPattern), spec);
const bool initFromDisk = true;
- IndexCatalogEntry* entry = _setupInMemoryStructures(txn, descriptor, initFromDisk);
+ IndexCatalogEntry* entry = _setupInMemoryStructures(opCtx, descriptor, initFromDisk);
- fassert(17340, entry->isReady(txn));
+ fassert(17340, entry->isReady(opCtx));
}
if (_unfinishedIndexes.size()) {
@@ -132,36 +132,36 @@ Status IndexCatalog::init(OperationContext* txn) {
return Status::OK();
}
-IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures(OperationContext* txn,
+IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures(OperationContext* opCtx,
IndexDescriptor* descriptor,
bool initFromDisk) {
unique_ptr<IndexDescriptor> descriptorCleanup(descriptor);
- Status status = _isSpecOk(txn, descriptor->infoObj());
+ Status status = _isSpecOk(opCtx, descriptor->infoObj());
if (!status.isOK() && status != ErrorCodes::IndexAlreadyExists) {
severe() << "Found an invalid index " << descriptor->infoObj() << " on the "
<< _collection->ns().ns() << " collection: " << redact(status);
fassertFailedNoTrace(28782);
}
- auto entry = stdx::make_unique<IndexCatalogEntry>(txn,
+ auto entry = stdx::make_unique<IndexCatalogEntry>(opCtx,
_collection->ns().ns(),
_collection->getCatalogEntry(),
descriptorCleanup.release(),
_collection->infoCache());
std::unique_ptr<IndexAccessMethod> accessMethod(
- _collection->_dbce->getIndex(txn, _collection->getCatalogEntry(), entry.get()));
+ _collection->_dbce->getIndex(opCtx, _collection->getCatalogEntry(), entry.get()));
entry->init(std::move(accessMethod));
IndexCatalogEntry* save = entry.get();
_entries.add(entry.release());
if (!initFromDisk) {
- txn->recoveryUnit()->onRollback([this, txn, descriptor] {
+ opCtx->recoveryUnit()->onRollback([this, opCtx, descriptor] {
// Need to preserve indexName as descriptor no longer exists after remove().
const std::string indexName = descriptor->indexName();
_entries.remove(descriptor);
- _collection->infoCache()->droppedIndex(txn, indexName);
+ _collection->infoCache()->droppedIndex(opCtx, indexName);
});
}
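The onRollback hook registered above is the standard recovery-unit idiom for in-memory state created inside a WriteUnitOfWork: pair the mutation with a compensating lambda that runs only if the unit aborts. The generic shape, as a sketch (registry, key, and value are illustrative names):

    registry.add(key, value);  // in-memory change made under a WriteUnitOfWork
    opCtx->recoveryUnit()->onRollback([&registry, key] {
        registry.remove(key);  // undo runs only if the unit of work rolls back
    });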
@@ -193,11 +193,11 @@ Status IndexCatalog::checkUnfinished() const {
<< _collection->ns().ns());
}
-bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& keyPattern) const {
+bool IndexCatalog::_shouldOverridePlugin(OperationContext* opCtx, const BSONObj& keyPattern) const {
string pluginName = IndexNames::findPluginName(keyPattern);
bool known = IndexNames::isKnownName(pluginName);
- if (!_collection->_dbce->isOlderThan24(txn)) {
+ if (!_collection->_dbce->isOlderThan24(opCtx)) {
// RulesFor24+
// This assert will be triggered when downgrading from a future version that
// supports an index plugin unsupported by this version.
@@ -225,8 +225,9 @@ bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& k
return false;
}
-string IndexCatalog::_getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) const {
- if (_shouldOverridePlugin(txn, keyPattern)) {
+string IndexCatalog::_getAccessMethodName(OperationContext* opCtx,
+ const BSONObj& keyPattern) const {
+ if (_shouldOverridePlugin(opCtx, keyPattern)) {
return "";
}
@@ -236,7 +237,7 @@ string IndexCatalog::_getAccessMethodName(OperationContext* txn, const BSONObj&
// ---------------------------
-Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
+Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* opCtx,
const string& newPluginName) {
// first check if requested index requires pdfile minor version to be bumped
if (IndexNames::existedBefore24(newPluginName)) {
@@ -245,7 +246,7 @@ Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
DatabaseCatalogEntry* dbce = _collection->_dbce;
- if (!dbce->isOlderThan24(txn)) {
+ if (!dbce->isOlderThan24(opCtx)) {
return Status::OK(); // these checks have already been done
}
@@ -255,7 +256,7 @@ Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
// which allows creation of indexes using new plugins.
RecordStore* indexes = dbce->getRecordStore(dbce->name() + ".system.indexes");
- auto cursor = indexes->getCursor(txn);
+ auto cursor = indexes->getCursor(opCtx);
while (auto record = cursor->next()) {
const BSONObj index = record->data.releaseToBson();
const BSONObj key = index.getObjectField("key");
@@ -271,45 +272,45 @@ Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
return Status(ErrorCodes::CannotCreateIndex, errmsg);
}
- dbce->markIndexSafe24AndUp(txn);
+ dbce->markIndexSafe24AndUp(opCtx);
return Status::OK();
}
-StatusWith<BSONObj> IndexCatalog::prepareSpecForCreate(OperationContext* txn,
+StatusWith<BSONObj> IndexCatalog::prepareSpecForCreate(OperationContext* opCtx,
const BSONObj& original) const {
- Status status = _isSpecOk(txn, original);
+ Status status = _isSpecOk(opCtx, original);
if (!status.isOK())
return StatusWith<BSONObj>(status);
- auto fixed = _fixIndexSpec(txn, _collection, original);
+ auto fixed = _fixIndexSpec(opCtx, _collection, original);
if (!fixed.isOK()) {
return fixed;
}
// we double check with new index spec
- status = _isSpecOk(txn, fixed.getValue());
+ status = _isSpecOk(opCtx, fixed.getValue());
if (!status.isOK())
return StatusWith<BSONObj>(status);
- status = _doesSpecConflictWithExisting(txn, fixed.getValue());
+ status = _doesSpecConflictWithExisting(opCtx, fixed.getValue());
if (!status.isOK())
return StatusWith<BSONObj>(status);
return fixed;
}
-StatusWith<BSONObj> IndexCatalog::createIndexOnEmptyCollection(OperationContext* txn,
+StatusWith<BSONObj> IndexCatalog::createIndexOnEmptyCollection(OperationContext* opCtx,
BSONObj spec) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
- invariant(_collection->numRecords(txn) == 0);
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+ invariant(_collection->numRecords(opCtx) == 0);
_checkMagic();
Status status = checkUnfinished();
if (!status.isOK())
return status;
- StatusWith<BSONObj> statusWithSpec = prepareSpecForCreate(txn, spec);
+ StatusWith<BSONObj> statusWithSpec = prepareSpecForCreate(opCtx, spec);
status = statusWithSpec.getStatus();
if (!status.isOK())
return status;
@@ -317,13 +318,13 @@ StatusWith<BSONObj> IndexCatalog::createIndexOnEmptyCollection(OperationContext*
string pluginName = IndexNames::findPluginName(spec["key"].Obj());
if (pluginName.size()) {
- Status s = _upgradeDatabaseMinorVersionIfNeeded(txn, pluginName);
+ Status s = _upgradeDatabaseMinorVersionIfNeeded(opCtx, pluginName);
if (!s.isOK())
return s;
}
// now going to touch disk
- IndexBuildBlock indexBuildBlock(txn, _collection, spec);
+ IndexBuildBlock indexBuildBlock(opCtx, _collection, spec);
status = indexBuildBlock.init();
if (!status.isOK())
return status;
@@ -335,18 +336,18 @@ StatusWith<BSONObj> IndexCatalog::createIndexOnEmptyCollection(OperationContext*
invariant(descriptor);
invariant(entry == _entries.find(descriptor));
- status = entry->accessMethod()->initializeAsEmpty(txn);
+ status = entry->accessMethod()->initializeAsEmpty(opCtx);
if (!status.isOK())
return status;
indexBuildBlock.success();
// sanity check
- invariant(_collection->getCatalogEntry()->isIndexReady(txn, descriptor->indexName()));
+ invariant(_collection->getCatalogEntry()->isIndexReady(opCtx, descriptor->indexName()));
return spec;
}
-IndexCatalog::IndexBuildBlock::IndexBuildBlock(OperationContext* txn,
+IndexCatalog::IndexBuildBlock::IndexBuildBlock(OperationContext* opCtx,
Collection* collection,
const BSONObj& spec)
: _collection(collection),
@@ -354,7 +355,7 @@ IndexCatalog::IndexBuildBlock::IndexBuildBlock(OperationContext* txn,
_ns(_catalog->_collection->ns().ns()),
_spec(spec.getOwned()),
_entry(NULL),
- _txn(txn) {
+ _opCtx(opCtx) {
invariant(collection);
}
@@ -370,18 +371,18 @@ Status IndexCatalog::IndexBuildBlock::init() {
/// ---------- setup on disk structures ----------------
- Status status = _collection->getCatalogEntry()->prepareForIndexBuild(_txn, descriptor);
+ Status status = _collection->getCatalogEntry()->prepareForIndexBuild(_opCtx, descriptor);
if (!status.isOK())
return status;
/// ---------- setup in memory structures ----------------
const bool initFromDisk = false;
- _entry = _catalog->_setupInMemoryStructures(_txn, descriptorCleaner.release(), initFromDisk);
+ _entry = _catalog->_setupInMemoryStructures(_opCtx, descriptorCleaner.release(), initFromDisk);
// Register this index with the CollectionInfoCache to regenerate the cache. This way, updates
// occurring while an index is being build in the background will be aware of whether or not
// they need to modify any indexes.
- _collection->infoCache()->addedIndex(_txn, descriptor);
+ _collection->infoCache()->addedIndex(_opCtx, descriptor);
return Status::OK();
}
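IndexBuildBlock is the two-phase guard around an index build: init() writes the catalog metadata and in-memory structures, after which exactly one of success() or fail() must run. A hedged sketch of the lifecycle, following how createIndexOnEmptyCollection uses it (buildSucceeded is illustrative):

    IndexCatalog::IndexBuildBlock indexBuildBlock(opCtx, collection, spec);
    uassertStatusOK(indexBuildBlock.init());
    // ... populate or initialize the index here ...
    if (buildSucceeded) {
        indexBuildBlock.success();  // marks the index ready in the catalog
    } else {
        indexBuildBlock.fail();     // drops the partial index and its on-disk state
    }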
@@ -397,9 +398,9 @@ void IndexCatalog::IndexBuildBlock::fail() {
invariant(entry == _entry);
if (entry) {
- _catalog->_dropIndex(_txn, entry);
+ _catalog->_dropIndex(_opCtx, entry);
} else {
- _catalog->_deleteIndexFromDisk(_txn, _indexName, _indexNamespace);
+ _catalog->_deleteIndexFromDisk(_opCtx, _indexName, _indexNamespace);
}
}
@@ -407,24 +408,24 @@ void IndexCatalog::IndexBuildBlock::success() {
Collection* collection = _catalog->_collection;
fassert(17207, collection->ok());
NamespaceString ns(_indexNamespace);
- invariant(_txn->lockState()->isDbLockedForMode(ns.db(), MODE_X));
+ invariant(_opCtx->lockState()->isDbLockedForMode(ns.db(), MODE_X));
- collection->getCatalogEntry()->indexBuildSuccess(_txn, _indexName);
+ collection->getCatalogEntry()->indexBuildSuccess(_opCtx, _indexName);
- IndexDescriptor* desc = _catalog->findIndexByName(_txn, _indexName, true);
+ IndexDescriptor* desc = _catalog->findIndexByName(_opCtx, _indexName, true);
fassert(17330, desc);
IndexCatalogEntry* entry = _catalog->_entries.find(desc);
fassert(17331, entry && entry == _entry);
- OperationContext* txn = _txn;
+ OperationContext* opCtx = _opCtx;
LOG(2) << "marking index " << _indexName << " as ready in snapshot id "
- << txn->recoveryUnit()->getSnapshotId();
- _txn->recoveryUnit()->onCommit([txn, entry, collection] {
+ << opCtx->recoveryUnit()->getSnapshotId();
+ _opCtx->recoveryUnit()->onCommit([opCtx, entry, collection] {
// Note: this runs after the WUOW commits but before we release our X lock on the
// collection. This means that any snapshot created after this must include the full index,
// and no one can try to read this index before we set the visibility.
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- auto snapshotName = replCoord->reserveSnapshotName(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ auto snapshotName = replCoord->reserveSnapshotName(opCtx);
replCoord->forceSnapshotCreation(); // Ensures a newer snapshot gets created even if idle.
entry->setMinimumVisibleSnapshot(snapshotName);
@@ -470,7 +471,7 @@ Status _checkValidFilterExpressions(MatchExpression* expression, int level = 0)
}
}
-Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const {
+Status IndexCatalog::_isSpecOk(OperationContext* opCtx, const BSONObj& spec) const {
const NamespaceString& nss = _collection->ns();
BSONElement vElt = spec["v"];
@@ -505,7 +506,7 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
// SERVER-16893 Forbid use of v0 indexes with non-mmapv1 engines
if (indexVersion == IndexVersion::kV0 &&
- !txn->getServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
+ !opCtx->getServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "use of v0 indexes is only allowed with the "
<< "mmapv1 storage engine");
@@ -577,7 +578,7 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
return Status(ErrorCodes::CannotCreateIndex,
"\"collation\" for an index must be a document");
}
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElement.Obj());
if (!statusWithCollator.isOK()) {
return statusWithCollator.getStatus();
@@ -696,7 +697,7 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
return Status::OK();
}
-Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
+Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* opCtx,
const BSONObj& spec) const {
const char* name = spec.getStringField("name");
invariant(name[0]);
@@ -706,7 +707,7 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
{
// Check both existing and in-progress indexes (2nd param = true)
- const IndexDescriptor* desc = findIndexByName(txn, name, true);
+ const IndexDescriptor* desc = findIndexByName(opCtx, name, true);
if (desc) {
// index already exists with same name
@@ -736,7 +737,7 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
<< spec);
}
- IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
+ IndexDescriptor temp(_collection, _getAccessMethodName(opCtx, key), spec);
if (!desc->areIndexOptionsEquivalent(&temp))
return Status(ErrorCodes::IndexOptionsConflict,
str::stream() << "Index with name: " << name
@@ -753,12 +754,12 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
// Check both existing and in-progress indexes.
const bool findInProgressIndexes = true;
const IndexDescriptor* desc =
- findIndexByKeyPatternAndCollationSpec(txn, key, collation, findInProgressIndexes);
+ findIndexByKeyPatternAndCollationSpec(opCtx, key, collation, findInProgressIndexes);
if (desc) {
LOG(2) << "index already exists with diff name " << name << " pattern: " << key
<< " collation: " << collation;
- IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
+ IndexDescriptor temp(_collection, _getAccessMethodName(opCtx, key), spec);
if (!desc->areIndexOptionsEquivalent(&temp))
return Status(ErrorCodes::IndexOptionsConflict,
str::stream() << "Index: " << spec
@@ -770,7 +771,7 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
}
}
- if (numIndexesTotal(txn) >= _maxNumIndexesAllowed) {
+ if (numIndexesTotal(opCtx) >= _maxNumIndexesAllowed) {
string s = str::stream() << "add index fails, too many indexes for "
<< _collection->ns().ns() << " key:" << key;
log() << s;
@@ -783,7 +784,7 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
if (pluginName == IndexNames::TEXT) {
vector<IndexDescriptor*> textIndexes;
const bool includeUnfinishedIndexes = true;
- findIndexByType(txn, IndexNames::TEXT, textIndexes, includeUnfinishedIndexes);
+ findIndexByType(opCtx, IndexNames::TEXT, textIndexes, includeUnfinishedIndexes);
if (textIndexes.size() > 0) {
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "only one text index per collection allowed, "
@@ -813,8 +814,8 @@ BSONObj IndexCatalog::getDefaultIdIndexSpec(
return b.obj();
}
-Status IndexCatalog::dropAllIndexes(OperationContext* txn, bool includingIdIndex) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+Status IndexCatalog::dropAllIndexes(OperationContext* opCtx, bool includingIdIndex) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
BackgroundOperation::assertNoBgOpInProgForNs(_collection->ns().ns());
@@ -825,14 +826,14 @@ Status IndexCatalog::dropAllIndexes(OperationContext* txn, bool includingIdIndex
// make sure nothing in progress
massert(17348,
"cannot dropAllIndexes when index builds in progress",
- numIndexesTotal(txn) == numIndexesReady(txn));
+ numIndexesTotal(opCtx) == numIndexesReady(opCtx));
bool haveIdIndex = false;
vector<string> indexNamesToDrop;
{
int seen = 0;
- IndexIterator ii = getIndexIterator(txn, true);
+ IndexIterator ii = getIndexIterator(opCtx, true);
while (ii.more()) {
seen++;
IndexDescriptor* desc = ii.next();
@@ -842,39 +843,39 @@ Status IndexCatalog::dropAllIndexes(OperationContext* txn, bool includingIdIndex
}
indexNamesToDrop.push_back(desc->indexName());
}
- invariant(seen == numIndexesTotal(txn));
+ invariant(seen == numIndexesTotal(opCtx));
}
for (size_t i = 0; i < indexNamesToDrop.size(); i++) {
string indexName = indexNamesToDrop[i];
- IndexDescriptor* desc = findIndexByName(txn, indexName, true);
+ IndexDescriptor* desc = findIndexByName(opCtx, indexName, true);
invariant(desc);
LOG(1) << "\t dropAllIndexes dropping: " << desc->toString();
IndexCatalogEntry* entry = _entries.find(desc);
invariant(entry);
- _dropIndex(txn, entry);
+ _dropIndex(opCtx, entry);
}
// verify state is sane post cleaning
long long numIndexesInCollectionCatalogEntry =
- _collection->getCatalogEntry()->getTotalIndexCount(txn);
+ _collection->getCatalogEntry()->getTotalIndexCount(opCtx);
if (haveIdIndex) {
- fassert(17324, numIndexesTotal(txn) == 1);
- fassert(17325, numIndexesReady(txn) == 1);
+ fassert(17324, numIndexesTotal(opCtx) == 1);
+ fassert(17325, numIndexesReady(opCtx) == 1);
fassert(17326, numIndexesInCollectionCatalogEntry == 1);
fassert(17336, _entries.size() == 1);
} else {
- if (numIndexesTotal(txn) || numIndexesInCollectionCatalogEntry || _entries.size()) {
+ if (numIndexesTotal(opCtx) || numIndexesInCollectionCatalogEntry || _entries.size()) {
error() << "About to fassert - "
- << " numIndexesTotal(): " << numIndexesTotal(txn)
+ << " numIndexesTotal(): " << numIndexesTotal(opCtx)
<< " numSystemIndexesEntries: " << numIndexesInCollectionCatalogEntry
<< " _entries.size(): " << _entries.size()
<< " indexNamesToDrop: " << indexNamesToDrop.size()
<< " haveIdIndex: " << haveIdIndex;
}
- fassert(17327, numIndexesTotal(txn) == 0);
+ fassert(17327, numIndexesTotal(opCtx) == 0);
fassert(17328, numIndexesInCollectionCatalogEntry == 0);
fassert(17337, _entries.size() == 0);
}
@@ -882,34 +883,34 @@ Status IndexCatalog::dropAllIndexes(OperationContext* txn, bool includingIdIndex
return Status::OK();
}
-Status IndexCatalog::dropIndex(OperationContext* txn, IndexDescriptor* desc) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+Status IndexCatalog::dropIndex(OperationContext* opCtx, IndexDescriptor* desc) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
IndexCatalogEntry* entry = _entries.find(desc);
if (!entry)
return Status(ErrorCodes::InternalError, "cannot find index to delete");
- if (!entry->isReady(txn))
+ if (!entry->isReady(opCtx))
return Status(ErrorCodes::InternalError, "cannot delete not ready index");
BackgroundOperation::assertNoBgOpInProgForNs(_collection->ns().ns());
- return _dropIndex(txn, entry);
+ return _dropIndex(opCtx, entry);
}
namespace {
class IndexRemoveChange final : public RecoveryUnit::Change {
public:
- IndexRemoveChange(OperationContext* txn,
+ IndexRemoveChange(OperationContext* opCtx,
Collection* collection,
IndexCatalogEntryContainer* entries,
IndexCatalogEntry* entry)
- : _txn(txn), _collection(collection), _entries(entries), _entry(entry) {}
+ : _opCtx(opCtx), _collection(collection), _entries(entries), _entry(entry) {}
void commit() final {
// Ban reading from this collection on committed reads on snapshots before now.
- auto replCoord = repl::ReplicationCoordinator::get(_txn);
- auto snapshotName = replCoord->reserveSnapshotName(_txn);
+ auto replCoord = repl::ReplicationCoordinator::get(_opCtx);
+ auto snapshotName = replCoord->reserveSnapshotName(_opCtx);
replCoord->forceSnapshotCreation(); // Ensures a newer snapshot gets created even if idle.
_collection->setMinimumVisibleSnapshot(snapshotName);
@@ -918,18 +919,18 @@ public:
void rollback() final {
_entries->add(_entry);
- _collection->infoCache()->addedIndex(_txn, _entry->descriptor());
+ _collection->infoCache()->addedIndex(_opCtx, _entry->descriptor());
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
Collection* _collection;
IndexCatalogEntryContainer* _entries;
IndexCatalogEntry* _entry;
};
} // namespace
-Status IndexCatalog::_dropIndex(OperationContext* txn, IndexCatalogEntry* entry) {
+Status IndexCatalog::_dropIndex(OperationContext* opCtx, IndexCatalogEntry* entry) {
/**
* IndexState in order
* <db>.system.indexes
@@ -955,7 +956,7 @@ Status IndexCatalog::_dropIndex(OperationContext* txn, IndexCatalogEntry* entry)
// being built.
// TODO only kill cursors that are actually using the index rather than everything on this
// collection.
- if (entry->isReady(txn)) {
+ if (entry->isReady(opCtx)) {
_collection->getCursorManager()->invalidateAll(
false, str::stream() << "index '" << indexName << "' dropped");
}
@@ -964,21 +965,22 @@ Status IndexCatalog::_dropIndex(OperationContext* txn, IndexCatalogEntry* entry)
audit::logDropIndex(&cc(), indexName, _collection->ns().ns());
invariant(_entries.release(entry->descriptor()) == entry);
- txn->recoveryUnit()->registerChange(new IndexRemoveChange(txn, _collection, &_entries, entry));
+ opCtx->recoveryUnit()->registerChange(
+ new IndexRemoveChange(opCtx, _collection, &_entries, entry));
entry = NULL;
- _deleteIndexFromDisk(txn, indexName, indexNamespace);
+ _deleteIndexFromDisk(opCtx, indexName, indexNamespace);
_checkMagic();
- _collection->infoCache()->droppedIndex(txn, indexName);
+ _collection->infoCache()->droppedIndex(opCtx, indexName);
return Status::OK();
}
-void IndexCatalog::_deleteIndexFromDisk(OperationContext* txn,
+void IndexCatalog::_deleteIndexFromDisk(OperationContext* opCtx,
const string& indexName,
const string& indexNamespace) {
- Status status = _collection->getCatalogEntry()->removeIndex(txn, indexName);
+ Status status = _collection->getCatalogEntry()->removeIndex(opCtx, indexName);
if (status.code() == ErrorCodes::NamespaceNotFound) {
// this is ok, as we may be partially through index creation
} else if (!status.isOK()) {
@@ -987,30 +989,30 @@ void IndexCatalog::_deleteIndexFromDisk(OperationContext* txn,
}
}
-vector<BSONObj> IndexCatalog::getAndClearUnfinishedIndexes(OperationContext* txn) {
+vector<BSONObj> IndexCatalog::getAndClearUnfinishedIndexes(OperationContext* opCtx) {
vector<BSONObj> toReturn = _unfinishedIndexes;
_unfinishedIndexes.clear();
for (size_t i = 0; i < toReturn.size(); i++) {
BSONObj spec = toReturn[i];
BSONObj keyPattern = spec.getObjectField("key");
- IndexDescriptor desc(_collection, _getAccessMethodName(txn, keyPattern), spec);
+ IndexDescriptor desc(_collection, _getAccessMethodName(opCtx, keyPattern), spec);
- _deleteIndexFromDisk(txn, desc.indexName(), desc.indexNamespace());
+ _deleteIndexFromDisk(opCtx, desc.indexName(), desc.indexNamespace());
}
return toReturn;
}
-bool IndexCatalog::isMultikey(OperationContext* txn, const IndexDescriptor* idx) {
+bool IndexCatalog::isMultikey(OperationContext* opCtx, const IndexDescriptor* idx) {
IndexCatalogEntry* entry = _entries.find(idx);
invariant(entry);
return entry->isMultikey();
}
-MultikeyPaths IndexCatalog::getMultikeyPaths(OperationContext* txn, const IndexDescriptor* idx) {
+MultikeyPaths IndexCatalog::getMultikeyPaths(OperationContext* opCtx, const IndexDescriptor* idx) {
IndexCatalogEntry* entry = _entries.find(idx);
invariant(entry);
- return entry->getMultikeyPaths(txn);
+ return entry->getMultikeyPaths(opCtx);
}
// ---------------------------
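IndexRemoveChange above follows the RecoveryUnit::Change protocol: registerChange() hands ownership of a two-sided handler to the recovery unit, which invokes commit() or rollback() when the enclosing unit of work resolves. A generic sketch of the pattern (the class, cache, and entry are illustrative):

    class RemoveEntryChange final : public RecoveryUnit::Change {
    public:
        RemoveEntryChange(Cache* cache, Entry* entry) : _cache(cache), _entry(entry) {}
        void commit() final { delete _entry; }          // removal is now durable
        void rollback() final { _cache->add(_entry); }  // reinstate prior state
    private:
        Cache* _cache;
        Entry* _entry;
    };
    opCtx->recoveryUnit()->registerChange(new RemoveEntryChange(cache, entry));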
@@ -1019,32 +1021,32 @@ bool IndexCatalog::haveAnyIndexes() const {
return _entries.size() != 0;
}
-int IndexCatalog::numIndexesTotal(OperationContext* txn) const {
+int IndexCatalog::numIndexesTotal(OperationContext* opCtx) const {
int count = _entries.size() + _unfinishedIndexes.size();
- dassert(_collection->getCatalogEntry()->getTotalIndexCount(txn) == count);
+ dassert(_collection->getCatalogEntry()->getTotalIndexCount(opCtx) == count);
return count;
}
-int IndexCatalog::numIndexesReady(OperationContext* txn) const {
+int IndexCatalog::numIndexesReady(OperationContext* opCtx) const {
int count = 0;
- IndexIterator ii = getIndexIterator(txn, /*includeUnfinished*/ false);
+ IndexIterator ii = getIndexIterator(opCtx, /*includeUnfinished*/ false);
while (ii.more()) {
ii.next();
count++;
}
- dassert(_collection->getCatalogEntry()->getCompletedIndexCount(txn) == count);
+ dassert(_collection->getCatalogEntry()->getCompletedIndexCount(opCtx) == count);
return count;
}
-bool IndexCatalog::haveIdIndex(OperationContext* txn) const {
- return findIdIndex(txn) != NULL;
+bool IndexCatalog::haveIdIndex(OperationContext* opCtx) const {
+ return findIdIndex(opCtx) != NULL;
}
-IndexCatalog::IndexIterator::IndexIterator(OperationContext* txn,
+IndexCatalog::IndexIterator::IndexIterator(OperationContext* opCtx,
const IndexCatalog* cat,
bool includeUnfinishedIndexes)
: _includeUnfinishedIndexes(includeUnfinishedIndexes),
- _txn(txn),
+ _opCtx(opCtx),
_catalog(cat),
_iterator(cat->_entries.begin()),
_start(true),
@@ -1086,7 +1088,7 @@ void IndexCatalog::IndexIterator::_advance() {
if (!_includeUnfinishedIndexes) {
if (auto minSnapshot = entry->getMinimumVisibleSnapshot()) {
- if (auto mySnapshot = _txn->recoveryUnit()->getMajorityCommittedSnapshot()) {
+ if (auto mySnapshot = _opCtx->recoveryUnit()->getMajorityCommittedSnapshot()) {
if (mySnapshot < minSnapshot) {
// This index isn't finished in my snapshot.
continue;
@@ -1094,7 +1096,7 @@ void IndexCatalog::IndexIterator::_advance() {
}
}
- if (!entry->isReady(_txn))
+ if (!entry->isReady(_opCtx))
continue;
}
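_advance() filters on both readiness and the minimum visible snapshot, so consumers see only indexes valid for their read source. Iteration itself is straightforward; a hedged consumer sketch:

    const bool includeUnfinishedIndexes = false;
    IndexCatalog::IndexIterator ii =
        collection->getIndexCatalog()->getIndexIterator(opCtx, includeUnfinishedIndexes);
    while (ii.more()) {
        IndexDescriptor* desc = ii.next();
        log() << "index: " << desc->indexName();  // illustrative use
    }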
@@ -1104,8 +1106,8 @@ void IndexCatalog::IndexIterator::_advance() {
}
-IndexDescriptor* IndexCatalog::findIdIndex(OperationContext* txn) const {
- IndexIterator ii = getIndexIterator(txn, false);
+IndexDescriptor* IndexCatalog::findIdIndex(OperationContext* opCtx) const {
+ IndexIterator ii = getIndexIterator(opCtx, false);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
if (desc->isIdIndex())
@@ -1114,10 +1116,10 @@ IndexDescriptor* IndexCatalog::findIdIndex(OperationContext* txn) const {
return NULL;
}
-IndexDescriptor* IndexCatalog::findIndexByName(OperationContext* txn,
+IndexDescriptor* IndexCatalog::findIndexByName(OperationContext* opCtx,
StringData name,
bool includeUnfinishedIndexes) const {
- IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ IndexIterator ii = getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
if (desc->indexName() == name)
@@ -1127,11 +1129,11 @@ IndexDescriptor* IndexCatalog::findIndexByName(OperationContext* txn,
}
IndexDescriptor* IndexCatalog::findIndexByKeyPatternAndCollationSpec(
- OperationContext* txn,
+ OperationContext* opCtx,
const BSONObj& key,
const BSONObj& collationSpec,
bool includeUnfinishedIndexes) const {
- IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ IndexIterator ii = getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
if (SimpleBSONObjComparator::kInstance.evaluate(desc->keyPattern() == key) &&
@@ -1143,12 +1145,12 @@ IndexDescriptor* IndexCatalog::findIndexByKeyPatternAndCollationSpec(
return NULL;
}
-void IndexCatalog::findIndexesByKeyPattern(OperationContext* txn,
+void IndexCatalog::findIndexesByKeyPattern(OperationContext* opCtx,
const BSONObj& key,
bool includeUnfinishedIndexes,
std::vector<IndexDescriptor*>* matches) const {
invariant(matches);
- IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ IndexIterator ii = getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
if (SimpleBSONObjComparator::kInstance.evaluate(desc->keyPattern() == key)) {
@@ -1157,12 +1159,12 @@ void IndexCatalog::findIndexesByKeyPattern(OperationContext* txn,
}
}
-IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex(OperationContext* txn,
+IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex(OperationContext* opCtx,
const BSONObj& shardKey,
bool requireSingleKey) const {
IndexDescriptor* best = NULL;
- IndexIterator ii = getIndexIterator(txn, false);
+ IndexIterator ii = getIndexIterator(opCtx, false);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
bool hasSimpleCollation = desc->infoObj().getObjectField("collation").isEmpty();
@@ -1173,7 +1175,7 @@ IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex(OperationContext* txn,
if (!shardKey.isPrefixOf(desc->keyPattern(), SimpleBSONElementComparator::kInstance))
continue;
- if (!desc->isMultikey(txn) && hasSimpleCollation)
+ if (!desc->isMultikey(opCtx) && hasSimpleCollation)
return desc;
if (!requireSingleKey && hasSimpleCollation)
@@ -1183,11 +1185,11 @@ IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex(OperationContext* txn,
return best;
}
-void IndexCatalog::findIndexByType(OperationContext* txn,
+void IndexCatalog::findIndexByType(OperationContext* opCtx,
const string& type,
vector<IndexDescriptor*>& matches,
bool includeUnfinishedIndexes) const {
- IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ IndexIterator ii = getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
if (IndexNames::findPluginName(desc->keyPattern()) == type) {
@@ -1213,13 +1215,13 @@ const IndexCatalogEntry* IndexCatalog::getEntry(const IndexDescriptor* desc) con
}
-const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* txn,
+const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* opCtx,
const IndexDescriptor* oldDesc) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
invariant(!BackgroundOperation::inProgForNs(_collection->ns()));
const std::string indexName = oldDesc->indexName();
- invariant(_collection->getCatalogEntry()->isIndexReady(txn, indexName));
+ invariant(_collection->getCatalogEntry()->isIndexReady(opCtx, indexName));
// Notify other users of the IndexCatalog that we're about to invalidate 'oldDesc'.
const bool collectionGoingAway = false;
@@ -1229,19 +1231,19 @@ const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* txn,
// Delete the IndexCatalogEntry that owns this descriptor. After deletion, 'oldDesc' is
// invalid and should not be dereferenced.
IndexCatalogEntry* oldEntry = _entries.release(oldDesc);
- txn->recoveryUnit()->registerChange(
- new IndexRemoveChange(txn, _collection, &_entries, oldEntry));
+ opCtx->recoveryUnit()->registerChange(
+ new IndexRemoveChange(opCtx, _collection, &_entries, oldEntry));
// Ask the CollectionCatalogEntry for the new index spec.
- BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(txn, indexName).getOwned();
+ BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(opCtx, indexName).getOwned();
BSONObj keyPattern = spec.getObjectField("key");
// Re-register this index in the index catalog with the new spec.
IndexDescriptor* newDesc =
- new IndexDescriptor(_collection, _getAccessMethodName(txn, keyPattern), spec);
+ new IndexDescriptor(_collection, _getAccessMethodName(opCtx, keyPattern), spec);
const bool initFromDisk = false;
- const IndexCatalogEntry* newEntry = _setupInMemoryStructures(txn, newDesc, initFromDisk);
- invariant(newEntry->isReady(txn));
+ const IndexCatalogEntry* newEntry = _setupInMemoryStructures(opCtx, newDesc, initFromDisk);
+ invariant(newEntry->isReady(opCtx));
// Return the new descriptor.
return newEntry->descriptor();
@@ -1249,18 +1251,18 @@ const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* txn,
// ---------------------------
-Status IndexCatalog::_indexFilteredRecords(OperationContext* txn,
+Status IndexCatalog::_indexFilteredRecords(OperationContext* opCtx,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut) {
InsertDeleteOptions options;
- prepareInsertDeleteOptions(txn, index->descriptor(), &options);
+ prepareInsertDeleteOptions(opCtx, index->descriptor(), &options);
for (auto bsonRecord : bsonRecords) {
int64_t inserted;
invariant(bsonRecord.id != RecordId());
Status status = index->accessMethod()->insert(
- txn, *bsonRecord.docPtr, bsonRecord.id, options, &inserted);
+ opCtx, *bsonRecord.docPtr, bsonRecord.id, options, &inserted);
if (!status.isOK())
return status;
@@ -1271,13 +1273,13 @@ Status IndexCatalog::_indexFilteredRecords(OperationContext* txn,
return Status::OK();
}
-Status IndexCatalog::_indexRecords(OperationContext* txn,
+Status IndexCatalog::_indexRecords(OperationContext* opCtx,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut) {
const MatchExpression* filter = index->getFilterExpression();
if (!filter)
- return _indexFilteredRecords(txn, index, bsonRecords, keysInsertedOut);
+ return _indexFilteredRecords(opCtx, index, bsonRecords, keysInsertedOut);
std::vector<BsonRecord> filteredBsonRecords;
for (auto bsonRecord : bsonRecords) {
@@ -1285,26 +1287,26 @@ Status IndexCatalog::_indexRecords(OperationContext* txn,
filteredBsonRecords.push_back(bsonRecord);
}
- return _indexFilteredRecords(txn, index, filteredBsonRecords, keysInsertedOut);
+ return _indexFilteredRecords(opCtx, index, filteredBsonRecords, keysInsertedOut);
}
-Status IndexCatalog::_unindexRecord(OperationContext* txn,
+Status IndexCatalog::_unindexRecord(OperationContext* opCtx,
IndexCatalogEntry* index,
const BSONObj& obj,
const RecordId& loc,
bool logIfError,
int64_t* keysDeletedOut) {
InsertDeleteOptions options;
- prepareInsertDeleteOptions(txn, index->descriptor(), &options);
+ prepareInsertDeleteOptions(opCtx, index->descriptor(), &options);
options.logIfError = logIfError;
// For unindex operations, dupsAllowed=false really means that it is safe to delete anything
 // that matches the key, without checking the RecordId, since dups are impossible. We need
// to disable this behavior for in-progress indexes. See SERVER-17487 for more details.
- options.dupsAllowed = options.dupsAllowed || !index->isReady(txn);
+ options.dupsAllowed = options.dupsAllowed || !index->isReady(opCtx);
int64_t removed;
- Status status = index->accessMethod()->remove(txn, obj, loc, options, &removed);
+ Status status = index->accessMethod()->remove(opCtx, obj, loc, options, &removed);
if (!status.isOK()) {
log() << "Couldn't unindex record " << redact(obj) << " from collection "
@@ -1319,7 +1321,7 @@ Status IndexCatalog::_unindexRecord(OperationContext* txn,
}
-Status IndexCatalog::indexRecords(OperationContext* txn,
+Status IndexCatalog::indexRecords(OperationContext* opCtx,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut) {
if (keysInsertedOut) {
@@ -1328,7 +1330,7 @@ Status IndexCatalog::indexRecords(OperationContext* txn,
for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end();
++i) {
- Status s = _indexRecords(txn, *i, bsonRecords, keysInsertedOut);
+ Status s = _indexRecords(opCtx, *i, bsonRecords, keysInsertedOut);
if (!s.isOK())
return s;
}
@@ -1336,7 +1338,7 @@ Status IndexCatalog::indexRecords(OperationContext* txn,
return Status::OK();
}
-void IndexCatalog::unindexRecord(OperationContext* txn,
+void IndexCatalog::unindexRecord(OperationContext* opCtx,
const BSONObj& obj,
const RecordId& loc,
bool noWarn,
@@ -1350,8 +1352,8 @@ void IndexCatalog::unindexRecord(OperationContext* txn,
IndexCatalogEntry* entry = *i;
// If it's a background index, we DO NOT want to log anything.
- bool logIfError = entry->isReady(txn) ? !noWarn : false;
- _unindexRecord(txn, entry, obj, loc, logIfError, keysDeletedOut);
+ bool logIfError = entry->isReady(opCtx) ? !noWarn : false;
+ _unindexRecord(opCtx, entry, obj, loc, logIfError, keysDeletedOut);
}
}
@@ -1365,11 +1367,11 @@ BSONObj IndexCatalog::fixIndexKey(const BSONObj& key) {
return key;
}
-void IndexCatalog::prepareInsertDeleteOptions(OperationContext* txn,
+void IndexCatalog::prepareInsertDeleteOptions(OperationContext* opCtx,
const IndexDescriptor* desc,
InsertDeleteOptions* options) {
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- if (replCoord->shouldRelaxIndexConstraints(txn, NamespaceString(desc->parentNS()))) {
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ if (replCoord->shouldRelaxIndexConstraints(opCtx, NamespaceString(desc->parentNS()))) {
options->getKeysMode = IndexAccessMethod::GetKeysMode::kRelaxConstraints;
} else {
options->getKeysMode = IndexAccessMethod::GetKeysMode::kEnforceConstraints;
@@ -1384,7 +1386,7 @@ void IndexCatalog::prepareInsertDeleteOptions(OperationContext* txn,
}
}
-StatusWith<BSONObj> IndexCatalog::_fixIndexSpec(OperationContext* txn,
+StatusWith<BSONObj> IndexCatalog::_fixIndexSpec(OperationContext* opCtx,
Collection* collection,
const BSONObj& spec) {
auto statusWithSpec = IndexLegacy::adjustIndexSpecObject(spec);
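The index_catalog.cpp hunks above are mechanical: every lookup helper now threads the renamed OperationContext* opCtx in place of txn. A minimal caller sketch, restricted to signatures that appear in this diff (the in-scope Collection* is an assumption):

    // Sketch: 'coll' is an assumed Collection*; the IndexCatalog calls match
    // the signatures shown in this diff.
    void listReadyIndexes(OperationContext* opCtx, Collection* coll) {
        IndexCatalog::IndexIterator ii =
            coll->getIndexCatalog()->getIndexIterator(opCtx, false /*includeUnfinishedIndexes*/);
        while (ii.more()) {
            IndexDescriptor* desc = ii.next();
            log() << desc->indexName() << ": " << desc->keyPattern();
        }
    }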
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 4016b7860d4..469b86f04a8 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -60,17 +60,17 @@ public:
~IndexCatalog();
// must be called before used
- Status init(OperationContext* txn);
+ Status init(OperationContext* opCtx);
bool ok() const;
// ---- accessors -----
bool haveAnyIndexes() const;
- int numIndexesTotal(OperationContext* txn) const;
- int numIndexesReady(OperationContext* txn) const;
- int numIndexesInProgress(OperationContext* txn) const {
- return numIndexesTotal(txn) - numIndexesReady(txn);
+ int numIndexesTotal(OperationContext* opCtx) const;
+ int numIndexesReady(OperationContext* opCtx) const;
+ int numIndexesInProgress(OperationContext* opCtx) const {
+ return numIndexesTotal(opCtx) - numIndexesReady(opCtx);
}
/**
@@ -78,7 +78,7 @@ public:
* in which case everything from this tree has to go away
*/
- bool haveIdIndex(OperationContext* txn) const;
+ bool haveIdIndex(OperationContext* opCtx) const;
/**
* Returns the spec for the id index to create by default for this collection.
@@ -86,14 +86,14 @@ public:
BSONObj getDefaultIdIndexSpec(
ServerGlobalParams::FeatureCompatibility::Version featureCompatibilityVersion) const;
- IndexDescriptor* findIdIndex(OperationContext* txn) const;
+ IndexDescriptor* findIdIndex(OperationContext* opCtx) const;
/**
* Find index by name. The index name uniquely identifies an index.
*
* @return null if cannot find
*/
- IndexDescriptor* findIndexByName(OperationContext* txn,
+ IndexDescriptor* findIndexByName(OperationContext* opCtx,
StringData name,
bool includeUnfinishedIndexes = false) const;
@@ -108,7 +108,7 @@ public:
* collation.
*/
IndexDescriptor* findIndexByKeyPatternAndCollationSpec(
- OperationContext* txn,
+ OperationContext* opCtx,
const BSONObj& key,
const BSONObj& collationSpec,
bool includeUnfinishedIndexes = false) const;
@@ -119,7 +119,7 @@ public:
*
* Consider using 'findIndexByName' if expecting to match one index.
*/
- void findIndexesByKeyPattern(OperationContext* txn,
+ void findIndexesByKeyPattern(OperationContext* opCtx,
const BSONObj& key,
bool includeUnfinishedIndexes,
std::vector<IndexDescriptor*>* matches) const;
@@ -137,11 +137,11 @@ public:
*
* If no such index exists, returns NULL.
*/
- IndexDescriptor* findShardKeyPrefixedIndex(OperationContext* txn,
+ IndexDescriptor* findShardKeyPrefixedIndex(OperationContext* opCtx,
const BSONObj& shardKey,
bool requireSingleKey) const;
- void findIndexByType(OperationContext* txn,
+ void findIndexByType(OperationContext* opCtx,
const std::string& type,
std::vector<IndexDescriptor*>& matches,
bool includeUnfinishedIndexes = false) const;
@@ -158,7 +158,7 @@ public:
* an invalidateAll() on the cursor manager to notify other users of the IndexCatalog that
* this descriptor is now invalid.
*/
- const IndexDescriptor* refreshEntry(OperationContext* txn, const IndexDescriptor* oldDesc);
+ const IndexDescriptor* refreshEntry(OperationContext* opCtx, const IndexDescriptor* oldDesc);
// never returns NULL
const IndexCatalogEntry* getEntry(const IndexDescriptor* desc) const;
@@ -184,7 +184,7 @@ public:
IndexCatalogEntry* catalogEntry(const IndexDescriptor* desc);
private:
- IndexIterator(OperationContext* txn,
+ IndexIterator(OperationContext* opCtx,
const IndexCatalog* cat,
bool includeUnfinishedIndexes);
@@ -192,7 +192,7 @@ public:
bool _includeUnfinishedIndexes;
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
const IndexCatalog* _catalog;
IndexCatalogEntryContainer::const_iterator _iterator;
@@ -204,8 +204,8 @@ public:
friend class IndexCatalog;
};
- IndexIterator getIndexIterator(OperationContext* txn, bool includeUnfinishedIndexes) const {
- return IndexIterator(txn, this, includeUnfinishedIndexes);
+ IndexIterator getIndexIterator(OperationContext* opCtx, bool includeUnfinishedIndexes) const {
+ return IndexIterator(opCtx, this, includeUnfinishedIndexes);
};
// ---- index set modifiers ------
@@ -215,19 +215,20 @@ public:
* empty collection can be rolled back as part of a larger WUOW. Returns the full specification
* of the created index, as it is stored in this index catalog.
*/
- StatusWith<BSONObj> createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec);
+ StatusWith<BSONObj> createIndexOnEmptyCollection(OperationContext* opCtx, BSONObj spec);
- StatusWith<BSONObj> prepareSpecForCreate(OperationContext* txn, const BSONObj& original) const;
+ StatusWith<BSONObj> prepareSpecForCreate(OperationContext* opCtx,
+ const BSONObj& original) const;
- Status dropAllIndexes(OperationContext* txn, bool includingIdIndex);
+ Status dropAllIndexes(OperationContext* opCtx, bool includingIdIndex);
- Status dropIndex(OperationContext* txn, IndexDescriptor* desc);
+ Status dropIndex(OperationContext* opCtx, IndexDescriptor* desc);
/**
     * will drop all incomplete indexes and return their specs;
* after this, the indexes can be rebuilt
*/
- std::vector<BSONObj> getAndClearUnfinishedIndexes(OperationContext* txn);
+ std::vector<BSONObj> getAndClearUnfinishedIndexes(OperationContext* opCtx);
struct IndexKillCriteria {
@@ -241,7 +242,7 @@ public:
/**
* Returns true if the index 'idx' is multikey, and returns false otherwise.
*/
- bool isMultikey(OperationContext* txn, const IndexDescriptor* idx);
+ bool isMultikey(OperationContext* opCtx, const IndexDescriptor* idx);
/**
* Returns the path components that cause the index 'idx' to be multikey if the index supports
@@ -252,7 +253,7 @@ public:
* returns a vector with size equal to the number of elements in the index key pattern where
* each element in the vector is an empty set.
*/
- MultikeyPaths getMultikeyPaths(OperationContext* txn, const IndexDescriptor* idx);
+ MultikeyPaths getMultikeyPaths(OperationContext* opCtx, const IndexDescriptor* idx);
// --- these probably become private?
@@ -270,7 +271,7 @@ public:
MONGO_DISALLOW_COPYING(IndexBuildBlock);
public:
- IndexBuildBlock(OperationContext* txn, Collection* collection, const BSONObj& spec);
+ IndexBuildBlock(OperationContext* opCtx, Collection* collection, const BSONObj& spec);
~IndexBuildBlock();
@@ -300,7 +301,7 @@ public:
IndexCatalogEntry* _entry;
bool _inProgress;
- OperationContext* _txn;
+ OperationContext* _opCtx;
};
// ----- data modifiers ------
@@ -311,7 +312,7 @@ public:
*
* This method may throw.
*/
- Status indexRecords(OperationContext* txn,
+ Status indexRecords(OperationContext* opCtx,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut);
@@ -319,7 +320,7 @@ public:
* When 'keysDeletedOut' is not null, it will be set to the number of index keys removed by
* this operation.
*/
- void unindexRecord(OperationContext* txn,
+ void unindexRecord(OperationContext* opCtx,
const BSONObj& obj,
const RecordId& loc,
bool noWarn,
@@ -327,11 +328,11 @@ public:
// ------- temp internal -------
- std::string getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) {
- return _getAccessMethodName(txn, keyPattern);
+ std::string getAccessMethodName(OperationContext* opCtx, const BSONObj& keyPattern) {
+ return _getAccessMethodName(opCtx, keyPattern);
}
- Status _upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
+ Status _upgradeDatabaseMinorVersionIfNeeded(OperationContext* opCtx,
const std::string& newPluginName);
// public static helpers
@@ -342,35 +343,35 @@ public:
* Fills out 'options' in order to indicate whether to allow dups or relax
* index constraints, as needed by replication.
*/
- static void prepareInsertDeleteOptions(OperationContext* txn,
+ static void prepareInsertDeleteOptions(OperationContext* opCtx,
const IndexDescriptor* desc,
InsertDeleteOptions* options);
private:
static const BSONObj _idObj; // { _id : 1 }
- bool _shouldOverridePlugin(OperationContext* txn, const BSONObj& keyPattern) const;
+ bool _shouldOverridePlugin(OperationContext* opCtx, const BSONObj& keyPattern) const;
/**
     * This differs from IndexNames::findPluginName in that it returns the plugin name we *should*
* use, not the plugin name inside of the provided key pattern. To understand when these
* differ, see shouldOverridePlugin.
*/
- std::string _getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) const;
+ std::string _getAccessMethodName(OperationContext* opCtx, const BSONObj& keyPattern) const;
void _checkMagic() const;
- Status _indexFilteredRecords(OperationContext* txn,
+ Status _indexFilteredRecords(OperationContext* opCtx,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut);
- Status _indexRecords(OperationContext* txn,
+ Status _indexRecords(OperationContext* opCtx,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut);
- Status _unindexRecord(OperationContext* txn,
+ Status _unindexRecord(OperationContext* opCtx,
IndexCatalogEntry* index,
const BSONObj& obj,
const RecordId& loc,
@@ -380,18 +381,18 @@ private:
/**
     * this performs no sanity checks
*/
- Status _dropIndex(OperationContext* txn, IndexCatalogEntry* entry);
+ Status _dropIndex(OperationContext* opCtx, IndexCatalogEntry* entry);
    // just does disk changes
// doesn't change memory state, etc...
- void _deleteIndexFromDisk(OperationContext* txn,
+ void _deleteIndexFromDisk(OperationContext* opCtx,
const std::string& indexName,
const std::string& indexNamespace);
// descriptor ownership passes to _setupInMemoryStructures
// initFromDisk: Avoids registering a change to undo this operation when set to true.
// You must set this flag if calling this function outside of a UnitOfWork.
- IndexCatalogEntry* _setupInMemoryStructures(OperationContext* txn,
+ IndexCatalogEntry* _setupInMemoryStructures(OperationContext* opCtx,
IndexDescriptor* descriptor,
bool initFromDisk);
@@ -399,13 +400,13 @@ private:
// conform to the standard for insertion. This function adds the 'v' field if it didn't
// exist, removes the '_id' field if it exists, applies plugin-level transformations if
// appropriate, etc.
- static StatusWith<BSONObj> _fixIndexSpec(OperationContext* txn,
+ static StatusWith<BSONObj> _fixIndexSpec(OperationContext* opCtx,
Collection* collection,
const BSONObj& spec);
- Status _isSpecOk(OperationContext* txn, const BSONObj& spec) const;
+ Status _isSpecOk(OperationContext* opCtx, const BSONObj& spec) const;
- Status _doesSpecConflictWithExisting(OperationContext* txn, const BSONObj& spec) const;
+ Status _doesSpecConflictWithExisting(OperationContext* opCtx, const BSONObj& spec) const;
int _magic;
Collection* const _collection;
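Of the header changes above, the static prepareInsertDeleteOptions helper is the one with behavioral weight: it decides whether key-generation and uniqueness constraints are relaxed for replication. A hedged sketch of the unindex-side pattern it supports, assembled from the option fields touched in this diff ('entry' is an assumed IndexCatalogEntry*):

    // Fill InsertDeleteOptions for a delete, then loosen dup checking for
    // unfinished builds, as _unindexRecord does in the .cpp above.
    InsertDeleteOptions options;
    IndexCatalog::prepareInsertDeleteOptions(opCtx, entry->descriptor(), &options);
    options.logIfError = false;
    options.dupsAllowed = options.dupsAllowed || !entry->isReady(opCtx);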
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index c67b29f8559..7400536bc8b 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -59,12 +59,12 @@ public:
HeadManagerImpl(IndexCatalogEntry* ice) : _catalogEntry(ice) {}
virtual ~HeadManagerImpl() {}
- const RecordId getHead(OperationContext* txn) const {
- return _catalogEntry->head(txn);
+ const RecordId getHead(OperationContext* opCtx) const {
+ return _catalogEntry->head(opCtx);
}
- void setHead(OperationContext* txn, const RecordId newHead) {
- _catalogEntry->setHead(txn, newHead);
+ void setHead(OperationContext* opCtx, const RecordId newHead) {
+ _catalogEntry->setHead(opCtx, newHead);
}
private:
@@ -72,7 +72,7 @@ private:
IndexCatalogEntry* _catalogEntry;
};
-IndexCatalogEntry::IndexCatalogEntry(OperationContext* txn,
+IndexCatalogEntry::IndexCatalogEntry(OperationContext* opCtx,
StringData ns,
CollectionCatalogEntry* collection,
IndexDescriptor* descriptor,
@@ -86,12 +86,12 @@ IndexCatalogEntry::IndexCatalogEntry(OperationContext* txn,
_isReady(false) {
_descriptor->_cachedEntry = this;
- _isReady = _catalogIsReady(txn);
- _head = _catalogHead(txn);
+ _isReady = _catalogIsReady(opCtx);
+ _head = _catalogHead(opCtx);
{
stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
- _isMultikey.store(_catalogIsMultikey(txn, &_indexMultikeyPaths));
+ _isMultikey.store(_catalogIsMultikey(opCtx, &_indexMultikeyPaths));
_indexTracksPathLevelMultikeyInfo = !_indexMultikeyPaths.empty();
}
@@ -99,7 +99,7 @@ IndexCatalogEntry::IndexCatalogEntry(OperationContext* txn,
invariant(collationElement.isABSONObj());
BSONObj collation = collationElement.Obj();
auto statusWithCollator =
- CollatorFactoryInterface::get(txn->getServiceContext())->makeFromBSON(collation);
+ CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation);
// Index spec should have already been validated.
invariantOK(statusWithCollator.getStatus());
@@ -132,13 +132,13 @@ void IndexCatalogEntry::init(std::unique_ptr<IndexAccessMethod> accessMethod) {
_accessMethod = std::move(accessMethod);
}
-const RecordId& IndexCatalogEntry::head(OperationContext* txn) const {
- DEV invariant(_head == _catalogHead(txn));
+const RecordId& IndexCatalogEntry::head(OperationContext* opCtx) const {
+ DEV invariant(_head == _catalogHead(opCtx));
return _head;
}
-bool IndexCatalogEntry::isReady(OperationContext* txn) const {
- DEV invariant(_isReady == _catalogIsReady(txn));
+bool IndexCatalogEntry::isReady(OperationContext* opCtx) const {
+ DEV invariant(_isReady == _catalogIsReady(opCtx));
return _isReady;
}
@@ -146,7 +146,7 @@ bool IndexCatalogEntry::isMultikey() const {
return _isMultikey.load();
}
-MultikeyPaths IndexCatalogEntry::getMultikeyPaths(OperationContext* txn) const {
+MultikeyPaths IndexCatalogEntry::getMultikeyPaths(OperationContext* opCtx) const {
stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
return _indexMultikeyPaths;
}
@@ -170,10 +170,10 @@ public:
const RecordId _oldHead;
};
-void IndexCatalogEntry::setHead(OperationContext* txn, RecordId newHead) {
- _collection->setIndexHead(txn, _descriptor->indexName(), newHead);
+void IndexCatalogEntry::setHead(OperationContext* opCtx, RecordId newHead) {
+ _collection->setIndexHead(opCtx, _descriptor->indexName(), newHead);
- txn->recoveryUnit()->registerChange(new SetHeadChange(this, _head));
+ opCtx->recoveryUnit()->registerChange(new SetHeadChange(this, _head));
_head = newHead;
}
@@ -185,21 +185,21 @@ void IndexCatalogEntry::setHead(OperationContext* txn, RecordId newHead) {
*/
class RecoveryUnitSwap {
public:
- RecoveryUnitSwap(OperationContext* txn, RecoveryUnit* newRecoveryUnit)
- : _txn(txn),
- _oldRecoveryUnit(_txn->releaseRecoveryUnit()),
+ RecoveryUnitSwap(OperationContext* opCtx, RecoveryUnit* newRecoveryUnit)
+ : _opCtx(opCtx),
+ _oldRecoveryUnit(_opCtx->releaseRecoveryUnit()),
_oldRecoveryUnitState(
- _txn->setRecoveryUnit(newRecoveryUnit, OperationContext::kNotInUnitOfWork)),
+ _opCtx->setRecoveryUnit(newRecoveryUnit, OperationContext::kNotInUnitOfWork)),
_newRecoveryUnit(newRecoveryUnit) {}
~RecoveryUnitSwap() {
- _txn->releaseRecoveryUnit();
- _txn->setRecoveryUnit(_oldRecoveryUnit, _oldRecoveryUnitState);
+ _opCtx->releaseRecoveryUnit();
+ _opCtx->setRecoveryUnit(_oldRecoveryUnit, _oldRecoveryUnitState);
}
private:
// Not owned
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
// Owned, but life-time is not controlled
RecoveryUnit* const _oldRecoveryUnit;
@@ -209,7 +209,7 @@ private:
const std::unique_ptr<RecoveryUnit> _newRecoveryUnit;
};
-void IndexCatalogEntry::setMultikey(OperationContext* txn, const MultikeyPaths& multikeyPaths) {
+void IndexCatalogEntry::setMultikey(OperationContext* opCtx, const MultikeyPaths& multikeyPaths) {
if (!_indexTracksPathLevelMultikeyInfo && isMultikey()) {
// If the index is already set as multikey and we don't have any path-level information to
// update, then there's nothing more for us to do.
@@ -243,7 +243,8 @@ void IndexCatalogEntry::setMultikey(OperationContext* txn, const MultikeyPaths&
{
// Only one thread should set the multi-key value per collection, because the metadata for a
// collection is one large document.
- Lock::ResourceLock collMDLock(txn->lockState(), ResourceId(RESOURCE_METADATA, _ns), MODE_X);
+ Lock::ResourceLock collMDLock(
+ opCtx->lockState(), ResourceId(RESOURCE_METADATA, _ns), MODE_X);
if (!_indexTracksPathLevelMultikeyInfo && isMultikey()) {
// It's possible that we raced with another thread when acquiring the MD lock. If the
@@ -257,9 +258,9 @@ void IndexCatalogEntry::setMultikey(OperationContext* txn, const MultikeyPaths&
// snapshot isolation.
{
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- RecoveryUnitSwap ruSwap(txn, storageEngine->newRecoveryUnit());
+ RecoveryUnitSwap ruSwap(opCtx, storageEngine->newRecoveryUnit());
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
// It's possible that the index type (e.g. ascending/descending index) supports tracking
// path-level multikey information, but this particular index doesn't.
@@ -267,7 +268,7 @@ void IndexCatalogEntry::setMultikey(OperationContext* txn, const MultikeyPaths&
// multikey information in order to avoid unintentionally setting path-level multikey
// information on an index created before 3.4.
if (_collection->setIndexIsMultikey(
- txn,
+ opCtx,
_descriptor->indexName(),
_indexTracksPathLevelMultikeyInfo ? multikeyPaths : MultikeyPaths{})) {
if (_infoCache) {
@@ -293,17 +294,17 @@ void IndexCatalogEntry::setMultikey(OperationContext* txn, const MultikeyPaths&
// ----
-bool IndexCatalogEntry::_catalogIsReady(OperationContext* txn) const {
- return _collection->isIndexReady(txn, _descriptor->indexName());
+bool IndexCatalogEntry::_catalogIsReady(OperationContext* opCtx) const {
+ return _collection->isIndexReady(opCtx, _descriptor->indexName());
}
-RecordId IndexCatalogEntry::_catalogHead(OperationContext* txn) const {
- return _collection->getIndexHead(txn, _descriptor->indexName());
+RecordId IndexCatalogEntry::_catalogHead(OperationContext* opCtx) const {
+ return _collection->getIndexHead(opCtx, _descriptor->indexName());
}
-bool IndexCatalogEntry::_catalogIsMultikey(OperationContext* txn,
+bool IndexCatalogEntry::_catalogIsMultikey(OperationContext* opCtx,
MultikeyPaths* multikeyPaths) const {
- return _collection->isIndexMultikey(txn, _descriptor->indexName(), multikeyPaths);
+ return _collection->isIndexMultikey(opCtx, _descriptor->indexName(), multikeyPaths);
}
// ------------------
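RecoveryUnitSwap, the file-local RAII helper renamed above, is what lets setMultikey persist the multikey flag independently of the caller's snapshot. A sketch of the idiom, using only the classes and calls visible in this diff:

    // Swap in a fresh recovery unit, commit a small metadata write in it, and
    // let ~RecoveryUnitSwap restore the caller's original recovery unit.
    {
        StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
        RecoveryUnitSwap ruSwap(opCtx, storageEngine->newRecoveryUnit());

        WriteUnitOfWork wuow(opCtx);
        // ... catalog write against the swapped-in recovery unit ...
        wuow.commit();
    }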
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index 64f72822404..065d1a544ac 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -56,7 +56,7 @@ class IndexCatalogEntry {
MONGO_DISALLOW_COPYING(IndexCatalogEntry);
public:
- IndexCatalogEntry(OperationContext* txn,
+ IndexCatalogEntry(OperationContext* opCtx,
StringData ns,
CollectionCatalogEntry* collection, // not owned
IndexDescriptor* descriptor, // ownership passes to me
@@ -98,9 +98,9 @@ public:
/// ---------------------
- const RecordId& head(OperationContext* txn) const;
+ const RecordId& head(OperationContext* opCtx) const;
- void setHead(OperationContext* txn, RecordId newHead);
+ void setHead(OperationContext* opCtx, RecordId newHead);
void setIsReady(bool newIsReady);
@@ -124,7 +124,7 @@ public:
* returns a vector with size equal to the number of elements in the index key pattern where
* each element in the vector is an empty set.
*/
- MultikeyPaths getMultikeyPaths(OperationContext* txn) const;
+ MultikeyPaths getMultikeyPaths(OperationContext* opCtx) const;
/**
* Sets this index to be multikey. Information regarding which newly detected path components
@@ -136,10 +136,10 @@ public:
* with size equal to the number of elements in the index key pattern. Additionally, at least
* one path component of the indexed fields must cause this index to be multikey.
*/
- void setMultikey(OperationContext* txn, const MultikeyPaths& multikeyPaths);
+ void setMultikey(OperationContext* opCtx, const MultikeyPaths& multikeyPaths);
    // if this index is ready for queries
- bool isReady(OperationContext* txn) const;
+ bool isReady(OperationContext* opCtx) const;
/**
* If return value is not boost::none, reads with majority read concern using an older snapshot
@@ -157,15 +157,15 @@ private:
class SetMultikeyChange;
class SetHeadChange;
- bool _catalogIsReady(OperationContext* txn) const;
- RecordId _catalogHead(OperationContext* txn) const;
+ bool _catalogIsReady(OperationContext* opCtx) const;
+ RecordId _catalogHead(OperationContext* opCtx) const;
/**
     * Retrieves the multikey information associated with this index from '_collection'.
*
* See CollectionCatalogEntry::isIndexMultikey() for more details.
*/
- bool _catalogIsMultikey(OperationContext* txn, MultikeyPaths* multikeyPaths) const;
+ bool _catalogIsMultikey(OperationContext* opCtx, MultikeyPaths* multikeyPaths) const;
// -----
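The MultikeyPaths contract described above is easier to see with a concrete shape. A hedged illustration, assuming the conventional definition of MultikeyPaths as one set of path-component indices per key-pattern element ('entry' is an assumed IndexCatalogEntry*):

    // For an index on { "a.b": 1, "c": 1 }, a document { a: [{b: 1}, {b: 2}] }
    // makes path component 0 ("a") of the first field multikey.
    MultikeyPaths paths = entry->getMultikeyPaths(opCtx);
    invariant(paths.size() == 2U);   // one entry per key-pattern element
    // paths[0] == {0}   -> "a" is an array along "a.b"
    // paths[1].empty()  -> "c" contributes no multikey components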
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index d7c86f27163..622706776f5 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -124,9 +124,9 @@ private:
MultiIndexBlock* const _indexer;
};
-MultiIndexBlock::MultiIndexBlock(OperationContext* txn, Collection* collection)
+MultiIndexBlock::MultiIndexBlock(OperationContext* opCtx, Collection* collection)
: _collection(collection),
- _txn(txn),
+ _opCtx(opCtx),
_buildInBackground(false),
_allowInterruption(false),
_ignoreUnique(false),
@@ -137,7 +137,7 @@ MultiIndexBlock::~MultiIndexBlock() {
return;
while (true) {
try {
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(_opCtx);
// This cleans up all index builds.
// Because that may need to write, it is done inside
// of a WUOW. Nothing inside this block can fail, and it is made fatal if it does.
@@ -164,7 +164,7 @@ MultiIndexBlock::~MultiIndexBlock() {
void MultiIndexBlock::removeExistingIndexes(std::vector<BSONObj>* specs) const {
for (size_t i = 0; i < specs->size(); i++) {
Status status =
- _collection->getIndexCatalog()->prepareSpecForCreate(_txn, (*specs)[i]).getStatus();
+ _collection->getIndexCatalog()->prepareSpecForCreate(_opCtx, (*specs)[i]).getStatus();
if (status.code() == ErrorCodes::IndexAlreadyExists) {
specs->erase(specs->begin() + i);
i--;
@@ -179,10 +179,10 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const BSONObj& spec) {
}
StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(_opCtx);
invariant(_indexes.empty());
- _txn->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
+ _opCtx->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
const string& ns = _collection->ns().ns();
@@ -199,7 +199,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
string pluginName = IndexNames::findPluginName(info["key"].Obj());
if (pluginName.size()) {
Status s = _collection->getIndexCatalog()->_upgradeDatabaseMinorVersionIfNeeded(
- _txn, pluginName);
+ _opCtx, pluginName);
if (!s.isOK())
return s;
}
@@ -220,7 +220,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
for (size_t i = 0; i < indexSpecs.size(); i++) {
BSONObj info = indexSpecs[i];
StatusWith<BSONObj> statusWithInfo =
- _collection->getIndexCatalog()->prepareSpecForCreate(_txn, info);
+ _collection->getIndexCatalog()->prepareSpecForCreate(_opCtx, info);
Status status = statusWithInfo.getStatus();
if (!status.isOK())
return status;
@@ -228,13 +228,13 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
indexInfoObjs.push_back(info);
IndexToBuild index;
- index.block.reset(new IndexCatalog::IndexBuildBlock(_txn, _collection, info));
+ index.block.reset(new IndexCatalog::IndexBuildBlock(_opCtx, _collection, info));
status = index.block->init();
if (!status.isOK())
return status;
index.real = index.block->getEntry()->accessMethod();
- status = index.real->initializeAsEmpty(_txn);
+ status = index.real->initializeAsEmpty(_opCtx);
if (!status.isOK())
return status;
@@ -246,7 +246,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
- IndexCatalog::prepareInsertDeleteOptions(_txn, descriptor, &index.options);
+ IndexCatalog::prepareInsertDeleteOptions(_opCtx, descriptor, &index.options);
index.options.dupsAllowed = index.options.dupsAllowed || _ignoreUnique;
if (_ignoreUnique) {
index.options.getKeysMode = IndexAccessMethod::GetKeysMode::kRelaxConstraints;
@@ -260,7 +260,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
index.filterExpression = index.block->getEntry()->getFilterExpression();
// TODO SERVER-14888 Suppress this in cases we don't want to audit.
- audit::logCreateIndex(_txn->getClient(), &info, descriptor->indexName(), ns);
+ audit::logCreateIndex(_opCtx->getClient(), &info, descriptor->indexName(), ns);
_indexes.push_back(std::move(index));
}
@@ -274,8 +274,8 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
log() << "Index build interrupted due to 'crashAfterStartingIndexBuild' failpoint. Exiting "
"after waiting for changes to become durable.";
Locker::LockSnapshot lockInfo;
- _txn->lockState()->saveLockStateAndUnlock(&lockInfo);
- if (_txn->recoveryUnit()->waitUntilDurable()) {
+ _opCtx->lockState()->saveLockStateAndUnlock(&lockInfo);
+ if (_opCtx->recoveryUnit()->waitUntilDurable()) {
quickExit(EXIT_TEST);
}
}
@@ -285,9 +285,10 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsOut) {
const char* curopMessage = _buildInBackground ? "Index Build (background)" : "Index Build";
- const auto numRecords = _collection->numRecords(_txn);
- stdx::unique_lock<Client> lk(*_txn->getClient());
- ProgressMeterHolder progress(*_txn->setMessage_inlock(curopMessage, curopMessage, numRecords));
+ const auto numRecords = _collection->numRecords(_opCtx);
+ stdx::unique_lock<Client> lk(*_opCtx->getClient());
+ ProgressMeterHolder progress(
+ *_opCtx->setMessage_inlock(curopMessage, curopMessage, numRecords));
lk.unlock();
Timer t;
@@ -295,7 +296,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO
unsigned long long n = 0;
unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- _txn, _collection->ns().ns(), _collection, PlanExecutor::YIELD_MANUAL));
+ _opCtx, _collection->ns().ns(), _collection, PlanExecutor::YIELD_MANUAL));
if (_buildInBackground) {
invariant(_allowInterruption);
exec->setYieldPolicy(PlanExecutor::YIELD_AUTO, _collection);
@@ -311,20 +312,20 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO
(PlanExecutor::ADVANCED == (state = exec->getNextSnapshotted(&objToIndex, &loc)))) {
try {
if (_allowInterruption)
- _txn->checkForInterrupt();
+ _opCtx->checkForInterrupt();
// Make sure we are working with the latest version of the document.
- if (objToIndex.snapshotId() != _txn->recoveryUnit()->getSnapshotId() &&
- !_collection->findDoc(_txn, loc, &objToIndex)) {
+ if (objToIndex.snapshotId() != _opCtx->recoveryUnit()->getSnapshotId() &&
+ !_collection->findDoc(_opCtx, loc, &objToIndex)) {
// doc was deleted so don't index it.
retries = 0;
continue;
}
// Done before insert so we can retry document if it WCEs.
- progress->setTotalWhileRunning(_collection->numRecords(_txn));
+ progress->setTotalWhileRunning(_collection->numRecords(_opCtx));
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(_opCtx);
Status ret = insert(objToIndex.value(), loc);
if (_buildInBackground)
exec->saveState();
@@ -346,14 +347,14 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO
n++;
retries = 0;
} catch (const WriteConflictException& wce) {
- CurOp::get(_txn)->debug().writeConflicts++;
+ CurOp::get(_opCtx)->debug().writeConflicts++;
retries++; // logAndBackoff expects this to be 1 on first call.
wce.logAndBackoff(retries, "index creation", _collection->ns().ns());
// Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
// around call to abandonSnapshot.
exec->saveState();
- _txn->recoveryUnit()->abandonSnapshot();
+ _opCtx->recoveryUnit()->abandonSnapshot();
exec->restoreState(); // Handles any WCEs internally.
}
}
@@ -372,13 +373,13 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO
}
// Check for interrupt to allow for killop prior to index build completion.
- _txn->checkForInterrupt();
+ _opCtx->checkForInterrupt();
}
if (MONGO_FAIL_POINT(hangAfterStartingIndexBuildUnlocked)) {
// Unlock before hanging so replication recognizes we've completed.
Locker::LockSnapshot lockInfo;
- _txn->lockState()->saveLockStateAndUnlock(&lockInfo);
+ _opCtx->lockState()->saveLockStateAndUnlock(&lockInfo);
while (MONGO_FAIL_POINT(hangAfterStartingIndexBuildUnlocked)) {
log() << "Hanging index build with no locks due to "
"'hangAfterStartingIndexBuildUnlocked' failpoint";
@@ -409,9 +410,9 @@ Status MultiIndexBlock::insert(const BSONObj& doc, const RecordId& loc) {
int64_t unused;
Status idxStatus(ErrorCodes::InternalError, "");
if (_indexes[i].bulk) {
- idxStatus = _indexes[i].bulk->insert(_txn, doc, loc, _indexes[i].options, &unused);
+ idxStatus = _indexes[i].bulk->insert(_opCtx, doc, loc, _indexes[i].options, &unused);
} else {
- idxStatus = _indexes[i].real->insert(_txn, doc, loc, _indexes[i].options, &unused);
+ idxStatus = _indexes[i].real->insert(_opCtx, doc, loc, _indexes[i].options, &unused);
}
if (!idxStatus.isOK())
@@ -426,7 +427,7 @@ Status MultiIndexBlock::doneInserting(std::set<RecordId>* dupsOut) {
continue;
LOG(1) << "\t bulk commit starting for index: "
<< _indexes[i].block->getEntry()->descriptor()->indexName();
- Status status = _indexes[i].real->commitBulk(_txn,
+ Status status = _indexes[i].real->commitBulk(_opCtx,
std::move(_indexes[i].bulk),
_allowInterruption,
_indexes[i].options.dupsAllowed,
@@ -449,7 +450,7 @@ void MultiIndexBlock::commit() {
_indexes[i].block->success();
}
- _txn->recoveryUnit()->registerChange(new SetNeedToCleanupOnRollback(this));
+ _opCtx->recoveryUnit()->registerChange(new SetNeedToCleanupOnRollback(this));
_needToCleanup = false;
}
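Taken together, the renamed members trace MultiIndexBlock's whole lifecycle. A caller sketch of a foreground build, limited to calls that appear in this diff ('specs' is an assumed std::vector<BSONObj>; insertAllDocumentsInCollection is assumed to finish the bulk phase itself, so no separate doneInserting() call is shown):

    MultiIndexBlock indexer(opCtx, collection);
    indexer.allowInterruption();

    auto indexInfoObjs = indexer.init(specs);  // prepares and registers the builds
    if (!indexInfoObjs.isOK())
        return indexInfoObjs.getStatus();

    Status status = indexer.insertAllDocumentsInCollection(nullptr /*dupsOut*/);
    if (!status.isOK())
        return status;

    {
        WriteUnitOfWork wunit(opCtx);
        indexer.commit();  // registers SetNeedToCleanupOnRollback, see above
        wunit.commit();
    }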
diff --git a/src/mongo/db/catalog/index_create.h b/src/mongo/db/catalog/index_create.h
index 88dd5db8393..14bec4dafbf 100644
--- a/src/mongo/db/catalog/index_create.h
+++ b/src/mongo/db/catalog/index_create.h
@@ -65,7 +65,7 @@ public:
/**
* Neither pointer is owned.
*/
- MultiIndexBlock(OperationContext* txn, Collection* collection);
+ MultiIndexBlock(OperationContext* opCtx, Collection* collection);
~MultiIndexBlock();
/**
@@ -206,7 +206,7 @@ private:
// Pointers not owned here and must outlive 'this'
Collection* _collection;
- OperationContext* _txn;
+ OperationContext* _opCtx;
bool _buildInBackground;
bool _allowInterruption;
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index e1616a34c86..0d48247f68d 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -439,14 +439,14 @@ Status validateIndexSpecFieldNames(const BSONObj& indexSpec) {
return Status::OK();
}
-StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* txn,
+StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* opCtx,
const BSONObj& indexSpec,
const CollatorInterface* defaultCollator) {
if (auto collationElem = indexSpec[IndexDescriptor::kCollationFieldName]) {
// validateIndexSpec() should have already verified that 'collationElem' is an object.
invariant(collationElem.type() == BSONType::Object);
- auto collator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
if (!collator.isOK()) {
return collator.getStatus();
diff --git a/src/mongo/db/catalog/index_key_validate.h b/src/mongo/db/catalog/index_key_validate.h
index bb2cc7ff123..9ee07df7dc5 100644
--- a/src/mongo/db/catalog/index_key_validate.h
+++ b/src/mongo/db/catalog/index_key_validate.h
@@ -74,7 +74,7 @@ Status validateIndexSpecFieldNames(const BSONObj& indexSpec);
* collation spec. If 'collation' is missing, fills it in with the spec for 'defaultCollator'.
* Returns the index specification with 'collation' filled in.
*/
-StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* txn,
+StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* opCtx,
const BSONObj& indexSpec,
const CollatorInterface* defaultCollator);
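validateIndexSpecCollation's contract, to fill in the full collation spec or inherit the collection default, is exercised directly by the tests below. A usage sketch following the test setup; the namespace value is a placeholder:

    // Setup mirrors index_spec_validate_test.cpp below.
    QueryTestServiceContext serviceContext;
    auto opCtx = serviceContext.makeOperationContext();
    const CollatorInterfaceMock defaultCollator(CollatorInterfaceMock::MockType::kReverseString);

    auto result = validateIndexSpecCollation(opCtx.get(),
                                             BSON("key" << BSON("field" << 1) << "name"
                                                        << "indexName"
                                                        << "ns"
                                                        << "test.coll"),  // placeholder ns
                                             &defaultCollator);
    // On success, result.getValue() carries a fully-specified 'collation' object.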
diff --git a/src/mongo/db/catalog/index_spec_validate_test.cpp b/src/mongo/db/catalog/index_spec_validate_test.cpp
index 6d0bc3eb918..45e65b00d6f 100644
--- a/src/mongo/db/catalog/index_spec_validate_test.cpp
+++ b/src/mongo/db/catalog/index_spec_validate_test.cpp
@@ -790,11 +790,11 @@ TEST(IdIndexSpecValidateTest, ReturnsOKStatusIfAllFieldsAllowedForIdIndex) {
TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
const CollatorInterface* defaultCollator = nullptr;
- auto result = validateIndexSpecCollation(txn.get(),
+ auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
<< "ns"
@@ -840,11 +840,11 @@ TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
const CollatorInterface* defaultCollator = nullptr;
- auto result = validateIndexSpecCollation(txn.get(),
+ auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
<< "ns"
@@ -869,11 +869,11 @@ TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
TEST(IndexSpecCollationValidateTest, FillsInCollationFieldWithCollectionDefaultIfNotPresent) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
const CollatorInterfaceMock defaultCollator(CollatorInterfaceMock::MockType::kReverseString);
- auto result = validateIndexSpecCollation(txn.get(),
+ auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
<< "ns"
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index fd8b6819df6..f48454fae29 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -52,29 +52,29 @@
namespace mongo {
namespace {
-static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
- WriteUnitOfWork wunit(txn);
- if (db->dropCollection(txn, collName).isOK()) {
+static void dropCollection(OperationContext* opCtx, Database* db, StringData collName) {
+ WriteUnitOfWork wunit(opCtx);
+ if (db->dropCollection(opCtx, collName).isOK()) {
// ignoring failure case
wunit.commit();
}
}
} // namespace
-Status renameCollection(OperationContext* txn,
+Status renameCollection(OperationContext* opCtx,
const NamespaceString& source,
const NamespaceString& target,
bool dropTarget,
bool stayTemp) {
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(opCtx);
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite globalWriteLock(opCtx->lockState());
// We stay in source context the whole time. This is mostly to set the CurOp namespace.
- OldClientContext ctx(txn, source.ns());
+ OldClientContext ctx(opCtx, source.ns());
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, source);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, source);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -83,17 +83,17 @@ Status renameCollection(OperationContext* txn,
<< target.ns());
}
- Database* const sourceDB = dbHolder().get(txn, source.db());
+ Database* const sourceDB = dbHolder().get(opCtx, source.db());
Collection* const sourceColl = sourceDB ? sourceDB->getCollection(source.ns()) : nullptr;
if (!sourceColl) {
- if (sourceDB && sourceDB->getViewCatalog()->lookup(txn, source.ns()))
+ if (sourceDB && sourceDB->getViewCatalog()->lookup(opCtx, source.ns()))
return Status(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "cannot rename view: " << source.ns());
return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
}
// Make sure the source collection is not sharded.
- if (CollectionShardingState::get(txn, source)->getMetadata()) {
+ if (CollectionShardingState::get(opCtx, source)->getMetadata()) {
return {ErrorCodes::IllegalOperation, "source namespace cannot be sharded"};
}
@@ -102,7 +102,7 @@ Status renameCollection(OperationContext* txn,
// Ensure that index names do not push the length over the max.
// Iterator includes unfinished indexes.
IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
+ sourceColl->getIndexCatalog()->getIndexIterator(opCtx, true);
int longestIndexNameLength = 0;
while (sourceIndIt.more()) {
int thisLength = sourceIndIt.next()->indexName().length();
@@ -123,16 +123,16 @@ Status renameCollection(OperationContext* txn,
BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
- Database* const targetDB = dbHolder().openDb(txn, target.db());
+ Database* const targetDB = dbHolder().openDb(opCtx, target.db());
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// Check if the target namespace exists and if dropTarget is true.
// Return a non-OK status if target exists and dropTarget is not true or if the collection
// is sharded.
if (targetDB->getCollection(target)) {
- if (CollectionShardingState::get(txn, target)->getMetadata()) {
+ if (CollectionShardingState::get(opCtx, target)->getMetadata()) {
return {ErrorCodes::IllegalOperation, "cannot rename to a sharded collection"};
}
@@ -140,11 +140,11 @@ Status renameCollection(OperationContext* txn,
return Status(ErrorCodes::NamespaceExists, "target namespace exists");
}
- Status s = targetDB->dropCollection(txn, target.ns());
+ Status s = targetDB->dropCollection(opCtx, target.ns());
if (!s.isOK()) {
return s;
}
- } else if (targetDB->getViewCatalog()->lookup(txn, target.ns())) {
+ } else if (targetDB->getViewCatalog()->lookup(opCtx, target.ns())) {
return Status(ErrorCodes::NamespaceExists,
str::stream() << "a view already exists with that name: " << target.ns());
}
@@ -152,13 +152,13 @@ Status renameCollection(OperationContext* txn,
// If we are renaming in the same database, just
// rename the namespace and we're done.
if (sourceDB == targetDB) {
- Status s = targetDB->renameCollection(txn, source.ns(), target.ns(), stayTemp);
+ Status s = targetDB->renameCollection(opCtx, source.ns(), target.ns(), stayTemp);
if (!s.isOK()) {
return s;
}
getGlobalServiceContext()->getOpObserver()->onRenameCollection(
- txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
+ opCtx, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
wunit.commit();
return Status::OK();
@@ -174,18 +174,18 @@ Status renameCollection(OperationContext* txn,
// TODO use a temp collection and unset the temp flag on success.
Collection* targetColl = nullptr;
{
- CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(txn);
+ CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(opCtx);
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// No logOp necessary because the entire renameCollection command is one logOp.
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- targetColl = targetDB->createCollection(txn,
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ targetColl = targetDB->createCollection(opCtx,
target.ns(),
options,
false); // _id index build with others later.
- txn->setReplicatedWrites(shouldReplicateWrites);
+ opCtx->setReplicatedWrites(shouldReplicateWrites);
if (!targetColl) {
return Status(ErrorCodes::OutOfDiskSpace, "Failed to create target collection.");
}
@@ -194,9 +194,9 @@ Status renameCollection(OperationContext* txn,
}
// Dismissed on success
- ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target.ns());
+ ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, opCtx, targetDB, target.ns());
- MultiIndexBlock indexer(txn, targetColl);
+ MultiIndexBlock indexer(opCtx, targetColl);
indexer.allowInterruption();
std::vector<MultiIndexBlock*> indexers{&indexer};
@@ -204,7 +204,7 @@ Status renameCollection(OperationContext* txn,
{
std::vector<BSONObj> indexesToCopy;
IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
+ sourceColl->getIndexCatalog()->getIndexIterator(opCtx, true);
while (sourceIndIt.more()) {
const BSONObj currIndex = sourceIndIt.next()->infoObj();
@@ -224,18 +224,18 @@ Status renameCollection(OperationContext* txn,
{
// Copy over all the data from source collection to target collection.
- auto cursor = sourceColl->getCursor(txn);
+ auto cursor = sourceColl->getCursor(opCtx);
while (auto record = cursor->next()) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
const auto obj = record->data.releaseToBson();
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// No logOp necessary because the entire renameCollection command is one logOp.
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- Status status = targetColl->insertDocument(txn, obj, indexers, true);
- txn->setReplicatedWrites(shouldReplicateWrites);
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ Status status = targetColl->insertDocument(opCtx, obj, indexers, true);
+ opCtx->setReplicatedWrites(shouldReplicateWrites);
if (!status.isOK())
return status;
wunit.commit();
@@ -249,19 +249,19 @@ Status renameCollection(OperationContext* txn,
{
// Getting here means we successfully built the target copy. We now remove the
// source collection and finalize the rename.
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- Status status = sourceDB->dropCollection(txn, source.ns());
- txn->setReplicatedWrites(shouldReplicateWrites);
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ Status status = sourceDB->dropCollection(opCtx, source.ns());
+ opCtx->setReplicatedWrites(shouldReplicateWrites);
if (!status.isOK())
return status;
indexer.commit();
getGlobalServiceContext()->getOpObserver()->onRenameCollection(
- txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
+ opCtx, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
wunit.commit();
}
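One pattern worth noting in rename_collection.cpp: a ScopeGuard is armed before the copy so that any early return drops the half-built target. A sketch of the idiom using the names from this diff (the Dismiss() call is implied by the "Dismissed on success" comment above and is an assumption here):

    // Arm cleanup before the fallible work begins.
    ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, opCtx, targetDB, target.ns());

    // ... copy indexes and documents; any early return drops the target ...

    targetCollectionDropper.Dismiss();  // success path keeps the new collection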
diff --git a/src/mongo/db/catalog/rename_collection.h b/src/mongo/db/catalog/rename_collection.h
index fb1aa7b5387..c6a3f56b380 100644
--- a/src/mongo/db/catalog/rename_collection.h
+++ b/src/mongo/db/catalog/rename_collection.h
@@ -37,7 +37,7 @@ class OperationContext;
* iff "dropTarget" is true. "stayTemp" indicates whether a collection should maintain its
* temporariness.
*/
-Status renameCollection(OperationContext* txn,
+Status renameCollection(OperationContext* opCtx,
const NamespaceString& source,
const NamespaceString& target,
bool dropTarget,