author      Maria van Keulen <maria@mongodb.com>    2017-03-07 12:00:08 -0500
committer   Maria van Keulen <maria@mongodb.com>    2017-03-07 12:00:08 -0500
commit      589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79 (patch)
tree        c7a090ffdd56a91ae677e2492c61b820af44f964 /src/mongo/db/commands
parent      3cba97198638df3750e3b455e2ad57af7ee536ae (diff)
download    mongo-589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79.tar.gz
SERVER-27938 Rename all OperationContext variables to opCtx
This commit is an automated rename of all whole-word instances of txn, _txn, and txnPtr to opCtx, _opCtx, and opCtxPtr, respectively, in all .cpp and .h files in src/mongo.
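The rename described above is a mechanical, whole-word substitution rather than a hand edit. The actual tooling used for SERVER-27938 is not included in this change; the following is only a minimal sketch of how such a rename could be scripted, assuming a C++17 toolchain with <filesystem> and <regex>, with file handling details that are illustrative rather than taken from this commit.

// Hypothetical sketch of a whole-word rename pass; not the script used for
// this commit. Walks src/mongo and rewrites .cpp/.h files in place.
#include <filesystem>
#include <fstream>
#include <regex>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

namespace fs = std::filesystem;

int main() {
    // Whole-word replacements named in the commit message.
    const std::vector<std::pair<std::regex, std::string>> renames = {
        {std::regex(R"(\btxnPtr\b)"), "opCtxPtr"},
        {std::regex(R"(\b_txn\b)"), "_opCtx"},
        {std::regex(R"(\btxn\b)"), "opCtx"},
    };

    for (const auto& entry : fs::recursive_directory_iterator("src/mongo")) {
        const fs::path& path = entry.path();
        if (!entry.is_regular_file() ||
            (path.extension() != ".cpp" && path.extension() != ".h")) {
            continue;
        }

        // Read the whole file into memory.
        std::ifstream in(path);
        std::stringstream buffer;
        buffer << in.rdbuf();
        in.close();
        std::string contents = buffer.str();

        // Apply each rename; \b keeps identifiers that merely contain
        // "txn" as a substring untouched.
        for (const auto& rename : renames) {
            contents = std::regex_replace(contents, rename.first, rename.second);
        }

        // Write the rewritten file back in place.
        std::ofstream out(path, std::ios::trunc);
        out << contents;
    }
    return 0;
}

Because \b only matches at identifier boundaries, names that merely contain txn are left alone, which is what the "whole-word instances" wording implies; the resulting signature changes, for example run(OperationContext* txn, ...) becoming run(OperationContext* opCtx, ...), are exactly what the hunks below show.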
Diffstat (limited to 'src/mongo/db/commands')
-rw-r--r--  src/mongo/db/commands/apply_ops_cmd.cpp  14
-rw-r--r--  src/mongo/db/commands/apply_ops_cmd_common.cpp  20
-rw-r--r--  src/mongo/db/commands/apply_ops_cmd_common.h  2
-rw-r--r--  src/mongo/db/commands/authentication_commands.cpp  26
-rw-r--r--  src/mongo/db/commands/authentication_commands.h  8
-rw-r--r--  src/mongo/db/commands/clone.cpp  10
-rw-r--r--  src/mongo/db/commands/clone_collection.cpp  8
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp  14
-rw-r--r--  src/mongo/db/commands/compact.cpp  12
-rw-r--r--  src/mongo/db/commands/conn_pool_stats.cpp  6
-rw-r--r--  src/mongo/db/commands/conn_pool_sync.cpp  2
-rw-r--r--  src/mongo/db/commands/connection_status.cpp  2
-rw-r--r--  src/mongo/db/commands/copydb.cpp  18
-rw-r--r--  src/mongo/db/commands/copydb_start_commands.cpp  8
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp  22
-rw-r--r--  src/mongo/db/commands/cpuprofile.cpp  20
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp  70
-rw-r--r--  src/mongo/db/commands/current_op.cpp  8
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp  263
-rw-r--r--  src/mongo/db/commands/dbhash.cpp  16
-rw-r--r--  src/mongo/db/commands/dbhash.h  2
-rw-r--r--  src/mongo/db/commands/distinct.cpp  32
-rw-r--r--  src/mongo/db/commands/driverHelpers.cpp  2
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp  32
-rw-r--r--  src/mongo/db/commands/eval.cpp  18
-rw-r--r--  src/mongo/db/commands/explain_cmd.cpp  20
-rw-r--r--  src/mongo/db/commands/fail_point_cmd.cpp  2
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version.cpp  35
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version.h  5
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp  122
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp  52
-rw-r--r--  src/mongo/db/commands/fsync.cpp  40
-rw-r--r--  src/mongo/db/commands/generic.cpp  18
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp  25
-rw-r--r--  src/mongo/db/commands/get_last_error.cpp  22
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp  66
-rw-r--r--  src/mongo/db/commands/group_cmd.cpp  18
-rw-r--r--  src/mongo/db/commands/hashcmd.cpp  2
-rw-r--r--  src/mongo/db/commands/haystack.cpp  8
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp  40
-rw-r--r--  src/mongo/db/commands/index_filter_commands.h  14
-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp  105
-rw-r--r--  src/mongo/db/commands/isself.cpp  2
-rw-r--r--  src/mongo/db/commands/kill_op.cpp  6
-rw-r--r--  src/mongo/db/commands/killcursors_cmd.cpp  16
-rw-r--r--  src/mongo/db/commands/killcursors_common.cpp  6
-rw-r--r--  src/mongo/db/commands/killcursors_common.h  6
-rw-r--r--  src/mongo/db/commands/list_collections.cpp  32
-rw-r--r--  src/mongo/db/commands/list_databases.cpp  14
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp  18
-rw-r--r--  src/mongo/db/commands/lock_info.cpp  4
-rw-r--r--  src/mongo/db/commands/mr.cpp  278
-rw-r--r--  src/mongo/db/commands/mr.h  12
-rw-r--r--  src/mongo/db/commands/oplog_note.cpp  10
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp  14
-rw-r--r--  src/mongo/db/commands/parameters.cpp  18
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp  71
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.cpp  42
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.h  16
-rw-r--r--  src/mongo/db/commands/plan_cache_commands_test.cpp  109
-rw-r--r--  src/mongo/db/commands/rename_collection_cmd.cpp  10
-rw-r--r--  src/mongo/db/commands/repair_cursor.cpp  12
-rw-r--r--  src/mongo/db/commands/server_status.cpp  22
-rw-r--r--  src/mongo/db/commands/server_status.h  10
-rw-r--r--  src/mongo/db/commands/set_feature_compatibility_version_command.cpp  4
-rw-r--r--  src/mongo/db/commands/snapshot_management.cpp  19
-rw-r--r--  src/mongo/db/commands/test_commands.cpp  54
-rw-r--r--  src/mongo/db/commands/top_command.cpp  4
-rw-r--r--  src/mongo/db/commands/touch.cpp  6
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp  330
-rw-r--r--  src/mongo/db/commands/validate.cpp  10
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp  64
72 files changed, 1246 insertions, 1202 deletions
diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp
index c252b8fd073..233fcc744e3 100644
--- a/src/mongo/db/commands/apply_ops_cmd.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd.cpp
@@ -80,13 +80,13 @@ public:
}
- virtual Status checkAuthForOperation(OperationContext* txn,
+ virtual Status checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
- return checkAuthForApplyOpsCommand(txn, dbname, cmdObj);
+ return checkAuthForApplyOpsCommand(opCtx, dbname, cmdObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -96,7 +96,7 @@ public:
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
if (cmdObj.firstElement().type() != Array) {
errmsg = "ops has to be an array";
@@ -116,14 +116,14 @@ public:
}
}
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
auto lastOpAtOperationStart = repl::ReplClientInfo::forClient(client).getLastOp();
ScopeGuard lastOpSetterGuard =
MakeObjGuard(repl::ReplClientInfo::forClient(client),
&repl::ReplClientInfo::setLastOpToSystemLastOpTime,
- txn);
+ opCtx);
- auto applyOpsStatus = appendCommandStatus(result, applyOps(txn, dbname, cmdObj, &result));
+ auto applyOpsStatus = appendCommandStatus(result, applyOps(opCtx, dbname, cmdObj, &result));
if (repl::ReplClientInfo::forClient(client).getLastOp() != lastOpAtOperationStart) {
// If this operation has already generated a new lastOp, don't bother setting it
diff --git a/src/mongo/db/commands/apply_ops_cmd_common.cpp b/src/mongo/db/commands/apply_ops_cmd_common.cpp
index ebacbdfd476..55b463abc04 100644
--- a/src/mongo/db/commands/apply_ops_cmd_common.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd_common.cpp
@@ -46,11 +46,11 @@ namespace mongo {
namespace {
-Status checkOperationAuthorization(OperationContext* txn,
+Status checkOperationAuthorization(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& oplogEntry,
bool alwaysUpsert) {
- AuthorizationSession* authSession = AuthorizationSession::get(txn->getClient());
+ AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient());
BSONElement opTypeElem = oplogEntry["op"];
checkBSONType(BSONType::String, opTypeElem);
@@ -79,11 +79,11 @@ Status checkOperationAuthorization(OperationContext* txn,
return Status(ErrorCodes::FailedToParse, "Unrecognized command in op");
}
- return Command::checkAuthorization(command, txn, dbname, o);
+ return Command::checkAuthorization(command, opCtx, dbname, o);
}
if (opType == "i"_sd) {
- return authSession->checkAuthForInsert(txn, ns, o);
+ return authSession->checkAuthForInsert(opCtx, ns, o);
} else if (opType == "u"_sd) {
BSONElement o2Elem = oplogEntry["o2"];
checkBSONType(BSONType::Object, o2Elem);
@@ -97,10 +97,10 @@ Status checkOperationAuthorization(OperationContext* txn,
const bool upsert = b || alwaysUpsert;
- return authSession->checkAuthForUpdate(txn, ns, o, o2, upsert);
+ return authSession->checkAuthForUpdate(opCtx, ns, o, o2, upsert);
} else if (opType == "d"_sd) {
- return authSession->checkAuthForDelete(txn, ns, o);
+ return authSession->checkAuthForDelete(opCtx, ns, o);
} else if (opType == "db"_sd) {
// It seems that 'db' isn't used anymore. Require all actions to prevent casual use.
ActionSet allActions;
@@ -175,10 +175,10 @@ ApplyOpsValidity validateApplyOpsCommand(const BSONObj& cmdObj) {
return ApplyOpsValidity::kOk;
}
-Status checkAuthForApplyOpsCommand(OperationContext* txn,
+Status checkAuthForApplyOpsCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
- AuthorizationSession* authSession = AuthorizationSession::get(txn->getClient());
+ AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient());
ApplyOpsValidity validity = validateApplyOpsCommand(cmdObj);
if (validity == ApplyOpsValidity::kNeedsSuperuser) {
@@ -193,7 +193,7 @@ Status checkAuthForApplyOpsCommand(OperationContext* txn,
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
const bool alwaysUpsert =
@@ -202,7 +202,7 @@ Status checkAuthForApplyOpsCommand(OperationContext* txn,
checkBSONType(BSONType::Array, cmdObj.firstElement());
for (const BSONElement& e : cmdObj.firstElement().Array()) {
checkBSONType(BSONType::Object, e);
- Status status = checkOperationAuthorization(txn, dbname, e.Obj(), alwaysUpsert);
+ Status status = checkOperationAuthorization(opCtx, dbname, e.Obj(), alwaysUpsert);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/commands/apply_ops_cmd_common.h b/src/mongo/db/commands/apply_ops_cmd_common.h
index 443e862b798..f9fb5ec9823 100644
--- a/src/mongo/db/commands/apply_ops_cmd_common.h
+++ b/src/mongo/db/commands/apply_ops_cmd_common.h
@@ -39,7 +39,7 @@ class Status;
/**
* Returns Status::OK if the associated client is authorized to perform the command in cmdObj.
*/
-Status checkAuthForApplyOpsCommand(OperationContext* txn,
+Status checkAuthForApplyOpsCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj);
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index 8de3931d529..95a92d89e3a 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -113,7 +113,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -151,7 +151,7 @@ void CmdAuthenticate::redactForLogging(mutablebson::Document* cmdObj) {
}
}
-bool CmdAuthenticate::run(OperationContext* txn,
+bool CmdAuthenticate::run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -167,7 +167,7 @@ bool CmdAuthenticate::run(OperationContext* txn,
mechanism = "MONGODB-CR";
}
UserName user;
- auto& sslPeerInfo = SSLPeerInfo::forSession(txn->getClient()->session());
+ auto& sslPeerInfo = SSLPeerInfo::forSession(opCtx->getClient()->session());
if (mechanism == "MONGODB-X509" && !cmdObj.hasField("user")) {
user = UserName(sslPeerInfo.subjectName, dbname);
} else {
@@ -182,7 +182,7 @@ bool CmdAuthenticate::run(OperationContext* txn,
user = internalSecurity.user->getName();
}
- Status status = _authenticate(txn, mechanism, user, cmdObj);
+ Status status = _authenticate(opCtx, mechanism, user, cmdObj);
audit::logAuthentication(Client::getCurrent(), mechanism, user, status.code());
if (!status.isOK()) {
if (!serverGlobalParams.quiet.load()) {
@@ -204,22 +204,22 @@ bool CmdAuthenticate::run(OperationContext* txn,
return true;
}
-Status CmdAuthenticate::_authenticate(OperationContext* txn,
+Status CmdAuthenticate::_authenticate(OperationContext* opCtx,
const std::string& mechanism,
const UserName& user,
const BSONObj& cmdObj) {
if (mechanism == "MONGODB-CR") {
- return _authenticateCR(txn, user, cmdObj);
+ return _authenticateCR(opCtx, user, cmdObj);
}
#ifdef MONGO_CONFIG_SSL
if (mechanism == "MONGODB-X509") {
- return _authenticateX509(txn, user, cmdObj);
+ return _authenticateX509(opCtx, user, cmdObj);
}
#endif
return Status(ErrorCodes::BadValue, "Unsupported mechanism: " + mechanism);
}
-Status CmdAuthenticate::_authenticateCR(OperationContext* txn,
+Status CmdAuthenticate::_authenticateCR(OperationContext* opCtx,
const UserName& user,
const BSONObj& cmdObj) {
if (user == internalSecurity.user->getName() &&
@@ -265,7 +265,7 @@ Status CmdAuthenticate::_authenticateCR(OperationContext* txn,
}
User* userObj;
- Status status = getGlobalAuthorizationManager()->acquireUser(txn, user, &userObj);
+ Status status = getGlobalAuthorizationManager()->acquireUser(opCtx, user, &userObj);
if (!status.isOK()) {
// Failure to find the privilege document indicates no-such-user, a fact that we do not
// wish to reveal to the client. So, we return AuthenticationFailed rather than passing
@@ -298,7 +298,7 @@ Status CmdAuthenticate::_authenticateCR(OperationContext* txn,
}
AuthorizationSession* authorizationSession = AuthorizationSession::get(Client::getCurrent());
- status = authorizationSession->addAndAuthorizeUser(txn, user);
+ status = authorizationSession->addAndAuthorizeUser(opCtx, user);
if (!status.isOK()) {
return status;
}
@@ -307,7 +307,7 @@ Status CmdAuthenticate::_authenticateCR(OperationContext* txn,
}
#ifdef MONGO_CONFIG_SSL
-Status CmdAuthenticate::_authenticateX509(OperationContext* txn,
+Status CmdAuthenticate::_authenticateX509(OperationContext* opCtx,
const UserName& user,
const BSONObj& cmdObj) {
if (!getSSLManager()) {
@@ -348,7 +348,7 @@ Status CmdAuthenticate::_authenticateX509(OperationContext* txn,
if (_isX509AuthDisabled) {
return Status(ErrorCodes::BadValue, _x509AuthenticationDisabledMessage);
}
- Status status = authorizationSession->addAndAuthorizeUser(txn, user);
+ Status status = authorizationSession->addAndAuthorizeUser(opCtx, user);
if (!status.isOK()) {
return status;
}
@@ -374,7 +374,7 @@ public:
return false;
}
CmdLogout() : Command("logout") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/db/commands/authentication_commands.h b/src/mongo/db/commands/authentication_commands.h
index 4b1caf54913..fddfcfdb1eb 100644
--- a/src/mongo/db/commands/authentication_commands.h
+++ b/src/mongo/db/commands/authentication_commands.h
@@ -55,7 +55,7 @@ public:
virtual void redactForLogging(mutablebson::Document* cmdObj);
CmdAuthenticate() : Command("authenticate") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -75,12 +75,12 @@ private:
* mechanism, and ProtocolError, indicating an error in the use of the authentication
* protocol.
*/
- Status _authenticate(OperationContext* txn,
+ Status _authenticate(OperationContext* opCtx,
const std::string& mechanism,
const UserName& user,
const BSONObj& cmdObj);
- Status _authenticateCR(OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
- Status _authenticateX509(OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
+ Status _authenticateCR(OperationContext* opCtx, const UserName& user, const BSONObj& cmdObj);
+ Status _authenticateX509(OperationContext* opCtx, const UserName& user, const BSONObj& cmdObj);
};
extern CmdAuthenticate cmdAuthenticate;
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index 60b9d031dc1..8548152087d 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -86,7 +86,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -94,7 +94,7 @@ public:
BSONObjBuilder& result) {
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
}
string from = cmdObj.getStringField("clone");
@@ -119,11 +119,11 @@ public:
set<string> clonedColls;
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), dbname, MODE_X);
Cloner cloner;
- Status status = cloner.copyDb(txn, dbname, from, opts, &clonedColls);
+ Status status = cloner.copyDb(opCtx, dbname, from, opts, &clonedColls);
BSONArrayBuilder barr;
barr.append(clonedColls);
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index 512c9d7b737..d920ac62c05 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -103,7 +103,7 @@ public:
"is placed at the same db.collection (namespace) as the source.\n";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -111,7 +111,7 @@ public:
BSONObjBuilder& result) {
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
string fromhost = cmdObj.getStringField("from");
if (fromhost.empty()) {
@@ -121,7 +121,7 @@ public:
{
HostAndPort h(fromhost);
- if (repl::isSelf(h, txn->getServiceContext())) {
+ if (repl::isSelf(h, opCtx->getServiceContext())) {
errmsg = "can't cloneCollection from self";
return false;
}
@@ -152,7 +152,7 @@ public:
cloner.setConnection(myconn.release());
- return cloner.copyCollection(txn, collection, query, errmsg, copyIndexes);
+ return cloner.copyCollection(opCtx, collection, query, errmsg, copyIndexes);
}
} cmdCloneCollection;
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 373147da069..222e6d8887f 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -84,7 +84,7 @@ public:
out->push_back(Privilege(ResourcePattern::forExactNamespace(nss), targetActions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -118,11 +118,11 @@ public:
return false;
}
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, dbname, MODE_X);
NamespaceString nss(dbname, to);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss)) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss)) {
return appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
@@ -140,7 +140,7 @@ public:
}
Status status =
- cloneCollectionAsCapped(txn, db, from.toString(), to.toString(), size, temp);
+ cloneCollectionAsCapped(opCtx, db, from.toString(), to.toString(), size, temp);
return appendCommandStatus(result, status);
}
} cmdCloneCollectionAsCapped;
@@ -170,7 +170,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -184,7 +184,7 @@ public:
return false;
}
- return appendCommandStatus(result, convertToCapped(txn, nss, size));
+ return appendCommandStatus(result, convertToCapped(opCtx, nss, size));
}
} cmdConvertToCapped;
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index b53fcda0b65..e93b94a5892 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -86,7 +86,7 @@ public:
}
CompactCmd() : Command("compact") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& db,
BSONObj& cmdObj,
int,
@@ -144,13 +144,13 @@ public:
if (cmdObj.hasElement("validate"))
compactOptions.validateDocuments = cmdObj["validate"].trueValue();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, db, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, db, MODE_X);
Database* const collDB = autoDb.getDb();
Collection* collection = collDB ? collDB->getCollection(nss) : nullptr;
auto view =
- collDB && !collection ? collDB->getViewCatalog()->lookup(txn, nss.ns()) : nullptr;
+ collDB && !collection ? collDB->getViewCatalog()->lookup(opCtx, nss.ns()) : nullptr;
// If db/collection does not exist, short circuit and return.
if (!collDB || !collection) {
@@ -162,12 +162,12 @@ public:
result, {ErrorCodes::NamespaceNotFound, "collection does not exist"});
}
- OldClientContext ctx(txn, nss.ns());
+ OldClientContext ctx(opCtx, nss.ns());
BackgroundOperation::assertNoBgOpInProgForNs(nss.ns());
log() << "compact " << nss.ns() << " begin, options: " << compactOptions;
- StatusWith<CompactStats> status = collection->compact(txn, &compactOptions);
+ StatusWith<CompactStats> status = collection->compact(opCtx, &compactOptions);
if (!status.isOK())
return appendCommandStatus(result, status.getStatus());
diff --git a/src/mongo/db/commands/conn_pool_stats.cpp b/src/mongo/db/commands/conn_pool_stats.cpp
index 2cd000e7d30..1d23df5060d 100644
--- a/src/mongo/db/commands/conn_pool_stats.cpp
+++ b/src/mongo/db/commands/conn_pool_stats.cpp
@@ -69,7 +69,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string&,
mongo::BSONObj&,
int,
@@ -83,13 +83,13 @@ public:
result.appendNumber("numAScopedConnections", AScopedConnection::getNumConnections());
// Replication connections, if we have them.
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (replCoord && replCoord->isReplEnabled()) {
replCoord->appendConnectionStats(&stats);
}
// Sharding connections, if we have any.
- auto grid = Grid::get(txn);
+ auto grid = Grid::get(opCtx);
if (grid->shardRegistry()) {
grid->getExecutorPool()->appendConnectionStats(&stats);
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
diff --git a/src/mongo/db/commands/conn_pool_sync.cpp b/src/mongo/db/commands/conn_pool_sync.cpp
index cb9410d7619..e3318079efd 100644
--- a/src/mongo/db/commands/conn_pool_sync.cpp
+++ b/src/mongo/db/commands/conn_pool_sync.cpp
@@ -54,7 +54,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string&,
mongo::BSONObj&,
int,
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index e26e6b9d192..651a84327fa 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -55,7 +55,7 @@ public:
h << "Returns connection-specific information such as logged-in users and their roles";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 00f26f8bd4d..b13066949df 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -114,7 +114,7 @@ public:
<< "[, slaveOk: <bool>, username: <username>, nonce: <nonce>, key: <key>]}";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -122,7 +122,7 @@ public:
BSONObjBuilder& result) {
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
string fromhost = cmdObj.getStringField("fromhost");
bool fromSelf = fromhost.empty();
@@ -171,7 +171,7 @@ public:
string nonce = cmdObj.getStringField("nonce");
string key = cmdObj.getStringField("key");
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
+ auto& authConn = CopyDbAuthConnection::forClient(opCtx->getClient());
if (!username.empty() && !nonce.empty() && !key.empty()) {
uassert(13008, "must call copydbgetnonce first", authConn.get());
@@ -226,13 +226,13 @@ public:
if (fromSelf) {
// SERVER-4328 todo lock just the two db's not everything for the fromself case
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ uassertStatusOK(cloner.copyDb(opCtx, todb, fromhost, cloneOptions, NULL));
} else {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), todb, MODE_X);
- uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx->lockState(), todb, MODE_X);
+ uassertStatusOK(cloner.copyDb(opCtx, todb, fromhost, cloneOptions, NULL));
}
return true;
diff --git a/src/mongo/db/commands/copydb_start_commands.cpp b/src/mongo/db/commands/copydb_start_commands.cpp
index 3dc9769024a..f7a8949ca42 100644
--- a/src/mongo/db/commands/copydb_start_commands.cpp
+++ b/src/mongo/db/commands/copydb_start_commands.cpp
@@ -96,7 +96,7 @@ public:
help << "usage: {copydbgetnonce: 1, fromhost: <hostname>}";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -112,7 +112,7 @@ public:
const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromhost)));
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
+ auto& authConn = CopyDbAuthConnection::forClient(opCtx->getClient());
authConn.reset(cs.connect(StringData(), errmsg));
if (!authConn) {
return false;
@@ -170,7 +170,7 @@ public:
"from secure server\n";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -209,7 +209,7 @@ public:
return false;
}
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
+ auto& authConn = CopyDbAuthConnection::forClient(opCtx->getClient());
authConn.reset(cs.connect(StringData(), errmsg));
if (!authConn.get()) {
return false;
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 8c1bb515738..7ecbe2f08ea 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -101,7 +101,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -122,7 +122,7 @@ public:
}
// Acquire the db read lock.
- AutoGetCollectionOrViewForRead ctx(txn, request.getValue().getNs());
+ AutoGetCollectionOrViewForRead ctx(opCtx, request.getValue().getNs());
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -135,7 +135,7 @@ public:
std::string errmsg;
(void)Command::findCommand("aggregate")
- ->run(txn, dbname, viewAggregation.getValue(), 0, errmsg, *out);
+ ->run(opCtx, dbname, viewAggregation.getValue(), 0, errmsg, *out);
return Status::OK();
}
@@ -143,7 +143,7 @@ public:
// version on initial entry into count.
RangePreserver preserver(collection);
- auto statusWithPlanExecutor = getExecutorCount(txn,
+ auto statusWithPlanExecutor = getExecutorCount(opCtx,
collection,
request.getValue(),
true, // explain
@@ -158,7 +158,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -180,7 +180,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility."));
}
- AutoGetCollectionOrViewForRead ctx(txn, request.getValue().getNs());
+ AutoGetCollectionOrViewForRead ctx(opCtx, request.getValue().getNs());
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -193,7 +193,7 @@ public:
BSONObjBuilder aggResult;
(void)Command::findCommand("aggregate")
- ->run(txn, dbname, viewAggregation.getValue(), options, errmsg, aggResult);
+ ->run(opCtx, dbname, viewAggregation.getValue(), options, errmsg, aggResult);
if (ResolvedView::isResolvedViewErrorResponse(aggResult.asTempObj())) {
result.appendElements(aggResult.obj());
@@ -212,7 +212,7 @@ public:
// version on initial entry into count.
RangePreserver preserver(collection);
- auto statusWithPlanExecutor = getExecutorCount(txn,
+ auto statusWithPlanExecutor = getExecutorCount(opCtx,
collection,
request.getValue(),
false, // !explain
@@ -224,9 +224,9 @@ public:
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// Store the plan summary string in CurOp.
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
@@ -238,7 +238,7 @@ public:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
}
curOp->debug().setPlanSummaryMetrics(summaryStats);
diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp
index 24fbd034b81..608a626aa92 100644
--- a/src/mongo/db/commands/cpuprofile.cpp
+++ b/src/mongo/db/commands/cpuprofile.cpp
@@ -103,7 +103,7 @@ class CpuProfilerStartCommand : public CpuProfilerCommand {
public:
CpuProfilerStartCommand() : CpuProfilerCommand(commandName) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
std::string const& db,
BSONObj& cmdObj,
int options,
@@ -120,7 +120,7 @@ class CpuProfilerStopCommand : public CpuProfilerCommand {
public:
CpuProfilerStopCommand() : CpuProfilerCommand(commandName) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
std::string const& db,
BSONObj& cmdObj,
int options,
@@ -133,16 +133,16 @@ public:
char const* const CpuProfilerStartCommand::commandName = "_cpuProfilerStart";
char const* const CpuProfilerStopCommand::commandName = "_cpuProfilerStop";
-bool CpuProfilerStartCommand::run(OperationContext* txn,
+bool CpuProfilerStartCommand::run(OperationContext* opCtx,
std::string const& db,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) {
// The DB lock here is just so we have IX on the global lock in order to prevent shutdown
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
- OldClientContext ctx(txn, db, false /* no shard version checking */);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), db, MODE_X);
+ OldClientContext ctx(opCtx, db, false /* no shard version checking */);
std::string profileFilename = cmdObj[commandName]["profileFilename"].String();
if (!::ProfilerStart(profileFilename.c_str())) {
@@ -152,16 +152,16 @@ bool CpuProfilerStartCommand::run(OperationContext* txn,
return true;
}
-bool CpuProfilerStopCommand::run(OperationContext* txn,
+bool CpuProfilerStopCommand::run(OperationContext* opCtx,
std::string const& db,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) {
// The DB lock here is just so we have IX on the global lock in order to prevent shutdown
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
- OldClientContext ctx(txn, db, false /* no shard version checking */);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), db, MODE_X);
+ OldClientContext ctx(opCtx, db, false /* no shard version checking */);
::ProfilerStop();
return true;
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 681370348ce..8c887502d62 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -156,12 +156,12 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
* form stored in the IndexCatalog should any of these indexes already exist.
*/
StatusWith<std::vector<BSONObj>> resolveCollectionDefaultProperties(
- OperationContext* txn, const Collection* collection, std::vector<BSONObj> indexSpecs) {
+ OperationContext* opCtx, const Collection* collection, std::vector<BSONObj> indexSpecs) {
std::vector<BSONObj> indexSpecsWithDefaults = std::move(indexSpecs);
for (size_t i = 0, numIndexSpecs = indexSpecsWithDefaults.size(); i < numIndexSpecs; ++i) {
auto indexSpecStatus = index_key_validate::validateIndexSpecCollation(
- txn, indexSpecsWithDefaults[i], collection->getDefaultCollator());
+ opCtx, indexSpecsWithDefaults[i], collection->getDefaultCollator());
if (!indexSpecStatus.isOK()) {
return indexSpecStatus.getStatus();
}
@@ -171,7 +171,7 @@ StatusWith<std::vector<BSONObj>> resolveCollectionDefaultProperties(
indexSpec[IndexDescriptor::kKeyPatternFieldName].Obj())) {
std::unique_ptr<CollatorInterface> indexCollator;
if (auto collationElem = indexSpec[IndexDescriptor::kCollationFieldName]) {
- auto collatorStatus = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collatorStatus = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
// validateIndexSpecCollation() should have checked that the index collation spec is
// valid.
@@ -225,7 +225,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -246,56 +246,56 @@ public:
// now we know we have to create index(es)
// Note: createIndexes command does not currently respect shard versioning.
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), ns.db(), MODE_X);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, ns)) {
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbLock(opCtx->lockState(), ns.db(), MODE_X);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
return appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating indexes in " << ns.ns()));
}
- Database* db = dbHolder().get(txn, ns.db());
+ Database* db = dbHolder().get(opCtx, ns.db());
if (!db) {
- db = dbHolder().openDb(txn, ns.db());
+ db = dbHolder().openDb(opCtx, ns.db());
}
Collection* collection = db->getCollection(ns.ns());
if (collection) {
result.appendBool("createdCollectionAutomatically", false);
} else {
- if (db->getViewCatalog()->lookup(txn, ns.ns())) {
+ if (db->getViewCatalog()->lookup(opCtx, ns.ns())) {
errmsg = "Cannot create indexes on a view";
return appendCommandStatus(result, {ErrorCodes::CommandNotSupportedOnView, errmsg});
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- collection = db->createCollection(txn, ns.ns(), CollectionOptions());
+ WriteUnitOfWork wunit(opCtx);
+ collection = db->createCollection(opCtx, ns.ns(), CollectionOptions());
invariant(collection);
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, kCommandName, ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, kCommandName, ns.ns());
result.appendBool("createdCollectionAutomatically", true);
}
auto indexSpecsWithDefaults =
- resolveCollectionDefaultProperties(txn, collection, std::move(specs));
+ resolveCollectionDefaultProperties(opCtx, collection, std::move(specs));
if (!indexSpecsWithDefaults.isOK()) {
return appendCommandStatus(result, indexSpecsWithDefaults.getStatus());
}
specs = std::move(indexSpecsWithDefaults.getValue());
- const int numIndexesBefore = collection->getIndexCatalog()->numIndexesTotal(txn);
+ const int numIndexesBefore = collection->getIndexCatalog()->numIndexesTotal(opCtx);
result.append("numIndexesBefore", numIndexesBefore);
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
ScopeGuard lastOpSetterGuard =
MakeObjGuard(repl::ReplClientInfo::forClient(client),
&repl::ReplClientInfo::setLastOpToSystemLastOpTime,
- txn);
+ opCtx);
- MultiIndexBlock indexer(txn, collection);
+ MultiIndexBlock indexer(opCtx, collection);
indexer.allowBackgroundBuilding();
indexer.allowInterruption();
@@ -315,7 +315,7 @@ public:
for (size_t i = 0; i < specs.size(); i++) {
const BSONObj& spec = specs[i];
if (spec["unique"].trueValue()) {
- status = checkUniqueIndexConstraints(txn, ns.ns(), spec["key"].Obj());
+ status = checkUniqueIndexConstraints(opCtx, ns.ns(), spec["key"].Obj());
if (!status.isOK()) {
return appendCommandStatus(result, status);
@@ -327,14 +327,14 @@ public:
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
indexInfoObjs = uassertStatusOK(indexer.init(specs));
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, kCommandName, ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, kCommandName, ns.ns());
// If we're a background index, replace exclusive db lock with an intent lock, so that
// other readers and writers can proceed during this phase.
if (indexer.getBuildInBackground()) {
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_IX);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, ns)) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
return appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
@@ -344,7 +344,7 @@ public:
}
try {
- Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
+ Lock::CollectionLock colLock(opCtx->lockState(), ns.ns(), MODE_IX);
uassertStatusOK(indexer.insertAllDocumentsInCollection());
} catch (const DBException& e) {
invariant(e.getCode() != ErrorCodes::WriteConflict);
@@ -354,9 +354,9 @@ public:
try {
// This function cannot throw today, but we will preemptively prepare for
// that day, to avoid data corruption due to lack of index cleanup.
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_X);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, ns)) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
return appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
@@ -374,33 +374,33 @@ public:
}
// Need to return db lock back to exclusive, to complete the index build.
if (indexer.getBuildInBackground()) {
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_X);
uassert(ErrorCodes::NotMaster,
str::stream() << "Not primary while completing index build in " << dbname,
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, ns));
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns));
- Database* db = dbHolder().get(txn, ns.db());
+ Database* db = dbHolder().get(opCtx, ns.db());
uassert(28551, "database dropped during index build", db);
uassert(28552, "collection dropped during index build", db->getCollection(ns.ns()));
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
for (auto&& infoObj : indexInfoObjs) {
std::string systemIndexes = ns.getSystemIndexesCollection();
getGlobalServiceContext()->getOpObserver()->onCreateIndex(
- txn, systemIndexes, infoObj, false);
+ opCtx, systemIndexes, infoObj, false);
}
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, kCommandName, ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, kCommandName, ns.ns());
- result.append("numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal(txn));
+ result.append("numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal(opCtx));
lastOpSetterGuard.Dismiss();
@@ -408,12 +408,12 @@ public:
}
private:
- static Status checkUniqueIndexConstraints(OperationContext* txn,
+ static Status checkUniqueIndexConstraints(OperationContext* opCtx,
StringData ns,
const BSONObj& newIdxKey) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_X));
- auto metadata(CollectionShardingState::get(txn, ns.toString())->getMetadata());
+ auto metadata(CollectionShardingState::get(opCtx, ns.toString())->getMetadata());
if (metadata) {
ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
if (!shardKeyPattern.isUniqueIndexCompatible(newIdxKey)) {
diff --git a/src/mongo/db/commands/current_op.cpp b/src/mongo/db/commands/current_op.cpp
index c696533ef7a..c5f05e314ae 100644
--- a/src/mongo/db/commands/current_op.cpp
+++ b/src/mongo/db/commands/current_op.cpp
@@ -85,7 +85,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -117,14 +117,14 @@ public:
std::vector<BSONObj> inprogInfos;
BSONArrayBuilder inprogBuilder(result.subarrayStart("inprog"));
- for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
+ for (ServiceContext::LockedClientsCursor cursor(opCtx->getClient()->getServiceContext());
Client* client = cursor.next();) {
invariant(client);
stdx::lock_guard<Client> lk(*client);
if (ownOpsOnly &&
- !AuthorizationSession::get(txn->getClient())->isCoauthorizedWithClient(client)) {
+ !AuthorizationSession::get(opCtx->getClient())->isCoauthorizedWithClient(client)) {
continue;
}
@@ -183,7 +183,7 @@ public:
// don't have a collection, we pass in a fake collection name (and this is okay,
// because $where parsing only relies on the database part of the namespace).
const NamespaceString fakeNS(db, "$dummyNamespaceForCurrop");
- const Matcher matcher(filter, ExtensionsCallbackReal(txn, &fakeNS), nullptr);
+ const Matcher matcher(filter, ExtensionsCallbackReal(opCtx, &fakeNS), nullptr);
for (const auto& info : inprogInfos) {
if (matcher.matches(info)) {
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 9ba85e79f9f..7b4879425a3 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -121,8 +121,8 @@ using std::stringstream;
using std::unique_ptr;
namespace {
-void registerErrorImpl(OperationContext* txn, const DBException& exception) {
- CurOp::get(txn)->debug().exceptionInfo = exception.getInfo();
+void registerErrorImpl(OperationContext* opCtx, const DBException& exception) {
+ CurOp::get(opCtx)->debug().exceptionInfo = exception.getInfo();
}
MONGO_INITIALIZER(InitializeRegisterErrorHandler)(InitializerContext* const) {
@@ -130,18 +130,18 @@ MONGO_INITIALIZER(InitializeRegisterErrorHandler)(InitializerContext* const) {
return Status::OK();
}
/**
- * For replica set members it returns the last known op time from txn. Otherwise will return
+ * For replica set members it returns the last known op time from opCtx. Otherwise will return
* uninitialized logical time.
*/
-LogicalTime _getClientOperationTime(OperationContext* txn) {
+LogicalTime _getClientOperationTime(OperationContext* opCtx) {
repl::ReplicationCoordinator* replCoord =
- repl::ReplicationCoordinator::get(txn->getClient()->getServiceContext());
+ repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext());
const bool isReplSet =
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
LogicalTime operationTime;
if (isReplSet) {
operationTime = LogicalTime(
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp().getTimestamp());
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp().getTimestamp());
}
return operationTime;
}
@@ -159,7 +159,7 @@ public:
<< "N to wait N seconds for other members to catch up.";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -173,7 +173,7 @@ public:
}
Status status = repl::getGlobalReplicationCoordinator()->stepDown(
- txn, force, Seconds(timeoutSecs), Seconds(120));
+ opCtx, force, Seconds(timeoutSecs), Seconds(120));
if (!status.isOK() && status.code() != ErrorCodes::NotMaster) { // ignore not master
return appendCommandStatus(result, status);
}
@@ -209,7 +209,7 @@ public:
CmdDropDatabase() : Command("dropDatabase") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -239,7 +239,7 @@ public:
result, Status(ErrorCodes::IllegalOperation, "have to pass 1 as db parameter"));
}
- Status status = dropDatabase(txn, dbname);
+ Status status = dropDatabase(opCtx, dbname);
if (status == ErrorCodes::NamespaceNotFound) {
return appendCommandStatus(result, Status::OK());
}
@@ -278,7 +278,7 @@ public:
CmdRepairDatabase() : Command("repairDatabase") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -291,9 +291,9 @@ public:
}
// Closing a database requires a global lock.
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- if (!dbHolder().get(txn, dbname)) {
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ if (!dbHolder().get(opCtx, dbname)) {
// If the name doesn't make an exact match, check for a case insensitive match.
std::set<std::string> otherCasing = dbHolder().getNamesWithConflictingCasing(dbname);
if (otherCasing.empty()) {
@@ -310,9 +310,9 @@ public:
// TODO (Kal): OldClientContext legacy, needs to be removed
{
- CurOp::get(txn)->ensureStarted();
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(dbname);
+ CurOp::get(opCtx)->ensureStarted();
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(dbname);
}
log() << "repairDatabase " << dbname;
@@ -324,14 +324,14 @@ public:
bool backupOriginalFiles = e.isBoolean() && e.boolean();
StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
- Status status =
- repairDatabase(txn, engine, dbname, preserveClonedFilesOnFailure, backupOriginalFiles);
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
+ Status status = repairDatabase(
+ opCtx, engine, dbname, preserveClonedFilesOnFailure, backupOriginalFiles);
// Open database before returning
- dbHolder().openDb(txn, dbname);
+ dbHolder().openDb(opCtx, dbname);
return appendCommandStatus(result, status);
}
} cmdRepairDatabase;
@@ -385,7 +385,7 @@ public:
CmdProfile() : Command("profile") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -403,8 +403,8 @@ public:
Status status = Status::OK();
- ScopedTransaction transaction(txn, transactionMode);
- AutoGetDb ctx(txn, dbname, dbMode);
+ ScopedTransaction transaction(opCtx, transactionMode);
+ AutoGetDb ctx(opCtx, dbname, dbMode);
Database* db = ctx.getDb();
result.append("was", db ? db->getProfilingLevel() : serverGlobalParams.defaultProfile);
@@ -415,9 +415,9 @@ public:
if (!db) {
// When setting the profiling level, create the database if it didn't already exist.
// When just reading the profiling level, we do not create the database.
- db = dbHolder().openDb(txn, dbname);
+ db = dbHolder().openDb(opCtx, dbname);
}
- status = db->setProfilingLevel(txn, profilingLevel);
+ status = db->setProfilingLevel(opCtx, profilingLevel);
}
const BSONElement slow = cmdObj["slowms"];
@@ -470,7 +470,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -483,14 +483,14 @@ public:
// This doesn't look like it requires exclusive DB lock, because it uses its own diag
// locking, but originally the lock was set to be WRITE, so preserving the behaviour.
//
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), dbname, MODE_X);
// TODO (Kal): OldClientContext legacy, needs to be removed
{
- CurOp::get(txn)->ensureStarted();
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(dbname);
+ CurOp::get(opCtx)->ensureStarted();
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(dbname);
}
int was = _diaglog.setLevel(cmdObj.firstElement().numberInt());
@@ -530,7 +530,7 @@ public:
return true;
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -550,7 +550,7 @@ public:
return false;
}
- return appendCommandStatus(result, dropCollection(txn, nsToDrop, result));
+ return appendCommandStatus(result, dropCollection(opCtx, nsToDrop, result));
}
} cmdDrop;
@@ -582,7 +582,7 @@ public:
return AuthorizationSession::get(client)->checkAuthForCreate(nss, cmdObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -649,7 +649,7 @@ public:
{ErrorCodes::TypeMismatch,
str::stream() << "'collation' has to be a document: " << collationElem});
}
- auto collatorStatus = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collatorStatus = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
if (!collatorStatus.isOK()) {
return appendCommandStatus(result, collatorStatus.getStatus());
@@ -657,10 +657,10 @@ public:
defaultCollator = std::move(collatorStatus.getValue());
}
idIndexSpec = uassertStatusOK(index_key_validate::validateIndexSpecCollation(
- txn, idIndexSpec, defaultCollator.get()));
+ opCtx, idIndexSpec, defaultCollator.get()));
std::unique_ptr<CollatorInterface> idIndexCollator;
if (auto collationElem = idIndexSpec["collation"]) {
- auto collatorStatus = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collatorStatus = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
// validateIndexSpecCollation() should have checked that the _id index collation
// spec is valid.
@@ -677,12 +677,12 @@ public:
// Remove "idIndex" field from command.
auto resolvedCmdObj = cmdObj.removeField("idIndex");
- return appendCommandStatus(result,
- createCollection(txn, dbname, resolvedCmdObj, idIndexSpec));
+ return appendCommandStatus(
+ result, createCollection(opCtx, dbname, resolvedCmdObj, idIndexSpec));
}
BSONObj idIndexSpec;
- return appendCommandStatus(result, createCollection(txn, dbname, cmdObj, idIndexSpec));
+ return appendCommandStatus(result, createCollection(opCtx, dbname, cmdObj, idIndexSpec));
}
} cmdCreate;
@@ -724,7 +724,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), ActionType::find));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -763,7 +763,7 @@ public:
qr->setSort(sort);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
if (!statusWithCQ.isOK()) {
uasserted(17240, "Can't canonicalize query " + query.toString());
return 0;
@@ -773,10 +773,10 @@ public:
// Check shard version at startup.
// This will throw before we've done any work if shard version is outdated
// We drop and re-acquire these locks every document because md5'ing is expensive
- unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(txn, nss));
+ unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(opCtx, nss));
Collection* coll = ctx->getCollection();
- auto statusWithPlanExecutor = getExecutor(txn,
+ auto statusWithPlanExecutor = getExecutor(opCtx,
coll,
std::move(cq),
PlanExecutor::YIELD_MANUAL,
@@ -801,7 +801,7 @@ public:
break; // skipped chunk is probably on another shard
}
log() << "should have chunk: " << n << " have:" << myn;
- dumpChunks(txn, nss.ns(), query, sort);
+ dumpChunks(opCtx, nss.ns(), query, sort);
uassert(10040, "chunks out of order", n == myn);
}
@@ -819,7 +819,7 @@ public:
try {
// RELOCKED
- ctx.reset(new AutoGetCollectionForRead(txn, nss));
+ ctx.reset(new AutoGetCollectionForRead(opCtx, nss));
} catch (const SendStaleConfigException& ex) {
LOG(1) << "chunk metadata changed during filemd5, will retarget and continue";
break;
@@ -850,15 +850,15 @@ public:
result.append("numChunks", n);
result.append("md5", digestToString(d));
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "filemd5", dbname);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "filemd5", dbname);
return true;
}
- void dumpChunks(OperationContext* txn,
+ void dumpChunks(OperationContext* opCtx,
const string& ns,
const BSONObj& query,
const BSONObj& sort) {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
Query q(query);
q.sort(sort);
unique_ptr<DBClientCursor> c = client.query(ns, q);
@@ -905,7 +905,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -919,12 +919,12 @@ public:
BSONObj keyPattern = jsobj.getObjectField("keyPattern");
bool estimate = jsobj["estimate"].trueValue();
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
Collection* collection = ctx.getCollection();
long long numRecords = 0;
if (collection) {
- numRecords = collection->numRecords(txn);
+ numRecords = collection->numRecords(opCtx);
}
if (numRecords == 0) {
@@ -939,12 +939,13 @@ public:
unique_ptr<PlanExecutor> exec;
if (min.isEmpty() && max.isEmpty()) {
if (estimate) {
- result.appendNumber("size", static_cast<long long>(collection->dataSize(txn)));
+ result.appendNumber("size", static_cast<long long>(collection->dataSize(opCtx)));
result.appendNumber("numObjects", numRecords);
result.append("millis", timer.millis());
return 1;
}
- exec = InternalPlanner::collectionScan(txn, ns, collection, PlanExecutor::YIELD_MANUAL);
+ exec =
+ InternalPlanner::collectionScan(opCtx, ns, collection, PlanExecutor::YIELD_MANUAL);
} else if (min.isEmpty() || max.isEmpty()) {
errmsg = "only one of min or max specified";
return false;
@@ -955,7 +956,7 @@ public:
}
IndexDescriptor* idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn,
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx,
keyPattern,
true); // requireSingleKey
@@ -968,7 +969,7 @@ public:
min = Helpers::toKeyFormat(kp.extendRangeBound(min, false));
max = Helpers::toKeyFormat(kp.extendRangeBound(max, false));
- exec = InternalPlanner::indexScan(txn,
+ exec = InternalPlanner::indexScan(opCtx,
collection,
idx,
min,
@@ -977,7 +978,7 @@ public:
PlanExecutor::YIELD_MANUAL);
}
- long long avgObjSize = collection->dataSize(txn) / numRecords;
+ long long avgObjSize = collection->dataSize(opCtx) / numRecords;
long long maxSize = jsobj["maxSize"].numberLong();
long long maxObjects = jsobj["maxObjects"].numberLong();
@@ -992,7 +993,7 @@ public:
if (estimate)
size += avgObjSize;
else
- size += collection->getRecordStore()->dataFor(txn, loc).size();
+ size += collection->getRecordStore()->dataFor(opCtx, loc).size();
numObjects++;
@@ -1049,7 +1050,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -1063,7 +1064,7 @@ public:
}
result.append("ns", nss.ns());
- Status status = appendCollectionStorageStats(txn, nss, jsobj, &result);
+ Status status = appendCollectionStorageStats(opCtx, nss, jsobj, &result);
if (!status.isOK()) {
errmsg = status.reason();
return false;
@@ -1098,14 +1099,14 @@ public:
return AuthorizationSession::get(client)->checkAuthForCollMod(nss, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbname, jsobj));
- return appendCommandStatus(result, collMod(txn, nss, jsobj, &result));
+ return appendCommandStatus(result, collMod(opCtx, nss, jsobj, &result));
}
} collectionModCommand;
@@ -1134,7 +1135,7 @@ public:
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -1159,16 +1160,16 @@ public:
// TODO (Kal): OldClientContext legacy, needs to be removed
{
- CurOp::get(txn)->ensureStarted();
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(dbname);
+ CurOp::get(opCtx)->ensureStarted();
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(dbname);
}
// We lock the entire database in S-mode in order to ensure that the contents will not
// change for the stats snapshot. This might be unnecessary and if it becomes a
// performance issue, we can take IS lock and then lock collection-by-collection.
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, ns, MODE_S);
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetDb autoDb(opCtx, ns, MODE_S);
result.append("db", ns);
@@ -1191,12 +1192,12 @@ public:
result.appendNumber("fileSize", 0);
} else {
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
// TODO: OldClientContext legacy, needs to be removed
- CurOp::get(txn)->enter_inlock(dbname.c_str(), db->getProfilingLevel());
+ CurOp::get(opCtx)->enter_inlock(dbname.c_str(), db->getProfilingLevel());
}
- db->getStats(txn, &result, scale);
+ db->getStats(opCtx, &result, scale);
}
return true;
@@ -1220,13 +1221,13 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- result << "you" << txn->getClient()->clientAddress(true /*includePort*/);
+ result << "you" << opCtx->getClient()->clientAddress(true /*includePort*/);
return true;
}
} cmdWhatsMyUri;
@@ -1247,7 +1248,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -1299,10 +1300,10 @@ const std::array<StringData, 4> neededFieldNames{QueryRequest::cmdOptionMaxTimeM
QueryRequest::queryOptionMaxTimeMS};
} // namespace
-void appendOpTimeMetadata(OperationContext* txn,
+void appendOpTimeMetadata(OperationContext* opCtx,
const rpc::RequestInterface& request,
BSONObjBuilder* metadataBob) {
- const bool isShardingAware = ShardingState::get(txn)->enabled();
+ const bool isShardingAware = ShardingState::get(opCtx)->enabled();
const bool isConfig = serverGlobalParams.clusterRole == ClusterRole::ConfigServer;
repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
const bool isReplSet =
@@ -1311,7 +1312,7 @@ void appendOpTimeMetadata(OperationContext* txn,
if (isReplSet) {
// Attach our own last opTime.
repl::OpTime lastOpTimeFromClient =
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
replCoord->prepareReplMetadata(request.getMetadata(), lastOpTimeFromClient, metadataBob);
// For commands from mongos, append some info to help getLastError(w) work.
// TODO: refactor out of here as part of SERVER-18236
@@ -1329,11 +1330,11 @@ void appendOpTimeMetadata(OperationContext* txn,
}
namespace {
-void execCommandHandler(OperationContext* const txn,
+void execCommandHandler(OperationContext* const opCtx,
Command* const command,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* const replyBuilder) {
- mongo::execCommandDatabase(txn, command, request, replyBuilder);
+ mongo::execCommandDatabase(opCtx, command, request, replyBuilder);
}
MONGO_INITIALIZER(InitializeCommandExecCommandHandler)(InitializerContext* const) {
@@ -1346,7 +1347,7 @@ MONGO_INITIALIZER(InitializeCommandExecCommandHandler)(InitializerContext* const
// use shardingState and the repl coordinator without changing our entire library
// structure.
// It will be moved back as part of SERVER-18236.
-bool Command::run(OperationContext* txn,
+bool Command::run(OperationContext* opCtx,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder) {
auto bytesToReserve = reserveBytesForReply();
@@ -1366,7 +1367,7 @@ bool Command::run(OperationContext* txn,
const std::string db = request.getDatabase().toString();
BSONObjBuilder inPlaceReplyBob(replyBuilder->getInPlaceReplyBuilder(bytesToReserve));
- auto readConcernArgsStatus = extractReadConcern(txn, cmd, supportsReadConcern());
+ auto readConcernArgsStatus = extractReadConcern(opCtx, cmd, supportsReadConcern());
if (!readConcernArgsStatus.isOK()) {
auto result = appendCommandStatus(inPlaceReplyBob, readConcernArgsStatus.getStatus());
@@ -1375,7 +1376,7 @@ bool Command::run(OperationContext* txn,
return result;
}
- Status rcStatus = waitForReadConcern(txn, readConcernArgsStatus.getValue());
+ Status rcStatus = waitForReadConcern(opCtx, readConcernArgsStatus.getValue());
if (!rcStatus.isOK()) {
if (rcStatus == ErrorCodes::ExceededTimeLimit) {
const int debugLevel =
@@ -1393,7 +1394,7 @@ bool Command::run(OperationContext* txn,
std::string errmsg;
bool result;
- auto startOperationTime = _getClientOperationTime(txn);
+ auto startOperationTime = _getClientOperationTime(opCtx);
if (!supportsWriteConcern(cmd)) {
if (commandSpecifiesWriteConcern(cmd)) {
auto result = appendCommandStatus(
@@ -1405,9 +1406,9 @@ bool Command::run(OperationContext* txn,
}
// TODO: remove queryOptions parameter from command's run method.
- result = run(txn, db, cmd, 0, errmsg, inPlaceReplyBob);
+ result = run(opCtx, db, cmd, 0, errmsg, inPlaceReplyBob);
} else {
- auto wcResult = extractWriteConcern(txn, cmd, db);
+ auto wcResult = extractWriteConcern(opCtx, cmd, db);
if (!wcResult.isOK()) {
auto result = appendCommandStatus(inPlaceReplyBob, wcResult.getStatus());
inPlaceReplyBob.doneFast();
@@ -1416,20 +1417,20 @@ bool Command::run(OperationContext* txn,
}
// Change the write concern while running the command.
- const auto oldWC = txn->getWriteConcern();
- ON_BLOCK_EXIT([&] { txn->setWriteConcern(oldWC); });
- txn->setWriteConcern(wcResult.getValue());
+ const auto oldWC = opCtx->getWriteConcern();
+ ON_BLOCK_EXIT([&] { opCtx->setWriteConcern(oldWC); });
+ opCtx->setWriteConcern(wcResult.getValue());
- result = run(txn, db, cmd, 0, errmsg, inPlaceReplyBob);
+ result = run(opCtx, db, cmd, 0, errmsg, inPlaceReplyBob);
// Nothing in run() should change the writeConcern.
- dassert(SimpleBSONObjComparator::kInstance.evaluate(txn->getWriteConcern().toBSON() ==
+ dassert(SimpleBSONObjComparator::kInstance.evaluate(opCtx->getWriteConcern().toBSON() ==
wcResult.getValue().toBSON()));
WriteConcernResult res;
auto waitForWCStatus =
- waitForWriteConcern(txn,
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
+ waitForWriteConcern(opCtx,
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
wcResult.getValue(),
&res);
appendCommandWCStatus(inPlaceReplyBob, waitForWCStatus, res);
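
The hunk above swaps in the requested write concern for the duration of run() and relies on ON_BLOCK_EXIT to put the old value back no matter how the command exits. A small self-contained guard in the same spirit; the OnBlockExit template and gWriteConcern string here are illustrative stand-ins, not MongoDB's implementation:

    #include <iostream>
    #include <string>
    #include <utility>

    // Minimal scope guard in the spirit of ON_BLOCK_EXIT: runs a callable when
    // the enclosing scope unwinds, whether by return or by exception.
    template <typename F>
    class OnBlockExit {
    public:
        explicit OnBlockExit(F f) : _f(std::move(f)) {}
        ~OnBlockExit() { _f(); }
    private:
        F _f;
    };

    std::string gWriteConcern = "w:1";  // stand-in for the operation's write concern

    void runWithTemporaryWriteConcern(const std::string& requested) {
        const std::string oldWC = gWriteConcern;
        OnBlockExit restore([&] { gWriteConcern = oldWC; });  // always restored
        gWriteConcern = requested;
        std::cout << "running with " << gWriteConcern << '\n';
        // ... command body would run here ...
    }

    int main() {
        runWithTemporaryWriteConcern("w:majority");
        std::cout << "restored to " << gWriteConcern << '\n';
        return 0;
    }
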
@@ -1450,7 +1451,7 @@ bool Command::run(OperationContext* txn,
repl::ReadConcernLevel::kLinearizableReadConcern) &&
(request.getCommandName() != "getMore")) {
- auto linearizableReadStatus = waitForLinearizableReadConcern(txn);
+ auto linearizableReadStatus = waitForLinearizableReadConcern(opCtx);
if (!linearizableReadStatus.isOK()) {
inPlaceReplyBob.resetToEmpty();
@@ -1463,14 +1464,14 @@ bool Command::run(OperationContext* txn,
appendCommandStatus(inPlaceReplyBob, result, errmsg);
- auto finishOperationTime = _getClientOperationTime(txn);
+ auto finishOperationTime = _getClientOperationTime(opCtx);
auto operationTime = finishOperationTime;
invariant(finishOperationTime >= startOperationTime);
// this command did not write, so return current clusterTime.
if (finishOperationTime == startOperationTime) {
// TODO: SERVER-27786 to return the clusterTime of the read.
- operationTime = LogicalClock::get(txn)->getClusterTime().getTime();
+ operationTime = LogicalClock::get(opCtx)->getClusterTime().getTime();
}
appendOperationTime(inPlaceReplyBob, operationTime);
@@ -1478,7 +1479,7 @@ bool Command::run(OperationContext* txn,
inPlaceReplyBob.doneFast();
BSONObjBuilder metadataBob;
- appendOpTimeMetadata(txn, request, &metadataBob);
+ appendOpTimeMetadata(opCtx, request, &metadataBob);
replyBuilder->setMetadata(metadataBob.done());
return result;
@@ -1495,20 +1496,20 @@ bool Command::run(OperationContext* txn,
- context
then calls run()
*/
-void mongo::execCommandDatabase(OperationContext* txn,
+void mongo::execCommandDatabase(OperationContext* opCtx,
Command* command,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder) {
try {
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setCommand_inlock(command);
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setCommand_inlock(command);
}
// TODO: move this back to runCommands when mongos supports OperationContext
// see SERVER-18515 for details.
- uassertStatusOK(rpc::readRequestMetadata(txn, request.getMetadata()));
- rpc::TrackingMetadata::get(txn).initWithOperName(command->getName());
+ uassertStatusOK(rpc::readRequestMetadata(opCtx, request.getMetadata()));
+ rpc::TrackingMetadata::get(opCtx).initWithOperName(command->getName());
dassert(replyBuilder->getState() == rpc::ReplyBuilderInterface::State::kCommandReply);
@@ -1520,30 +1521,30 @@ void mongo::execCommandDatabase(OperationContext* txn,
request.getCommandArgs().getFields(neededFieldNames, &extractedFields);
if (Command::isHelpRequest(extractedFields[kHelpField])) {
- CurOp::get(txn)->ensureStarted();
+ CurOp::get(opCtx)->ensureStarted();
// We disable last-error for help requests due to SERVER-11492, because config servers
// use help requests to determine which commands are database writes, and so must be
// forwarded to all config servers.
- LastError::get(txn->getClient()).disable();
- Command::generateHelpResponse(txn, request, replyBuilder, *command);
+ LastError::get(opCtx->getClient()).disable();
+ Command::generateHelpResponse(opCtx, request, replyBuilder, *command);
return;
}
- ImpersonationSessionGuard guard(txn);
+ ImpersonationSessionGuard guard(opCtx);
uassertStatusOK(
- Command::checkAuthorization(command, txn, dbname, request.getCommandArgs()));
+ Command::checkAuthorization(command, opCtx, dbname, request.getCommandArgs()));
repl::ReplicationCoordinator* replCoord =
- repl::ReplicationCoordinator::get(txn->getClient()->getServiceContext());
- const bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(txn, dbname);
+ repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext());
+ const bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname);
{
bool commandCanRunOnSecondary = command->slaveOk();
bool commandIsOverriddenToRunOnSecondary = command->slaveOverrideOk() &&
- rpc::ServerSelectionMetadata::get(txn).canRunOnSecondary();
+ rpc::ServerSelectionMetadata::get(opCtx).canRunOnSecondary();
- bool iAmStandalone = !txn->writesAreReplicated();
+ bool iAmStandalone = !opCtx->writesAreReplicated();
bool canRunHere = iAmPrimary || commandCanRunOnSecondary ||
commandIsOverriddenToRunOnSecondary || iAmStandalone;
@@ -1556,7 +1557,7 @@ void mongo::execCommandDatabase(OperationContext* txn,
if (!command->maintenanceOk() &&
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet &&
- !replCoord->canAcceptWritesForDatabase_UNSAFE(txn, dbname) &&
+ !replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname) &&
!replCoord->getMemberState().secondary()) {
uassert(ErrorCodes::NotMasterOrSecondary,
@@ -1596,43 +1597,43 @@ void mongo::execCommandDatabase(OperationContext* txn,
if (maxTimeMS > 0) {
uassert(40119,
"Illegal attempt to set operation deadline within DBDirectClient",
- !txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(Milliseconds{maxTimeMS});
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(Milliseconds{maxTimeMS});
}
// Operations are only versioned against the primary. We also make sure not to redo shard
// version handling if this command was issued via the direct client.
- if (iAmPrimary && !txn->getClient()->isInDirectClient()) {
+ if (iAmPrimary && !opCtx->getClient()->isInDirectClient()) {
// Handle a shard version that may have been sent along with the command.
auto commandNS = NamespaceString(command->parseNs(dbname, request.getCommandArgs()));
- auto& oss = OperationShardingState::get(txn);
+ auto& oss = OperationShardingState::get(opCtx);
oss.initializeShardVersion(commandNS, extractedFields[kShardVersionFieldIdx]);
- auto shardingState = ShardingState::get(txn);
+ auto shardingState = ShardingState::get(opCtx);
if (oss.hasShardVersion()) {
uassertStatusOK(shardingState->canAcceptShardedCommands());
}
// Handle config optime information that may have been sent along with the command.
- uassertStatusOK(shardingState->updateConfigServerOpTimeFromMetadata(txn));
+ uassertStatusOK(shardingState->updateConfigServerOpTimeFromMetadata(opCtx));
}
// Can throw
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
bool retval = false;
- CurOp::get(txn)->ensureStarted();
+ CurOp::get(opCtx)->ensureStarted();
command->_commandsExecuted.increment();
if (logger::globalLogDomain()->shouldLog(logger::LogComponent::kTracking,
logger::LogSeverity::Debug(1)) &&
- rpc::TrackingMetadata::get(txn).getParentOperId()) {
+ rpc::TrackingMetadata::get(opCtx).getParentOperId()) {
MONGO_LOG_COMPONENT(1, logger::LogComponent::kTracking)
- << rpc::TrackingMetadata::get(txn).toString();
- rpc::TrackingMetadata::get(txn).setIsLogged(true);
+ << rpc::TrackingMetadata::get(opCtx).toString();
+ rpc::TrackingMetadata::get(opCtx).setIsLogged(true);
}
- retval = command->run(txn, request, replyBuilder);
+ retval = command->run(opCtx, request, replyBuilder);
dassert(replyBuilder->getState() == rpc::ReplyBuilderInterface::State::kOutputDocs);
@@ -1645,15 +1646,15 @@ void mongo::execCommandDatabase(OperationContext* txn,
auto sce = dynamic_cast<const StaleConfigException*>(&e);
invariant(sce);  // do not upcast from DBException created by uassert variants.
- ShardingState::get(txn)->onStaleShardVersion(
- txn, NamespaceString(sce->getns()), sce->getVersionReceived());
+ ShardingState::get(opCtx)->onStaleShardVersion(
+ opCtx, NamespaceString(sce->getns()), sce->getVersionReceived());
}
BSONObjBuilder metadataBob;
- appendOpTimeMetadata(txn, request, &metadataBob);
+ appendOpTimeMetadata(opCtx, request, &metadataBob);
- auto operationTime = _getClientOperationTime(txn);
+ auto operationTime = _getClientOperationTime(opCtx);
Command::generateErrorResponse(
- txn, replyBuilder, e, request, command, metadataBob.done(), operationTime);
+ opCtx, replyBuilder, e, request, command, metadataBob.done(), operationTime);
}
}
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 6d68cd93f00..0da2752a28d 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -80,7 +80,7 @@ public:
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -109,8 +109,8 @@ public:
// We lock the entire database in S-mode in order to ensure that the contents will not
// change for the snapshot.
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, ns, MODE_S);
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetDb autoDb(opCtx, ns, MODE_S);
Database* db = autoDb.getDb();
if (db) {
db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
@@ -152,7 +152,7 @@ public:
continue;
bool fromCache = false;
- string hash = _hashCollection(txn, db, fullCollectionName, &fromCache);
+ string hash = _hashCollection(opCtx, db, fullCollectionName, &fromCache);
bb.append(shortCollectionName, hash);
@@ -174,11 +174,11 @@ public:
return 1;
}
- void wipeCacheForCollection(OperationContext* txn, const NamespaceString& ns) {
+ void wipeCacheForCollection(OperationContext* opCtx, const NamespaceString& ns) {
if (!_isCachable(ns))
return;
- txn->recoveryUnit()->onCommit([this, txn, ns] {
+ opCtx->recoveryUnit()->onCommit([this, opCtx, ns] {
stdx::lock_guard<stdx::mutex> lk(_cachedHashedMutex);
if (ns.isCommand()) {
// The <dbName>.$cmd namespace can represent a command that
@@ -274,9 +274,9 @@ private:
} // namespace
-void logOpForDbHash(OperationContext* txn, const char* ns) {
+void logOpForDbHash(OperationContext* opCtx, const char* ns) {
NamespaceString nsString(ns);
- dbhashCmd.wipeCacheForCollection(txn, nsString);
+ dbhashCmd.wipeCacheForCollection(opCtx, nsString);
}
} // namespace mongo
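
wipeCacheForCollection above defers the hash-cache invalidation with recoveryUnit()->onCommit(), so the cached entry survives if the write never commits. A toy standalone version of that defer-until-commit pattern; ToyRecoveryUnit and the cache set are invented for illustration:

    #include <functional>
    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    // Toy transaction that queues callbacks and runs them only on commit,
    // echoing recoveryUnit()->onCommit(...) in the hunk above.
    class ToyRecoveryUnit {
    public:
        void onCommit(std::function<void()> cb) { _onCommit.push_back(std::move(cb)); }
        void commit() {
            for (auto& cb : _onCommit)
                cb();
            _onCommit.clear();
        }
    private:
        std::vector<std::function<void()>> _onCommit;
    };

    std::set<std::string> gCachedHashes{"test.foo", "test.bar"};

    void wipeCacheForCollection(ToyRecoveryUnit& ru, const std::string& ns) {
        // Defer the invalidation: if the write never commits, the cache stays valid.
        ru.onCommit([ns] { gCachedHashes.erase(ns); });
    }

    int main() {
        ToyRecoveryUnit ru;
        wipeCacheForCollection(ru, "test.foo");
        std::cout << "before commit: " << gCachedHashes.size() << " cached\n";
        ru.commit();
        std::cout << "after commit:  " << gCachedHashes.size() << " cached\n";
        return 0;
    }
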
diff --git a/src/mongo/db/commands/dbhash.h b/src/mongo/db/commands/dbhash.h
index 2afc3efb454..09db7e97e0e 100644
--- a/src/mongo/db/commands/dbhash.h
+++ b/src/mongo/db/commands/dbhash.h
@@ -32,6 +32,6 @@ namespace mongo {
class OperationContext;
-void logOpForDbHash(OperationContext* txn, const char* ns);
+void logOpForDbHash(OperationContext* opCtx, const char* ns);
} // namespace mongo
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 0f619356f96..febed50512f 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -111,7 +111,7 @@ public:
help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -119,8 +119,8 @@ public:
BSONObjBuilder* out) const {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto parsedDistinct = ParsedDistinct::parse(txn, nss, cmdObj, extensionsCallback, true);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
+ auto parsedDistinct = ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, true);
if (!parsedDistinct.isOK()) {
return parsedDistinct.getStatus();
}
@@ -133,7 +133,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility.");
}
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -145,12 +145,12 @@ public:
}
std::string errmsg;
(void)Command::findCommand("aggregate")
- ->run(txn, dbname, viewAggregation.getValue(), 0, errmsg, *out);
+ ->run(opCtx, dbname, viewAggregation.getValue(), 0, errmsg, *out);
return Status::OK();
}
auto executor = getExecutorDistinct(
- txn, collection, nss.ns(), &parsedDistinct.getValue(), PlanExecutor::YIELD_AUTO);
+ opCtx, collection, nss.ns(), &parsedDistinct.getValue(), PlanExecutor::YIELD_AUTO);
if (!executor.isOK()) {
return executor.getStatus();
}
@@ -159,7 +159,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -167,8 +167,8 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto parsedDistinct = ParsedDistinct::parse(txn, nss, cmdObj, extensionsCallback, false);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
+ auto parsedDistinct = ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, false);
if (!parsedDistinct.isOK()) {
return appendCommandStatus(result, parsedDistinct.getStatus());
}
@@ -183,7 +183,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility."));
}
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -196,7 +196,7 @@ public:
BSONObjBuilder aggResult;
(void)Command::findCommand("aggregate")
- ->run(txn, dbname, viewAggregation.getValue(), options, errmsg, aggResult);
+ ->run(opCtx, dbname, viewAggregation.getValue(), options, errmsg, aggResult);
if (ResolvedView::isResolvedViewErrorResponse(aggResult.asTempObj())) {
result.appendElements(aggResult.obj());
@@ -212,14 +212,14 @@ public:
}
auto executor = getExecutorDistinct(
- txn, collection, nss.ns(), &parsedDistinct.getValue(), PlanExecutor::YIELD_AUTO);
+ opCtx, collection, nss.ns(), &parsedDistinct.getValue(), PlanExecutor::YIELD_AUTO);
if (!executor.isOK()) {
return appendCommandStatus(result, executor.getStatus());
}
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(
Explain::getPlanSummary(executor.getValue().get()));
}
@@ -274,13 +274,13 @@ public:
}
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
// Get summary information about the plan.
PlanSummaryStats stats;
Explain::getSummaryStats(*executor.getValue(), &stats);
if (collection) {
- collection->infoCache()->notifyOfQuery(txn, stats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, stats.indexesUsed);
}
curOp->debug().setPlanSummaryMetrics(stats);
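
When distinct targets a view, the hunks above rewrite the request into an aggregation and hand it to the registered "aggregate" command. A compact standalone sketch of that rewrite-and-delegate dispatch; the registry, handler signature, and placeholder pipeline text are all invented here:

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    // Toy command registry: name -> handler, echoing Command::findCommand("aggregate")
    // followed by ->run(...) on a rewritten request in the hunks above.
    using Handler = std::function<std::string(const std::string& request)>;
    std::map<std::string, Handler> gCommands;

    std::string runDistinct(const std::string& ns, bool isView) {
        if (isView) {
            // Rewrite the request into the equivalent aggregation and delegate.
            const std::string viewAggregation = "aggregate " + ns + " [{$group: ...}]";
            return gCommands.at("aggregate")(viewAggregation);
        }
        return "distinct executed directly on " + ns;
    }

    int main() {
        gCommands["aggregate"] = [](const std::string& request) {
            return "aggregate executed: " + request;
        };
        std::cout << runDistinct("test.realColl", false) << '\n';
        std::cout << runDistinct("test.someView", true) << '\n';
        return 0;
    }
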
diff --git a/src/mongo/db/commands/driverHelpers.cpp b/src/mongo/db/commands/driverHelpers.cpp
index 8b6163e678b..c25887bf0a2 100644
--- a/src/mongo/db/commands/driverHelpers.cpp
+++ b/src/mongo/db/commands/driverHelpers.cpp
@@ -73,7 +73,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index b3f2b73d21f..86082761cce 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -84,14 +84,14 @@ public:
}
CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss = parseNsCollectionRequired(dbname, jsobj);
- return appendCommandStatus(result, dropIndexes(txn, nss, jsobj, &result));
+ return appendCommandStatus(result, dropIndexes(opCtx, nss, jsobj, &result));
}
} cmdDropIndexes;
@@ -116,25 +116,25 @@ public:
}
CmdReIndex() : Command("reIndex") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
string& errmsg,
BSONObjBuilder& result) {
- DBDirectClient db(txn);
+ DBDirectClient db(opCtx);
const NamespaceString toReIndexNs = parseNsCollectionRequired(dbname, jsobj);
LOG(0) << "CMD: reIndex " << toReIndexNs;
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, toReIndexNs.ns());
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), dbname, MODE_X);
+ OldClientContext ctx(opCtx, toReIndexNs.ns());
Collection* collection = ctx.db()->getCollection(toReIndexNs.ns());
if (!collection) {
- if (ctx.db()->getViewCatalog()->lookup(txn, toReIndexNs.ns()))
+ if (ctx.db()->getViewCatalog()->lookup(opCtx, toReIndexNs.ns()))
return appendCommandStatus(
result, {ErrorCodes::CommandNotSupportedOnView, "can't re-index a view"});
else
@@ -152,12 +152,12 @@ public:
vector<BSONObj> all;
{
vector<string> indexNames;
- collection->getCatalogEntry()->getAllIndexes(txn, &indexNames);
+ collection->getCatalogEntry()->getAllIndexes(opCtx, &indexNames);
all.reserve(indexNames.size());
for (size_t i = 0; i < indexNames.size(); i++) {
const string& name = indexNames[i];
- BSONObj spec = collection->getCatalogEntry()->getIndexSpec(txn, name);
+ BSONObj spec = collection->getCatalogEntry()->getIndexSpec(opCtx, name);
{
BSONObjBuilder bob;
@@ -192,8 +192,8 @@ public:
result.appendNumber("nIndexesWas", all.size());
{
- WriteUnitOfWork wunit(txn);
- Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
+ WriteUnitOfWork wunit(opCtx);
+ Status s = collection->getIndexCatalog()->dropAllIndexes(opCtx, true);
if (!s.isOK()) {
errmsg = "dropIndexes failed";
return appendCommandStatus(result, s);
@@ -201,7 +201,7 @@ public:
wunit.commit();
}
- MultiIndexBlock indexer(txn, collection);
+ MultiIndexBlock indexer(opCtx, collection);
// do not want interruption as that will leave us without indexes.
auto indexInfoObjs = indexer.init(all);
@@ -215,7 +215,7 @@ public:
}
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
wunit.commit();
}
@@ -224,8 +224,8 @@ public:
// This was also done when dropAllIndexes() committed, but we need to ensure that no one
// tries to read in the intermediate state where all indexes are newer than the current
// snapshot so are unable to be used.
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- auto snapshotName = replCoord->reserveSnapshotName(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ auto snapshotName = replCoord->reserveSnapshotName(opCtx);
replCoord->forceSnapshotCreation(); // Ensures a newer snapshot gets created even if idle.
collection->setMinimumVisibleSnapshot(snapshotName);
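
The reIndex path above stages its catalog changes inside a WriteUnitOfWork and only keeps them once wunit.commit() is called. A toy standalone unit of work showing the same commit-or-roll-back shape; ToyWriteUnitOfWork and the index-name vector are illustrative, not the real storage API:

    #include <iostream>
    #include <string>
    #include <vector>

    // Toy write unit of work: changes are staged and discarded unless commit()
    // is called before the unit goes out of scope, as with WriteUnitOfWork above.
    class ToyWriteUnitOfWork {
    public:
        explicit ToyWriteUnitOfWork(std::vector<std::string>& indexes)
            : _indexes(indexes), _saved(indexes) {}
        ~ToyWriteUnitOfWork() {
            if (!_committed)
                _indexes = _saved;  // roll back on scope exit without commit
        }
        void commit() { _committed = true; }
    private:
        std::vector<std::string>& _indexes;
        std::vector<std::string> _saved;
        bool _committed = false;
    };

    int main() {
        std::vector<std::string> indexes{"_id_", "a_1", "b_1"};
        {
            ToyWriteUnitOfWork wunit(indexes);
            indexes.clear();   // "dropAllIndexes"
            wunit.commit();    // keep the change
        }
        {
            ToyWriteUnitOfWork wunit(indexes);
            indexes.push_back("c_1");  // abandoned: no commit before scope exit
        }
        std::cout << "indexes now: " << indexes.size() << '\n';  // prints 0
        return 0;
    }
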
diff --git a/src/mongo/db/commands/eval.cpp b/src/mongo/db/commands/eval.cpp
index f9345d4247c..20945eb4828 100644
--- a/src/mongo/db/commands/eval.cpp
+++ b/src/mongo/db/commands/eval.cpp
@@ -58,7 +58,7 @@ namespace {
const int edebug = 0;
-bool dbEval(OperationContext* txn,
+bool dbEval(OperationContext* opCtx,
const string& dbName,
const BSONObj& cmd,
BSONObjBuilder& result,
@@ -92,7 +92,7 @@ bool dbEval(OperationContext* txn,
}
unique_ptr<Scope> s(getGlobalScriptEngine()->newScope());
- s->registerOperation(txn);
+ s->registerOperation(opCtx);
ScriptingFunction f = s->createFunction(code);
if (f == 0) {
@@ -100,7 +100,7 @@ bool dbEval(OperationContext* txn,
return false;
}
- s->localConnectForDbEval(txn, dbName.c_str());
+ s->localConnectForDbEval(opCtx, dbName.c_str());
if (e.type() == CodeWScope) {
s->init(e.codeWScopeScopeDataUnsafe());
@@ -171,22 +171,22 @@ public:
CmdEval() : Command("eval", false, "$eval") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
if (cmdObj["nolock"].trueValue()) {
- return dbEval(txn, dbname, cmdObj, result, errmsg);
+ return dbEval(opCtx, dbname, cmdObj, result, errmsg);
}
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
- OldClientContext ctx(txn, dbname, false /* no shard version checking */);
+ OldClientContext ctx(opCtx, dbname, false /* no shard version checking */);
- return dbEval(txn, dbname, cmdObj, result, errmsg);
+ return dbEval(opCtx, dbname, cmdObj, result, errmsg);
}
} cmdeval;
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 678fd7effa2..7c72c26977f 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -89,7 +89,7 @@ public:
* the command that you are explaining. The auth check is performed recursively
* on the nested command.
*/
- virtual Status checkAuthForOperation(OperationContext* txn,
+ virtual Status checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
if (Object != cmdObj.firstElement().type()) {
@@ -105,10 +105,10 @@ public:
return Status(ErrorCodes::CommandNotFound, ss);
}
- return commToExplain->checkAuthForOperation(txn, dbname, explainObj);
+ return commToExplain->checkAuthForOperation(opCtx, dbname, explainObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -135,12 +135,12 @@ public:
// copied from Command::execCommand and should be abstracted. Until then, make
// sure to keep it up to date.
repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(txn, dbname);
+ bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname);
bool commandCanRunOnSecondary = commToExplain->slaveOk();
bool commandIsOverriddenToRunOnSecondary = commToExplain->slaveOverrideOk() &&
- rpc::ServerSelectionMetadata::get(txn).canRunOnSecondary();
- bool iAmStandalone = !txn->writesAreReplicated();
+ rpc::ServerSelectionMetadata::get(opCtx).canRunOnSecondary();
+ bool iAmStandalone = !opCtx->writesAreReplicated();
const bool canRunHere = iAmPrimary || commandCanRunOnSecondary ||
commandIsOverriddenToRunOnSecondary || iAmStandalone;
@@ -154,8 +154,12 @@ public:
}
// Actually call the nested command's explain(...) method.
- Status explainStatus = commToExplain->explain(
- txn, dbname, explainObj, verbosity, rpc::ServerSelectionMetadata::get(txn), &result);
+ Status explainStatus = commToExplain->explain(opCtx,
+ dbname,
+ explainObj,
+ verbosity,
+ rpc::ServerSelectionMetadata::get(opCtx),
+ &result);
if (!explainStatus.isOK()) {
return appendCommandStatus(result, explainStatus);
}
diff --git a/src/mongo/db/commands/fail_point_cmd.cpp b/src/mongo/db/commands/fail_point_cmd.cpp
index a298c267647..9e6795e9d78 100644
--- a/src/mongo/db/commands/fail_point_cmd.cpp
+++ b/src/mongo/db/commands/fail_point_cmd.cpp
@@ -90,7 +90,7 @@ public:
h << "modifies the settings of a fail point";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index 29d3a96513e..d97f5cf0c54 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -193,14 +193,14 @@ StatusWith<ServerGlobalParams::FeatureCompatibility::Version> FeatureCompatibili
return version;
}
-void FeatureCompatibilityVersion::set(OperationContext* txn, StringData version) {
+void FeatureCompatibilityVersion::set(OperationContext* opCtx, StringData version) {
uassert(40284,
"featureCompatibilityVersion must be '3.4' or '3.2'. See "
"http://dochub.mongodb.org/core/3.4-feature-compatibility.",
version == FeatureCompatibilityVersionCommandParser::kVersion34 ||
version == FeatureCompatibilityVersionCommandParser::kVersion32);
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
NamespaceString nss(FeatureCompatibilityVersion::kCollection);
if (version == FeatureCompatibilityVersionCommandParser::kVersion34) {
@@ -211,27 +211,28 @@ void FeatureCompatibilityVersion::set(OperationContext* txn, StringData version)
std::vector<BSONObj> indexSpecs{k32IncompatibleIndexSpec};
{
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetOrCreateDb autoDB(txn, nss.db(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetOrCreateDb autoDB(opCtx, nss.db(), MODE_X);
uassert(ErrorCodes::NotMaster,
str::stream() << "Cannot set featureCompatibilityVersion to '" << version
<< "'. Not primary while attempting to create index on: "
<< nss.ns(),
- repl::ReplicationCoordinator::get(txn->getServiceContext())
- ->canAcceptWritesFor(txn, nss));
+ repl::ReplicationCoordinator::get(opCtx->getServiceContext())
+ ->canAcceptWritesFor(opCtx, nss));
IndexBuilder builder(k32IncompatibleIndexSpec, false);
- auto status = builder.buildInForeground(txn, autoDB.getDb());
+ auto status = builder.buildInForeground(opCtx, autoDB.getDb());
uassertStatusOK(status);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
getGlobalServiceContext()->getOpObserver()->onCreateIndex(
- txn, autoDB.getDb()->getSystemIndexesName(), k32IncompatibleIndexSpec, false);
+ opCtx, autoDB.getDb()->getSystemIndexesName(), k32IncompatibleIndexSpec, false);
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "FeatureCompatibilityVersion::set", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
+ opCtx, "FeatureCompatibilityVersion::set", nss.ns());
}
// We then update the featureCompatibilityVersion document stored in the
@@ -279,7 +280,7 @@ void FeatureCompatibilityVersion::set(OperationContext* txn, StringData version)
}
}
-void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn,
+void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* opCtx,
repl::StorageInterface* storageInterface) {
if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) {
std::vector<std::string> dbNames;
@@ -292,7 +293,7 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn,
}
}
- UnreplicatedWritesBlock unreplicatedWritesBlock(txn);
+ UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
NamespaceString nss(FeatureCompatibilityVersion::kCollection);
// We build a v=2 index on the "admin.system.version" collection as part of setting the
@@ -302,11 +303,11 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn,
std::vector<BSONObj> indexSpecs{k32IncompatibleIndexSpec};
{
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetOrCreateDb autoDB(txn, nss.db(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetOrCreateDb autoDB(opCtx, nss.db(), MODE_X);
IndexBuilder builder(k32IncompatibleIndexSpec, false);
- auto status = builder.buildInForeground(txn, autoDB.getDb());
+ auto status = builder.buildInForeground(opCtx, autoDB.getDb());
uassertStatusOK(status);
}
@@ -317,7 +318,7 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn,
// document when starting up, then on a subsequent start-up we'd no longer consider the data
// files "clean" and would instead be in featureCompatibilityVersion=3.2.
uassertStatusOK(storageInterface->insertDocument(
- txn,
+ opCtx,
nss,
BSON("_id" << FeatureCompatibilityVersion::kParameterName
<< FeatureCompatibilityVersion::kVersionField
@@ -372,7 +373,7 @@ public:
false // allowedToChangeAtRuntime
) {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
b.append(name,
getFeatureCompatibilityVersionString(
serverGlobalParams.featureCompatibility.version.load()));
diff --git a/src/mongo/db/commands/feature_compatibility_version.h b/src/mongo/db/commands/feature_compatibility_version.h
index 4bcb4b56e55..44029c72e21 100644
--- a/src/mongo/db/commands/feature_compatibility_version.h
+++ b/src/mongo/db/commands/feature_compatibility_version.h
@@ -66,13 +66,14 @@ public:
* available.
* 'version' should be '3.4' or '3.2'.
*/
- static void set(OperationContext* txn, StringData version);
+ static void set(OperationContext* opCtx, StringData version);
/**
* If there are no non-local databases and we are not running with --shardsvr, set
* featureCompatibilityVersion to 3.4.
*/
- static void setIfCleanStartup(OperationContext* txn, repl::StorageInterface* storageInterface);
+ static void setIfCleanStartup(OperationContext* opCtx,
+ repl::StorageInterface* storageInterface);
/**
* Examines a document inserted or updated in admin.system.version. If it is the
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index d0a260164d4..3aa7eb2018c 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -105,7 +105,7 @@ const DeleteStats* getDeleteStats(const PlanExecutor* exec) {
*
* If the operation failed, then an error Status is returned.
*/
-StatusWith<boost::optional<BSONObj>> advanceExecutor(OperationContext* txn,
+StatusWith<boost::optional<BSONObj>> advanceExecutor(OperationContext* opCtx,
PlanExecutor* exec,
bool isRemove) {
BSONObj value;
@@ -191,8 +191,8 @@ void appendCommandResponse(PlanExecutor* exec,
}
}
-Status checkCanAcceptWritesForDatabase(OperationContext* txn, const NamespaceString& nsString) {
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nsString)) {
+Status checkCanAcceptWritesForDatabase(OperationContext* opCtx, const NamespaceString& nsString) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nsString)) {
return Status(ErrorCodes::NotMaster,
str::stream()
<< "Not primary while running findAndModify command on collection "
@@ -201,12 +201,12 @@ Status checkCanAcceptWritesForDatabase(OperationContext* txn, const NamespaceStr
return Status::OK();
}
-void recordStatsForTopCommand(OperationContext* txn) {
- auto curOp = CurOp::get(txn);
+void recordStatsForTopCommand(OperationContext* opCtx) {
+ auto curOp = CurOp::get(opCtx);
const int writeLocked = 1;
- Top::get(txn->getClient()->getServiceContext())
- .record(txn,
+ Top::get(opCtx->getClient()->getServiceContext())
+ .record(opCtx,
curOp->getNS(),
curOp->getLogicalOp(),
writeLocked,
@@ -249,7 +249,7 @@ public:
return ReadWriteType::kWrite;
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -269,14 +269,14 @@ public:
const FindAndModifyRequest& args = parseStatus.getValue();
const NamespaceString& nsString = args.getNamespaceString();
- OpDebug* opDebug = &CurOp::get(txn)->debug();
+ OpDebug* opDebug = &CurOp::get(opCtx)->debug();
if (args.isRemove()) {
DeleteRequest request(nsString);
const bool isExplain = true;
makeDeleteRequest(args, isExplain, &request);
- ParsedDelete parsedDelete(txn, &request);
+ ParsedDelete parsedDelete(opCtx, &request);
Status parsedDeleteStatus = parsedDelete.parseRequest();
if (!parsedDeleteStatus.isOK()) {
return parsedDeleteStatus;
@@ -284,18 +284,18 @@ public:
// Explain calls of the findAndModify command are read-only, but we take write
// locks so that the timing information is more accurate.
- AutoGetCollection autoColl(txn, nsString, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nsString, MODE_IX);
if (!autoColl.getDb()) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "database " << dbName << " does not exist."};
}
- auto css = CollectionShardingState::get(txn, nsString);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nsString);
+ css->checkShardVersionOrThrow(opCtx);
Collection* const collection = autoColl.getCollection();
auto statusWithPlanExecutor =
- getExecutorDelete(txn, opDebug, collection, &parsedDelete);
+ getExecutorDelete(opCtx, opDebug, collection, &parsedDelete);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -307,7 +307,7 @@ public:
const bool isExplain = true;
makeUpdateRequest(args, isExplain, &updateLifecycle, &request);
- ParsedUpdate parsedUpdate(txn, &request);
+ ParsedUpdate parsedUpdate(opCtx, &request);
Status parsedUpdateStatus = parsedUpdate.parseRequest();
if (!parsedUpdateStatus.isOK()) {
return parsedUpdateStatus;
@@ -315,18 +315,18 @@ public:
// Explain calls of the findAndModify command are read-only, but we take write
// locks so that the timing information is more accurate.
- AutoGetCollection autoColl(txn, nsString, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nsString, MODE_IX);
if (!autoColl.getDb()) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "database " << dbName << " does not exist."};
}
- auto css = CollectionShardingState::get(txn, nsString);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nsString);
+ css->checkShardVersionOrThrow(opCtx);
Collection* collection = autoColl.getCollection();
auto statusWithPlanExecutor =
- getExecutorUpdate(txn, opDebug, collection, &parsedUpdate);
+ getExecutorUpdate(opCtx, opDebug, collection, &parsedUpdate);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -337,14 +337,14 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) override {
// findAndModify command is not replicated directly.
- invariant(txn->writesAreReplicated());
+ invariant(opCtx->writesAreReplicated());
const NamespaceString fullNs = parseNsCollectionRequired(dbName, cmdObj);
Status allowedWriteStatus = userAllowedWriteNS(fullNs.ns());
if (!allowedWriteStatus.isOK()) {
@@ -362,21 +362,21 @@ public:
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
auto lastOpAtOperationStart = repl::ReplClientInfo::forClient(client).getLastOp();
ScopeGuard lastOpSetterGuard =
MakeObjGuard(repl::ReplClientInfo::forClient(client),
&repl::ReplClientInfo::setLastOpToSystemLastOpTime,
- txn);
+ opCtx);
// If this is the local database, don't set last op.
if (dbName == "local") {
lastOpSetterGuard.Dismiss();
}
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
OpDebug* opDebug = &curOp->debug();
// Although usually the PlanExecutor handles WCE internally, it will throw WCEs when it is
@@ -388,38 +388,38 @@ public:
const bool isExplain = false;
makeDeleteRequest(args, isExplain, &request);
- ParsedDelete parsedDelete(txn, &request);
+ ParsedDelete parsedDelete(opCtx, &request);
Status parsedDeleteStatus = parsedDelete.parseRequest();
if (!parsedDeleteStatus.isOK()) {
return appendCommandStatus(result, parsedDeleteStatus);
}
- AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
+ AutoGetOrCreateDb autoDb(opCtx, dbName, MODE_IX);
+ Lock::CollectionLock collLock(opCtx->lockState(), nsString.ns(), MODE_IX);
// Attach the namespace and database profiling level to the current op.
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->enter_inlock(nsString.ns().c_str(),
- autoDb.getDb()->getProfilingLevel());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->enter_inlock(nsString.ns().c_str(),
+ autoDb.getDb()->getProfilingLevel());
}
- auto css = CollectionShardingState::get(txn, nsString);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nsString);
+ css->checkShardVersionOrThrow(opCtx);
- Status isPrimary = checkCanAcceptWritesForDatabase(txn, nsString);
+ Status isPrimary = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimary.isOK()) {
return appendCommandStatus(result, isPrimary);
}
Collection* const collection = autoDb.getDb()->getCollection(nsString.ns());
- if (!collection && autoDb.getDb()->getViewCatalog()->lookup(txn, nsString.ns())) {
+ if (!collection && autoDb.getDb()->getViewCatalog()->lookup(opCtx, nsString.ns())) {
return appendCommandStatus(result,
{ErrorCodes::CommandNotSupportedOnView,
"findAndModify not supported on a view"});
}
auto statusWithPlanExecutor =
- getExecutorDelete(txn, opDebug, collection, &parsedDelete);
+ getExecutorDelete(opCtx, opDebug, collection, &parsedDelete);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
@@ -427,12 +427,12 @@ public:
std::move(statusWithPlanExecutor.getValue());
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
StatusWith<boost::optional<BSONObj>> advanceStatus =
- advanceExecutor(txn, exec.get(), args.isRemove());
+ advanceExecutor(opCtx, exec.get(), args.isRemove());
if (!advanceStatus.isOK()) {
return appendCommandStatus(result, advanceStatus.getStatus());
}
@@ -443,7 +443,7 @@ public:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
}
opDebug->setPlanSummaryMetrics(summaryStats);
@@ -455,7 +455,7 @@ public:
Explain::getWinningPlanStats(exec.get(), &execStatsBob);
curOp->debug().execStats = execStatsBob.obj();
}
- recordStatsForTopCommand(txn);
+ recordStatsForTopCommand(opCtx);
boost::optional<BSONObj> value = advanceStatus.getValue();
appendCommandResponse(exec.get(), args.isRemove(), value, result);
@@ -465,32 +465,32 @@ public:
const bool isExplain = false;
makeUpdateRequest(args, isExplain, &updateLifecycle, &request);
- ParsedUpdate parsedUpdate(txn, &request);
+ ParsedUpdate parsedUpdate(opCtx, &request);
Status parsedUpdateStatus = parsedUpdate.parseRequest();
if (!parsedUpdateStatus.isOK()) {
return appendCommandStatus(result, parsedUpdateStatus);
}
- AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
+ AutoGetOrCreateDb autoDb(opCtx, dbName, MODE_IX);
+ Lock::CollectionLock collLock(opCtx->lockState(), nsString.ns(), MODE_IX);
// Attach the namespace and database profiling level to the current op.
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->enter_inlock(nsString.ns().c_str(),
- autoDb.getDb()->getProfilingLevel());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->enter_inlock(nsString.ns().c_str(),
+ autoDb.getDb()->getProfilingLevel());
}
- auto css = CollectionShardingState::get(txn, nsString);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nsString);
+ css->checkShardVersionOrThrow(opCtx);
- Status isPrimary = checkCanAcceptWritesForDatabase(txn, nsString);
+ Status isPrimary = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimary.isOK()) {
return appendCommandStatus(result, isPrimary);
}
Collection* collection = autoDb.getDb()->getCollection(nsString.ns());
- if (!collection && autoDb.getDb()->getViewCatalog()->lookup(txn, nsString.ns())) {
+ if (!collection && autoDb.getDb()->getViewCatalog()->lookup(opCtx, nsString.ns())) {
return appendCommandStatus(result,
{ErrorCodes::CommandNotSupportedOnView,
"findAndModify not supported on a view"});
@@ -503,7 +503,7 @@ public:
// in exclusive mode in order to create the collection.
collLock.relockAsDatabaseExclusive(autoDb.lock());
collection = autoDb.getDb()->getCollection(nsString.ns());
- Status isPrimaryAfterRelock = checkCanAcceptWritesForDatabase(txn, nsString);
+ Status isPrimaryAfterRelock = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimaryAfterRelock.isOK()) {
return appendCommandStatus(result, isPrimaryAfterRelock);
}
@@ -511,9 +511,9 @@ public:
if (collection) {
// Someone else beat us to creating the collection, do nothing.
} else {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
Status createCollStatus =
- userCreateNS(txn, autoDb.getDb(), nsString.ns(), BSONObj());
+ userCreateNS(opCtx, autoDb.getDb(), nsString.ns(), BSONObj());
if (!createCollStatus.isOK()) {
return appendCommandStatus(result, createCollStatus);
}
@@ -525,7 +525,7 @@ public:
}
auto statusWithPlanExecutor =
- getExecutorUpdate(txn, opDebug, collection, &parsedUpdate);
+ getExecutorUpdate(opCtx, opDebug, collection, &parsedUpdate);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
@@ -533,12 +533,12 @@ public:
std::move(statusWithPlanExecutor.getValue());
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
StatusWith<boost::optional<BSONObj>> advanceStatus =
- advanceExecutor(txn, exec.get(), args.isRemove());
+ advanceExecutor(opCtx, exec.get(), args.isRemove());
if (!advanceStatus.isOK()) {
return appendCommandStatus(result, advanceStatus.getStatus());
}
@@ -549,7 +549,7 @@ public:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
}
UpdateStage::recordUpdateStatsInOpDebug(getUpdateStats(exec.get()), opDebug);
opDebug->setPlanSummaryMetrics(summaryStats);
@@ -559,13 +559,13 @@ public:
Explain::getWinningPlanStats(exec.get(), &execStatsBob);
curOp->debug().execStats = execStatsBob.obj();
}
- recordStatsForTopCommand(txn);
+ recordStatsForTopCommand(opCtx);
boost::optional<BSONObj> value = advanceStatus.getValue();
appendCommandResponse(exec.get(), args.isRemove(), value, result);
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "findAndModify", nsString.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "findAndModify", nsString.ns());
if (repl::ReplClientInfo::forClient(client).getLastOp() != lastOpAtOperationStart) {
// If this operation has already generated a new lastOp, don't bother setting it here.
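
findAndModify above wraps its write path in MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END so a conflicting write is simply retried. A self-contained sketch of that retry shape under the assumption that conflicts surface as exceptions; ToyWriteConflict and the failure counter are invented:

    #include <iostream>
    #include <stdexcept>

    // Stand-in for MongoDB's write-conflict exception.
    struct ToyWriteConflict : std::runtime_error {
        ToyWriteConflict() : std::runtime_error("write conflict") {}
    };

    // Echoes the MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END pattern in the hunks
    // above: keep re-running the body until it completes without a conflict.
    template <typename Body>
    void writeConflictRetry(const char* opName, Body body) {
        for (int attempt = 1;; ++attempt) {
            try {
                body();
                return;
            } catch (const ToyWriteConflict&) {
                std::cout << opName << ": conflict on attempt " << attempt << ", retrying\n";
            }
        }
    }

    int main() {
        int failuresLeft = 2;  // simulate two conflicting attempts before success
        writeConflictRetry("findAndModify", [&] {
            if (failuresLeft-- > 0)
                throw ToyWriteConflict();
            std::cout << "findAndModify: committed\n";
        });
        return 0;
    }
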
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index d2b5075e283..c3d8e88b227 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -129,7 +129,7 @@ public:
return AuthorizationSession::get(client)->checkAuthForFind(nss, hasTerm);
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -158,9 +158,9 @@ public:
// Finish the parsing step by using the QueryRequest to create a CanonicalQuery.
- ExtensionsCallbackReal extensionsCallback(txn, &nss);
+ ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn, std::move(qrStatus.getValue()), extensionsCallback);
+ CanonicalQuery::canonicalize(opCtx, std::move(qrStatus.getValue()), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -168,7 +168,7 @@ public:
// Acquire locks. If the namespace is a view, we release our locks and convert the query
// request into an aggregation command.
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
if (ctx.getView()) {
// Relinquish locks. The aggregation command will re-acquire them.
ctx.releaseLocksForView();
@@ -184,7 +184,7 @@ public:
std::string errmsg;
try {
- agg->run(txn, dbname, viewAggregationCommand.getValue(), 0, errmsg, *out);
+ agg->run(opCtx, dbname, viewAggregationCommand.getValue(), 0, errmsg, *out);
} catch (DBException& error) {
if (error.getCode() == ErrorCodes::InvalidPipelineOperator) {
return {ErrorCodes::InvalidPipelineOperator,
@@ -201,7 +201,7 @@ public:
// We have a parsed query. Time to get the execution plan for it.
auto statusWithPlanExecutor =
- getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
+ getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -221,7 +221,7 @@ public:
* --Save state for getMore, transferring ownership of the executor to a ClientCursor.
* --Generate response to send to the client.
*/
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -237,7 +237,7 @@ public:
// Although it is a command, a find command gets counted as a query.
globalOpCounters.gotQuery();
- if (txn->getClient()->isInDirectClient()) {
+ if (opCtx->getClient()->isInDirectClient()) {
return appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation, "Cannot run find command from eval()"));
@@ -264,8 +264,8 @@ public:
// Validate term before acquiring locks, if provided.
if (auto term = qr->getReplicationTerm()) {
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- Status status = replCoord->updateTerm(txn, *term);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ Status status = replCoord->updateTerm(opCtx, *term);
// Note: updateTerm returns ok if term stayed the same.
if (!status.isOK()) {
return appendCommandStatus(result, status);
@@ -279,11 +279,11 @@ public:
// find command parameters, so these fields are redundant.
const int ntoreturn = -1;
const int ntoskip = -1;
- beginQueryOp(txn, nss, cmdObj, ntoreturn, ntoskip);
+ beginQueryOp(opCtx, nss, cmdObj, ntoreturn, ntoskip);
// Finish the parsing step by using the QueryRequest to create a CanonicalQuery.
- ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return appendCommandStatus(result, statusWithCQ.getStatus());
}
@@ -291,7 +291,7 @@ public:
// Acquire locks. If the query is on a view, we release our locks and convert the query
// request into an aggregation command.
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
// Relinquish locks. The aggregation command will re-acquire them.
@@ -306,7 +306,7 @@ public:
Command* agg = Command::findCommand("aggregate");
try {
- agg->run(txn, dbname, viewAggregationCommand.getValue(), options, errmsg, result);
+ agg->run(opCtx, dbname, viewAggregationCommand.getValue(), options, errmsg, result);
} catch (DBException& error) {
if (error.getCode() == ErrorCodes::InvalidPipelineOperator) {
return appendCommandStatus(
@@ -321,7 +321,7 @@ public:
// Get the execution plan for the query.
auto statusWithPlanExecutor =
- getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
+ getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
@@ -329,8 +329,8 @@ public:
std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
if (!collection) {
@@ -338,7 +338,7 @@ public:
// there is no ClientCursor id, and then return.
const long long numResults = 0;
const CursorId cursorId = 0;
- endQueryOp(txn, collection, *exec, numResults, cursorId);
+ endQueryOp(opCtx, collection, *exec, numResults, cursorId);
appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
return true;
}
@@ -378,12 +378,12 @@ public:
// Before saving the cursor, ensure that whatever plan we established happened with the
// expected collection version
- auto css = CollectionShardingState::get(txn, nss);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nss);
+ css->checkShardVersionOrThrow(opCtx);
// Set up the cursor for getMore.
CursorId cursorId = 0;
- if (shouldSaveCursor(txn, collection, state, exec.get())) {
+ if (shouldSaveCursor(opCtx, collection, state, exec.get())) {
// Register the execution plan inside a ClientCursor. Ownership of the PlanExecutor is
// transferred to the ClientCursor.
//
@@ -395,7 +395,7 @@ public:
ClientCursorPin pinnedCursor = collection->getCursorManager()->registerCursor(
{exec.release(),
nss.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
originalQR.getOptions(),
cmdObj.getOwned()});
cursorId = pinnedCursor.getCursor()->cursorid();
@@ -407,13 +407,13 @@ public:
cursorExec->saveState();
cursorExec->detachFromOperationContext();
- pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
+ pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
pinnedCursor.getCursor()->setPos(numResults);
// Fill out curop based on the results.
- endQueryOp(txn, collection, *cursorExec, numResults, cursorId);
+ endQueryOp(opCtx, collection, *cursorExec, numResults, cursorId);
} else {
- endQueryOp(txn, collection, *exec, numResults, cursorId);
+ endQueryOp(opCtx, collection, *exec, numResults, cursorId);
}
// Generate the response object to send to the client.
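
When the find command saves a cursor above, it hands the operation's unused time budget to the cursor via setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros()), so a later getMore resumes with whatever is left. A standalone sketch of that bookkeeping with std::chrono; ToyOperation, ToyCursor, and the 100 ms budget are invented values:

    #include <chrono>
    #include <iostream>
    #include <thread>

    using Clock = std::chrono::steady_clock;

    // Toy operation with a deadline; the remaining budget is handed to the cursor
    // so a later getMore resumes with whatever time is left.
    struct ToyOperation {
        Clock::time_point deadline;
        std::chrono::microseconds remaining() const {
            auto left = std::chrono::duration_cast<std::chrono::microseconds>(deadline - Clock::now());
            return left.count() > 0 ? left : std::chrono::microseconds{0};
        }
    };

    struct ToyCursor {
        std::chrono::microseconds leftoverMaxTimeMicros{0};
    };

    int main() {
        ToyOperation op{Clock::now() + std::chrono::milliseconds(100)};
        std::this_thread::sleep_for(std::chrono::milliseconds(30));  // "first batch" work

        ToyCursor cursor;
        cursor.leftoverMaxTimeMicros = op.remaining();  // carry the unused budget forward
        std::cout << "leftover budget for getMore: "
                  << cursor.leftoverMaxTimeMicros.count() << " us\n";
        return 0;
    }
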
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 01d922cec02..dfe417b6c06 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -117,13 +117,13 @@ public:
actions.addAction(ActionType::fsync);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- if (txn->lockState()->isLocked()) {
+ if (opCtx->lockState()->isLocked()) {
errmsg = "fsync: Cannot execute fsync command from contexts that hold a data lock";
return false;
}
@@ -138,23 +138,23 @@ public:
// the simple fsync command case
if (sync) {
// can this be GlobalRead? and if it can, it should be nongreedy.
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite w(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite w(opCtx->lockState());
// TODO SERVER-26822: Replace MMAPv1 specific calls with ones that are storage
// engine agnostic.
- getDur().commitNow(txn);
+ getDur().commitNow(opCtx);
// No WriteUnitOfWork needed, as this does no writes of its own.
}
// Take a global IS lock to ensure the storage engine is not shutdown
- Lock::GlobalLock global(txn->lockState(), MODE_IS, UINT_MAX);
+ Lock::GlobalLock global(opCtx->lockState(), MODE_IS, UINT_MAX);
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- result.append("numFiles", storageEngine->flushAllFiles(txn, sync));
+ result.append("numFiles", storageEngine->flushAllFiles(opCtx, sync));
return true;
}
- Lock::ExclusiveLock lk(txn->lockState(), commandMutex);
+ Lock::ExclusiveLock lk(opCtx->lockState(), commandMutex);
if (!sync) {
errmsg = "fsync: sync option must be true when using lock";
return false;
@@ -292,7 +292,7 @@ public:
return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -300,7 +300,7 @@ public:
BSONObjBuilder& result) override {
log() << "command: unlock requested";
- Lock::ExclusiveLock lk(txn->lockState(), commandMutex);
+ Lock::ExclusiveLock lk(opCtx->lockState(), commandMutex);
if (unlockFsync()) {
const auto lockCount = fsyncCmd.getLockCount();
@@ -343,26 +343,26 @@ void FSyncLockThread::run() {
invariant(fsyncCmd.getLockCount_inLock() == 1);
try {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite global(txn.lockState()); // No WriteUnitOfWork needed
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ ScopedTransaction transaction(&opCtx, MODE_X);
+ Lock::GlobalWrite global(opCtx.lockState()); // No WriteUnitOfWork needed
try {
// TODO SERVER-26822: Replace MMAPv1 specific calls with ones that are storage engine
// agnostic.
- getDur().syncDataAndTruncateJournal(&txn);
+ getDur().syncDataAndTruncateJournal(&opCtx);
} catch (const std::exception& e) {
error() << "error doing syncDataAndTruncateJournal: " << e.what();
fsyncCmd.threadStatus = Status(ErrorCodes::CommandFailed, e.what());
fsyncCmd.acquireFsyncLockSyncCV.notify_one();
return;
}
- txn.lockState()->downgradeGlobalXtoSForMMAPV1();
+ opCtx.lockState()->downgradeGlobalXtoSForMMAPV1();
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
try {
- storageEngine->flushAllFiles(&txn, true);
+ storageEngine->flushAllFiles(&opCtx, true);
} catch (const std::exception& e) {
error() << "error doing flushAll: " << e.what();
fsyncCmd.threadStatus = Status(ErrorCodes::CommandFailed, e.what());
@@ -371,9 +371,9 @@ void FSyncLockThread::run() {
}
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- uassertStatusOK(storageEngine->beginBackup(&txn));
+ uassertStatusOK(storageEngine->beginBackup(&opCtx));
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(&txn, "beginBackup", "global");
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(&opCtx, "beginBackup", "global");
} catch (const DBException& e) {
error() << "storage engine unable to begin backup : " << e.toString();
fsyncCmd.threadStatus = e.toStatus();
@@ -388,7 +388,7 @@ void FSyncLockThread::run() {
fsyncCmd.releaseFsyncLockSyncCV.wait(lk);
}
- storageEngine->endBackup(&txn);
+ storageEngine->endBackup(&opCtx);
} catch (const std::exception& e) {
severe() << "FSyncLockThread exception: " << e.what();
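FSyncLockThread is the one place in this file where the context is created rather than passed in, so the rename also touches the owning pointer (txnPtr becomes opCtxPtr). A condensed sketch of the renamed setup, with the error handling and backup calls elided:

    // Sketch of the renamed thread-local context setup (error paths omitted).
    const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
    OperationContext& opCtx = *opCtxPtr;
    ScopedTransaction transaction(&opCtx, MODE_X);
    Lock::GlobalWrite global(opCtx.lockState());      // No WriteUnitOfWork needed
    getDur().syncDataAndTruncateJournal(&opCtx);       // MMAPv1-specific, per the TODO above
    opCtx.lockState()->downgradeGlobalXtoSForMMAPV1();
    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
    storageEngine->flushAllFiles(&opCtx, true);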
diff --git a/src/mongo/db/commands/generic.cpp b/src/mongo/db/commands/generic.cpp
index f554498bfbf..f58179be389 100644
--- a/src/mongo/db/commands/generic.cpp
+++ b/src/mongo/db/commands/generic.cpp
@@ -88,7 +88,7 @@ public:
help << "{ buildinfo:1 }";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& jsobj,
int, // options
@@ -118,7 +118,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& badns,
BSONObj& cmdObj,
int,
@@ -144,7 +144,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
int,
@@ -187,7 +187,7 @@ public:
actions.addAction(ActionType::hostInfo);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -235,7 +235,7 @@ public:
actions.addAction(ActionType::logRotate);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
int,
@@ -267,7 +267,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
int,
@@ -366,7 +366,7 @@ public:
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
CmdForceError() : Command("forceerror") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbnamne,
BSONObj& cmdObj,
int,
@@ -401,7 +401,7 @@ public:
help << "{ getLog : '*' } OR { getLog : 'global' }";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -471,7 +471,7 @@ public:
actions.addAction(ActionType::getCmdLineOpts);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 159feb2e74f..3c26005121b 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -99,7 +99,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -111,7 +111,7 @@ public:
}
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- AutoGetCollectionForRead ctx(txn, nss);
+ AutoGetCollectionForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
@@ -126,7 +126,8 @@ public:
// We seek to populate this.
string nearFieldName;
bool using2DIndex = false;
- if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
+ if (!getFieldName(
+ opCtx, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
return false;
}
@@ -224,8 +225,8 @@ public:
qr->setProj(projObj);
qr->setLimit(numWanted);
qr->setCollation(collation);
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
errmsg = "Can't parse filter / create query";
return false;
@@ -237,7 +238,7 @@ public:
RangePreserver preserver(collection);
auto statusWithPlanExecutor =
- getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_AUTO, 0);
+ getExecutor(opCtx, collection, std::move(cq), PlanExecutor::YIELD_AUTO, 0);
if (!statusWithPlanExecutor.isOK()) {
errmsg = "can't get query executor";
return false;
@@ -245,9 +246,9 @@ public:
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
@@ -334,7 +335,7 @@ public:
stats.appendIntOrLL("time", curOp->elapsedMicros() / 1000);
stats.done();
- collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summary.indexesUsed);
curOp->debug().setPlanSummaryMetrics(summary);
@@ -348,7 +349,7 @@ public:
}
private:
- bool getFieldName(OperationContext* txn,
+ bool getFieldName(OperationContext* opCtx,
Collection* collection,
IndexCatalog* indexCatalog,
string* fieldOut,
@@ -357,7 +358,7 @@ private:
vector<IndexDescriptor*> idxs;
// First, try 2d.
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2D, idxs);
+ collection->getIndexCatalog()->findIndexByType(opCtx, IndexNames::GEO_2D, idxs);
if (idxs.size() > 1) {
*errOut = "more than one 2d index, not sure which to run geoNear on";
return false;
@@ -378,7 +379,7 @@ private:
// Next, 2dsphere.
idxs.clear();
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2DSPHERE, idxs);
+ collection->getIndexCatalog()->findIndexByType(opCtx, IndexNames::GEO_2DSPHERE, idxs);
if (0 == idxs.size()) {
*errOut = "no geo indices for geoNear";
return false;
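The geoNear changes show the full read path with the renamed context: canonicalization goes through an ExtensionsCallbackReal built from opCtx, and the executor comes from the same context. A sketch of that sequence, assuming qr is a prepared std::unique_ptr<QueryRequest> and the collection is locked as in the hunk above:

    // Sketch: canonicalize and plan a query with the renamed context.
    const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
    auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
    if (!statusWithCQ.isOK()) {
        return false;  // "Can't parse filter / create query"
    }
    unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
    auto statusWithPlanExecutor =
        getExecutor(opCtx, collection, std::move(cq), PlanExecutor::YIELD_AUTO, 0);
    if (!statusWithPlanExecutor.isOK()) {
        return false;  // "can't get query executor"
    }
    unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());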
diff --git a/src/mongo/db/commands/get_last_error.cpp b/src/mongo/db/commands/get_last_error.cpp
index e71dcf15210..0340a01139b 100644
--- a/src/mongo/db/commands/get_last_error.cpp
+++ b/src/mongo/db/commands/get_last_error.cpp
@@ -70,13 +70,13 @@ public:
help << "reset error state (used with getpreverror)";
}
CmdResetError() : Command("resetError", false, "reseterror") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& db,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- LastError::get(txn->getClient()).reset();
+ LastError::get(opCtx->getClient()).reset();
return true;
}
} cmdResetError;
@@ -104,7 +104,7 @@ public:
<< " { wtimeout:m} - timeout for w in m milliseconds";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -134,11 +134,11 @@ public:
// err is null.
//
- LastError* le = &LastError::get(txn->getClient());
+ LastError* le = &LastError::get(opCtx->getClient());
le->disable();
// Always append lastOp and connectionId
- Client& c = *txn->getClient();
+ Client& c = *opCtx->getClient();
auto replCoord = repl::getGlobalReplicationCoordinator();
if (replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet) {
const repl::OpTime lastOp = repl::ReplClientInfo::forClient(c).getLastOp();
@@ -224,7 +224,7 @@ public:
// Ensure options are valid for this host. Since getLastError doesn't do writes itself,
// treat it as if these are admin database writes, which need to be replicated so we do
// the strictest write concern checks.
- status = validateWriteConcern(txn, writeConcern, NamespaceString::kAdminDb);
+ status = validateWriteConcern(opCtx, writeConcern, NamespaceString::kAdminDb);
}
if (!status.isOK()) {
@@ -267,12 +267,12 @@ public:
}
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- txn->setMessage_inlock("waiting for write concern");
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ opCtx->setMessage_inlock("waiting for write concern");
}
WriteConcernResult wcResult;
- status = waitForWriteConcern(txn, lastOpTime, writeConcern, &wcResult);
+ status = waitForWriteConcern(opCtx, lastOpTime, writeConcern, &wcResult);
wcResult.appendTo(writeConcern, &result);
// For backward compatibility with 2.4, wtimeout returns ok : 1.0
@@ -305,13 +305,13 @@ public:
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
CmdGetPrevError() : Command("getPrevError", false, "getpreverror") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- LastError* le = &LastError::get(txn->getClient());
+ LastError* le = &LastError::get(opCtx->getClient());
le->disable();
le->appendSelf(result, true);
if (le->isValid())
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 4bedfe06e01..ed4b43a81a2 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -144,23 +144,23 @@ public:
request.nss, request.cursorid, request.term.is_initialized());
}
- bool runParsed(OperationContext* txn,
+ bool runParsed(OperationContext* opCtx,
const NamespaceString& origNss,
const GetMoreRequest& request,
BSONObj& cmdObj,
std::string& errmsg,
BSONObjBuilder& result) {
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
curOp->debug().cursorid = request.cursorid;
// Disable shard version checking - getmore commands are always unversioned
- OperationShardingState::get(txn).setShardVersion(request.nss, ChunkVersion::IGNORED());
+ OperationShardingState::get(opCtx).setShardVersion(request.nss, ChunkVersion::IGNORED());
// Validate term before acquiring locks, if provided.
if (request.term) {
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- Status status = replCoord->updateTerm(txn, *request.term);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ Status status = replCoord->updateTerm(opCtx, *request.term);
// Note: updateTerm returns ok if term stayed the same.
if (!status.isOK()) {
return appendCommandStatus(result, status);
@@ -193,7 +193,7 @@ public:
if (request.nss.isListIndexesCursorNS() || request.nss.isListCollectionsCursorNS()) {
cursorManager = CursorManager::getGlobalCursorManager();
} else {
- ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(txn, request.nss);
+ ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(opCtx, request.nss);
auto viewCtx = static_cast<AutoGetCollectionOrViewForRead*>(ctx.get());
Collection* collection = ctx->getCollection();
if (!collection) {
@@ -202,7 +202,7 @@ public:
// unknown, resulting in an appropriate error.
if (viewCtx->getView()) {
auto resolved =
- viewCtx->getDb()->getViewCatalog()->resolveView(txn, request.nss);
+ viewCtx->getDb()->getViewCatalog()->resolveView(opCtx, request.nss);
if (!resolved.isOK()) {
return appendCommandStatus(result, resolved.getStatus());
}
@@ -210,7 +210,7 @@ public:
// Only one shardversion can be set at a time for an operation, so unset it
// here to allow setting it on the underlying namespace.
- OperationShardingState::get(txn).unsetShardVersion(request.nss);
+ OperationShardingState::get(opCtx).unsetShardVersion(request.nss);
GetMoreRequest newRequest(resolved.getValue().getNamespace(),
request.cursorid,
@@ -219,11 +219,11 @@ public:
request.term,
request.lastKnownCommittedOpTime);
- bool retVal = runParsed(txn, origNss, newRequest, cmdObj, errmsg, result);
+ bool retVal = runParsed(opCtx, origNss, newRequest, cmdObj, errmsg, result);
{
// Set the namespace of the curop back to the view namespace so ctx records
// stats on this view namespace on destruction.
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setNS_inlock(origNss.ns());
}
return retVal;
@@ -251,7 +251,7 @@ public:
invariant(!unpinCollLock);
sleepFor(Milliseconds(10));
ctx.reset();
- ctx = stdx::make_unique<AutoGetCollectionForRead>(txn, request.nss);
+ ctx = stdx::make_unique<AutoGetCollectionForRead>(opCtx, request.nss);
}
if (request.nss.ns() != cursor->ns()) {
@@ -289,15 +289,15 @@ public:
// On early return, get rid of the cursor.
ScopeGuard cursorFreer =
- MakeGuard(&GetMoreCmd::cleanupCursor, txn, &ccPin.getValue(), request);
+ MakeGuard(&GetMoreCmd::cleanupCursor, opCtx, &ccPin.getValue(), request);
if (cursor->isReadCommitted())
- uassertStatusOK(txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
+ uassertStatusOK(opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
// Reset timeout timer on the cursor since the cursor is still in use.
cursor->resetIdleTime();
- const bool hasOwnMaxTime = txn->hasDeadline();
+ const bool hasOwnMaxTime = opCtx->hasDeadline();
if (!hasOwnMaxTime) {
// There is no time limit set directly on this getMore command. If the cursor is
@@ -307,16 +307,16 @@ public:
if (isCursorAwaitData(cursor)) {
uassert(40117,
"Illegal attempt to set operation deadline within DBDirectClient",
- !txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(Seconds{1});
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(Seconds{1});
} else if (cursor->getLeftoverMaxTimeMicros() < Microseconds::max()) {
uassert(40118,
"Illegal attempt to set operation deadline within DBDirectClient",
- !txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(cursor->getLeftoverMaxTimeMicros());
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(cursor->getLeftoverMaxTimeMicros());
}
}
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
if (cursor->isAggCursor()) {
// Agg cursors handle their own locking internally.
@@ -324,12 +324,12 @@ public:
}
PlanExecutor* exec = cursor->getExecutor();
- exec->reattachToOperationContext(txn);
+ exec->reattachToOperationContext(opCtx);
exec->restoreState();
auto planSummary = Explain::getPlanSummary(exec);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setPlanSummary_inlock(planSummary);
// Ensure that the original query or command object is available in the slow query log,
@@ -378,7 +378,7 @@ public:
// If this is an await data cursor, and we hit EOF without generating any results, then
// we block waiting for new data to arrive.
if (isCursorAwaitData(cursor) && state == PlanExecutor::IS_EOF && numResults == 0) {
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
// Return immediately if we need to update the commit time.
if (!request.lastKnownCommittedOpTime ||
(request.lastKnownCommittedOpTime == replCoord->getLastCommittedOpTime())) {
@@ -393,7 +393,7 @@ public:
ctx.reset();
// Block waiting for data.
- const auto timeout = txn->getRemainingMaxTimeMicros();
+ const auto timeout = opCtx->getRemainingMaxTimeMicros();
notifier->wait(notifierVersion, timeout);
notifier.reset();
@@ -402,7 +402,7 @@ public:
// CappedInsertNotifier.
curOp->setExpectedLatencyMs(durationCount<Milliseconds>(timeout));
- ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
+ ctx.reset(new AutoGetCollectionForRead(opCtx, request.nss));
exec->restoreState();
// We woke up because either the timed_wait expired, or there was more data. Either
@@ -440,7 +440,7 @@ public:
// from a previous find, then don't roll remaining micros over to the next
// getMore.
if (!hasOwnMaxTime) {
- cursor->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
+ cursor->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
}
cursor->incPos(numResults);
@@ -463,16 +463,16 @@ public:
// earlier and need to reacquire it in order to clean up our ClientCursorPin.
if (cursor->isAggCursor()) {
invariant(NULL == ctx.get());
- unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
+ unpinDBLock.reset(new Lock::DBLock(opCtx->lockState(), request.nss.db(), MODE_IS));
unpinCollLock.reset(
- new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
+ new Lock::CollectionLock(opCtx->lockState(), request.nss.ns(), MODE_IS));
}
}
return true;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -481,7 +481,7 @@ public:
// Counted as a getMore, not as a command.
globalOpCounters.gotGetMore();
- if (txn->getClient()->isInDirectClient()) {
+ if (opCtx->getClient()->isInDirectClient()) {
return appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation, "Cannot run getMore command from eval()"));
@@ -492,7 +492,7 @@ public:
return appendCommandStatus(result, parsedRequest.getStatus());
}
auto request = parsedRequest.getValue();
- return runParsed(txn, request.nss, request, cmdObj, errmsg, result);
+ return runParsed(opCtx, request.nss, request, cmdObj, errmsg, result);
}
/**
@@ -558,7 +558,7 @@ public:
* Called via a ScopeGuard on early return in order to ensure that the ClientCursor gets
* cleaned up properly.
*/
- static void cleanupCursor(OperationContext* txn,
+ static void cleanupCursor(OperationContext* opCtx,
ClientCursorPin* ccPin,
const GetMoreRequest& request) {
ClientCursor* cursor = ccPin->getCursor();
@@ -567,9 +567,9 @@ public:
std::unique_ptr<Lock::CollectionLock> unpinCollLock;
if (cursor->isAggCursor()) {
- unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
+ unpinDBLock.reset(new Lock::DBLock(opCtx->lockState(), request.nss.db(), MODE_IS));
unpinCollLock.reset(
- new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
+ new Lock::CollectionLock(opCtx->lockState(), request.nss.ns(), MODE_IS));
}
ccPin->deleteUnderlying();
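getMore is the heaviest user of the renamed context: it gates majority reads, applies the awaitData deadline, and reattaches the stored PlanExecutor through it. A trimmed sketch of the wake-up path, with the DBDirectClient uasserts and the hasOwnMaxTime guard left out:

    // Sketch: resume a pinned cursor under the renamed context (checks elided).
    if (cursor->isReadCommitted())
        uassertStatusOK(opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
    cursor->resetIdleTime();
    if (isCursorAwaitData(cursor)) {
        opCtx->setDeadlineAfterNowBy(Seconds{1});
    } else if (cursor->getLeftoverMaxTimeMicros() < Microseconds::max()) {
        opCtx->setDeadlineAfterNowBy(cursor->getLeftoverMaxTimeMicros());
    }
    opCtx->checkForInterrupt();  // May trigger maxTimeAlwaysTimeOut fail point.
    PlanExecutor* exec = cursor->getExecutor();
    exec->reattachToOperationContext(opCtx);
    exec->restoreState();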
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index 8d91be1f920..cf42c368e75 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -120,7 +120,7 @@ private:
return nss.ns();
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -134,11 +134,11 @@ private:
groupRequest.explain = true;
- AutoGetCollectionForRead ctx(txn, groupRequest.ns);
+ AutoGetCollectionForRead ctx(opCtx, groupRequest.ns);
Collection* coll = ctx.getCollection();
auto statusWithPlanExecutor =
- getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO);
+ getExecutorGroup(opCtx, coll, groupRequest, PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -149,7 +149,7 @@ private:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -166,20 +166,20 @@ private:
return appendCommandStatus(result, parseRequestStatus);
}
- AutoGetCollectionForRead ctx(txn, groupRequest.ns);
+ AutoGetCollectionForRead ctx(opCtx, groupRequest.ns);
Collection* coll = ctx.getCollection();
auto statusWithPlanExecutor =
- getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO);
+ getExecutorGroup(opCtx, coll, groupRequest, PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
unique_ptr<PlanExecutor> planExecutor = std::move(statusWithPlanExecutor.getValue());
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setPlanSummary_inlock(Explain::getPlanSummary(planExecutor.get()));
}
@@ -204,7 +204,7 @@ private:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*planExecutor, &summaryStats);
if (coll) {
- coll->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ coll->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
}
curOp->debug().setPlanSummaryMetrics(summaryStats);
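run() and explain() in this command share the same acquisition sequence and differ only in what they do with the executor, so the rename lands identically in both. The shared prefix, condensed from the hunks above:

    // Sketch: the acquisition sequence shared by run() and explain().
    AutoGetCollectionForRead ctx(opCtx, groupRequest.ns);
    Collection* coll = ctx.getCollection();
    auto statusWithPlanExecutor =
        getExecutorGroup(opCtx, coll, groupRequest, PlanExecutor::YIELD_AUTO);
    if (!statusWithPlanExecutor.isOK()) {
        return statusWithPlanExecutor.getStatus();  // run() wraps this in appendCommandStatus
    }
    unique_ptr<PlanExecutor> planExecutor = std::move(statusWithPlanExecutor.getValue());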
diff --git a/src/mongo/db/commands/hashcmd.cpp b/src/mongo/db/commands/hashcmd.cpp
index 76c1960f804..f7e54703898 100644
--- a/src/mongo/db/commands/hashcmd.cpp
+++ b/src/mongo/db/commands/hashcmd.cpp
@@ -79,7 +79,7 @@ public:
*> "out" : NumberLong(6271151123721111923),
*> "ok" : 1 }
**/
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& db,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/db/commands/haystack.cpp b/src/mongo/db/commands/haystack.cpp
index dc44fef0e1d..d760ee9b866 100644
--- a/src/mongo/db/commands/haystack.cpp
+++ b/src/mongo/db/commands/haystack.cpp
@@ -95,7 +95,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -103,7 +103,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
- AutoGetCollectionForRead ctx(txn, nss);
+ AutoGetCollectionForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
@@ -112,7 +112,7 @@ public:
}
vector<IndexDescriptor*> idxs;
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_HAYSTACK, idxs);
+ collection->getIndexCatalog()->findIndexByType(opCtx, IndexNames::GEO_HAYSTACK, idxs);
if (idxs.size() == 0) {
errmsg = "no geoSearch index";
return false;
@@ -137,7 +137,7 @@ public:
IndexDescriptor* desc = idxs[0];
HaystackAccessMethod* ham =
static_cast<HaystackAccessMethod*>(collection->getIndexCatalog()->getIndex(desc));
- ham->searchCommand(txn,
+ ham->searchCommand(opCtx,
collection,
nearElt.Obj(),
maxDistance.numberDouble(),
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 3ec63ba635c..68230d587af 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -61,7 +61,7 @@ using namespace mongo;
/**
* Retrieves a collection's query settings and plan cache from the database.
*/
-static Status getQuerySettingsAndPlanCache(OperationContext* txn,
+static Status getQuerySettingsAndPlanCache(OperationContext* opCtx,
Collection* collection,
const string& ns,
QuerySettings** querySettingsOut,
@@ -115,14 +115,14 @@ using std::unique_ptr;
IndexFilterCommand::IndexFilterCommand(const string& name, const string& helpText)
: Command(name), helpText(helpText) {}
-bool IndexFilterCommand::run(OperationContext* txn,
+bool IndexFilterCommand::run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- Status status = runIndexFilterCommand(txn, nss.ns(), cmdObj, &result);
+ Status status = runIndexFilterCommand(opCtx, nss.ns(), cmdObj, &result);
return appendCommandStatus(result, status);
}
@@ -160,17 +160,17 @@ ListFilters::ListFilters()
: IndexFilterCommand("planCacheListFilters",
"Displays index filters for all query shapes in a collection.") {}
-Status ListFilters::runIndexFilterCommand(OperationContext* txn,
+Status ListFilters::runIndexFilterCommand(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
QuerySettings* querySettings;
PlanCache* unused;
Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &unused);
+ getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), ns, &querySettings, &unused);
if (!status.isOK()) {
// No collection - return empty array of filters.
BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
@@ -228,26 +228,26 @@ ClearFilters::ClearFilters()
"Clears index filter for a single query shape or, "
"if the query shape is omitted, all filters for the collection.") {}
-Status ClearFilters::runIndexFilterCommand(OperationContext* txn,
+Status ClearFilters::runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
QuerySettings* querySettings;
PlanCache* planCache;
Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
+ getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), ns, &querySettings, &planCache);
if (!status.isOK()) {
// No collection - do nothing.
return Status::OK();
}
- return clear(txn, querySettings, planCache, ns, cmdObj);
+ return clear(opCtx, querySettings, planCache, ns, cmdObj);
}
// static
-Status ClearFilters::clear(OperationContext* txn,
+Status ClearFilters::clear(OperationContext* opCtx,
QuerySettings* querySettings,
PlanCache* planCache,
const std::string& ns,
@@ -259,7 +259,7 @@ Status ClearFilters::clear(OperationContext* txn,
// - clear hints for single query shape when a query shape is described in the
// command arguments.
if (cmdObj.hasField("query")) {
- auto statusWithCQ = PlanCacheCommand::canonicalize(txn, ns, cmdObj);
+ auto statusWithCQ = PlanCacheCommand::canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -291,7 +291,7 @@ Status ClearFilters::clear(OperationContext* txn,
querySettings->clearAllowedIndices();
const NamespaceString nss(ns);
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
// Remove corresponding entries from plan cache.
// Admin hints affect the planning process directly. If there were
@@ -312,7 +312,7 @@ Status ClearFilters::clear(OperationContext* txn,
qr->setSort(entry.sort);
qr->setProj(entry.projection);
qr->setCollation(entry.collation);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
invariantOK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -329,26 +329,26 @@ SetFilter::SetFilter()
: IndexFilterCommand("planCacheSetFilter",
"Sets index filter for a query shape. Overrides existing filter.") {}
-Status SetFilter::runIndexFilterCommand(OperationContext* txn,
+Status SetFilter::runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
const NamespaceString nss(ns);
- AutoGetCollectionForRead ctx(txn, nss);
+ AutoGetCollectionForRead ctx(opCtx, nss);
QuerySettings* querySettings;
PlanCache* planCache;
Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
+ getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), ns, &querySettings, &planCache);
if (!status.isOK()) {
return status;
}
- return set(txn, querySettings, planCache, ns, cmdObj);
+ return set(opCtx, querySettings, planCache, ns, cmdObj);
}
// static
-Status SetFilter::set(OperationContext* txn,
+Status SetFilter::set(OperationContext* opCtx,
QuerySettings* querySettings,
PlanCache* planCache,
const string& ns,
@@ -385,7 +385,7 @@ Status SetFilter::set(OperationContext* txn,
}
}
- auto statusWithCQ = PlanCacheCommand::canonicalize(txn, ns, cmdObj);
+ auto statusWithCQ = PlanCacheCommand::canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
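The static helpers keep taking the context as their first argument, which is also how the unit tests drive them: with a stub OperationContextNoop rather than a real operation. A condensed sketch mirroring the calls in index_filter_commands_test.cpp:

    // Sketch: exercising the static helpers with a stub context, as the tests do.
    QuerySettings querySettings;
    PlanCache planCache;
    OperationContextNoop opCtx;
    ASSERT_OK(SetFilter::set(&opCtx,
                             &querySettings,
                             &planCache,
                             nss.ns(),
                             fromjson("{query: {a: 1}, indexes: [{a: 1}, {b: 1}]}")));
    ASSERT_OK(ClearFilters::clear(
        &opCtx, &querySettings, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));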
diff --git a/src/mongo/db/commands/index_filter_commands.h b/src/mongo/db/commands/index_filter_commands.h
index c34494b19d8..1fada8269a5 100644
--- a/src/mongo/db/commands/index_filter_commands.h
+++ b/src/mongo/db/commands/index_filter_commands.h
@@ -63,7 +63,7 @@ public:
* implement plan cache command functionality.
*/
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -92,7 +92,7 @@ public:
* Should contain just enough logic to invoke run*Command() function
* in query_settings.h
*/
- virtual Status runIndexFilterCommand(OperationContext* txn,
+ virtual Status runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) = 0;
@@ -111,7 +111,7 @@ class ListFilters : public IndexFilterCommand {
public:
ListFilters();
- virtual Status runIndexFilterCommand(OperationContext* txn,
+ virtual Status runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -133,7 +133,7 @@ class ClearFilters : public IndexFilterCommand {
public:
ClearFilters();
- virtual Status runIndexFilterCommand(OperationContext* txn,
+ virtual Status runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -144,7 +144,7 @@ public:
* Namespace argument ns is ignored if we are clearing the entire cache.
* Removes corresponding entries from plan cache.
*/
- static Status clear(OperationContext* txn,
+ static Status clear(OperationContext* opCtx,
QuerySettings* querySettings,
PlanCache* planCache,
const std::string& ns,
@@ -167,7 +167,7 @@ class SetFilter : public IndexFilterCommand {
public:
SetFilter();
- virtual Status runIndexFilterCommand(OperationContext* txn,
+ virtual Status runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -176,7 +176,7 @@ public:
* Sets index filter for a query shape.
* Removes entry for query shape from plan cache.
*/
- static Status set(OperationContext* txn,
+ static Status set(OperationContext* opCtx,
QuerySettings* querySettings,
PlanCache* planCache,
const std::string& ns,
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 0da61155048..218a93be606 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -119,7 +119,7 @@ PlanRankingDecision* createDecision(size_t numPlans) {
/**
* Injects an entry into plan cache for query shape.
*/
-void addQueryShapeToPlanCache(OperationContext* txn,
+void addQueryShapeToPlanCache(OperationContext* opCtx,
PlanCache* planCache,
const char* queryStr,
const char* sortStr,
@@ -132,7 +132,7 @@ void addQueryShapeToPlanCache(OperationContext* txn,
qr->setProj(fromjson(projectionStr));
qr->setCollation(fromjson(collationStr));
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -153,7 +153,7 @@ bool planCacheContains(const PlanCache& planCache,
const char* projectionStr,
const char* collationStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create canonical query.
auto qr = stdx::make_unique<QueryRequest>(nss);
@@ -162,7 +162,7 @@ bool planCacheContains(const PlanCache& planCache,
qr->setProj(fromjson(projectionStr));
qr->setCollation(fromjson(collationStr));
auto statusWithInputQuery = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithInputQuery.getStatus());
unique_ptr<CanonicalQuery> inputQuery = std::move(statusWithInputQuery.getValue());
@@ -183,7 +183,7 @@ bool planCacheContains(const PlanCache& planCache,
qr->setProj(entry->projection);
qr->setCollation(entry->collation);
auto statusWithCurrentQuery = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCurrentQuery.getStatus());
unique_ptr<CanonicalQuery> currentQuery = std::move(statusWithCurrentQuery.getValue());
@@ -213,34 +213,34 @@ TEST(IndexFilterCommandsTest, ListFiltersEmpty) {
TEST(IndexFilterCommandsTest, ClearFiltersInvalidParameter) {
QuerySettings empty;
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
// If present, query has to be an object.
ASSERT_NOT_OK(
- ClearFilters::clear(&txn, &empty, &planCache, nss.ns(), fromjson("{query: 1234}")));
+ ClearFilters::clear(&opCtx, &empty, &planCache, nss.ns(), fromjson("{query: 1234}")));
// If present, sort must be an object.
ASSERT_NOT_OK(ClearFilters::clear(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, sort: 1234}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, sort: 1234}")));
// If present, projection must be an object.
ASSERT_NOT_OK(ClearFilters::clear(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, projection: 1234}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, projection: 1234}")));
// Query must pass canonicalization.
ASSERT_NOT_OK(ClearFilters::clear(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: {$no_such_op: 1}}}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: {$no_such_op: 1}}}")));
// Sort present without query is an error.
ASSERT_NOT_OK(
- ClearFilters::clear(&txn, &empty, &planCache, nss.ns(), fromjson("{sort: {a: 1}}")));
+ ClearFilters::clear(&opCtx, &empty, &planCache, nss.ns(), fromjson("{sort: {a: 1}}")));
// Projection present without query is an error.
ASSERT_NOT_OK(ClearFilters::clear(
- &txn, &empty, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
}
TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
QuerySettings querySettings;
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- ASSERT_OK(SetFilter::set(&txn,
+ ASSERT_OK(SetFilter::set(&opCtx,
&querySettings,
&planCache,
nss.ns(),
@@ -251,7 +251,7 @@ TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
// Clear nonexistent hint.
// Command should succeed and cache should remain unchanged.
ASSERT_OK(ClearFilters::clear(
- &txn, &querySettings, &planCache, nss.ns(), fromjson("{query: {b: 1}}")));
+ &opCtx, &querySettings, &planCache, nss.ns(), fromjson("{query: {b: 1}}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
}
@@ -263,53 +263,57 @@ TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
TEST(IndexFilterCommandsTest, SetFilterInvalidParameter) {
QuerySettings empty;
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, nss.ns(), fromjson("{}")));
+ ASSERT_NOT_OK(SetFilter::set(&opCtx, &empty, &planCache, nss.ns(), fromjson("{}")));
// Missing required query field.
ASSERT_NOT_OK(
- SetFilter::set(&txn, &empty, &planCache, nss.ns(), fromjson("{indexes: [{a: 1}]}")));
+ SetFilter::set(&opCtx, &empty, &planCache, nss.ns(), fromjson("{indexes: [{a: 1}]}")));
// Missing required indexes field.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
+ ASSERT_NOT_OK(
+ SetFilter::set(&opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
// Query has to be an object.
- ASSERT_NOT_OK(SetFilter::set(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: 1234, indexes: [{a: 1}, {b: 1}]}")));
+ ASSERT_NOT_OK(SetFilter::set(&opCtx,
+ &empty,
+ &planCache,
+ nss.ns(),
+ fromjson("{query: 1234, indexes: [{a: 1}, {b: 1}]}")));
// Indexes field has to be an array.
ASSERT_NOT_OK(SetFilter::set(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: 1234}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: 1234}")));
// Array indexes field cannot be empty.

ASSERT_NOT_OK(SetFilter::set(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: []}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: []}")));
// Elements in indexes have to be objects.
ASSERT_NOT_OK(SetFilter::set(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: [{a: 1}, 99]}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: [{a: 1}, 99]}")));
// Objects in indexes cannot be empty.
ASSERT_NOT_OK(SetFilter::set(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: [{a: 1}, {}]}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: [{a: 1}, {}]}")));
// If present, sort must be an object.
ASSERT_NOT_OK(
- SetFilter::set(&txn,
+ SetFilter::set(&opCtx,
&empty,
&planCache,
nss.ns(),
fromjson("{query: {a: 1}, sort: 1234, indexes: [{a: 1}, {b: 1}]}")));
// If present, projection must be an object.
ASSERT_NOT_OK(
- SetFilter::set(&txn,
+ SetFilter::set(&opCtx,
&empty,
&planCache,
nss.ns(),
fromjson("{query: {a: 1}, projection: 1234, indexes: [{a: 1}, {b: 1}]}")));
// If present, collation must be an object.
ASSERT_NOT_OK(
- SetFilter::set(&txn,
+ SetFilter::set(&opCtx,
&empty,
&planCache,
nss.ns(),
fromjson("{query: {a: 1}, collation: 1234, indexes: [{a: 1}, {b: 1}]}")));
// Query must pass canonicalization.
ASSERT_NOT_OK(
- SetFilter::set(&txn,
+ SetFilter::set(&opCtx,
&empty,
&planCache,
nss.ns(),
@@ -320,10 +324,10 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
QuerySettings querySettings;
PlanCache planCache;
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Inject query shape into plan cache.
- addQueryShapeToPlanCache(txn.get(),
+ addQueryShapeToPlanCache(opCtx.get(),
&planCache,
"{a: 1, b: 1}",
"{a: -1}",
@@ -332,7 +336,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_TRUE(planCacheContains(
planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}", "{locale: 'mock_reverse_string'}"));
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -355,7 +359,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
// Replacing the hint for the same query shape ({a: 1, b: 1} and {b: 2, a: 3}
// share same shape) should not change the query settings size.
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -371,7 +375,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_BSONOBJ_EQ(filterArray[0].Obj(), fromjson("{a: 1, b: 1}"));
// Add hint for different query shape.
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -380,7 +384,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_EQUALS(filters.size(), 2U);
// Add hint for 3rd query shape. This is to prepare for ClearHint tests.
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -389,12 +393,12 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_EQUALS(filters.size(), 3U);
// Add 2 entries to plan cache and check plan cache after clearing one/all filters.
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 1}", "{}", "{}", "{}");
- addQueryShapeToPlanCache(txn.get(), &planCache, "{b: 1}", "{}", "{}", "{}");
+ addQueryShapeToPlanCache(opCtx.get(), &planCache, "{a: 1}", "{}", "{}", "{}");
+ addQueryShapeToPlanCache(opCtx.get(), &planCache, "{b: 1}", "{}", "{}", "{}");
// Clear single hint.
ASSERT_OK(ClearFilters::clear(
- txn.get(), &querySettings, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
+ opCtx.get(), &querySettings, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 2U);
@@ -403,7 +407,8 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_TRUE(planCacheContains(planCache, "{b: 1}", "{}", "{}", "{}"));
// Clear all filters
- ASSERT_OK(ClearFilters::clear(txn.get(), &querySettings, &planCache, nss.ns(), fromjson("{}")));
+ ASSERT_OK(
+ ClearFilters::clear(opCtx.get(), &querySettings, &planCache, nss.ns(), fromjson("{}")));
filters = getFilters(querySettings);
ASSERT_TRUE(filters.empty());
@@ -413,7 +418,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
QuerySettings querySettings;
// Create a plan cache. Add an index so that indexability is included in the plan cache keys.
@@ -423,13 +428,13 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
// Inject query shapes with and without collation into plan cache.
addQueryShapeToPlanCache(
- txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}");
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{}");
+ opCtx.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}");
+ addQueryShapeToPlanCache(opCtx.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{}");
ASSERT_TRUE(
planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
ASSERT_TRUE(planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{}"));
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -450,7 +455,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
ASSERT_TRUE(planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{}"));
// Add filter for query shape without collation.
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -460,12 +465,12 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
// Add plan cache entries for both queries.
addQueryShapeToPlanCache(
- txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}");
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{}");
+ opCtx.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}");
+ addQueryShapeToPlanCache(opCtx.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{}");
// Clear filter for query with collation.
ASSERT_OK(ClearFilters::clear(
- txn.get(),
+ opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -490,7 +495,7 @@ TEST(IndexFilterCommandsTest, SetFilterAcceptsIndexNames) {
fromjson("{a: 1}"), false, false, false, "a_1:rev", nullptr, BSONObj());
collatedIndex.collator = &reverseCollator;
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
QuerySettings querySettings;
PlanCache planCache;
@@ -498,10 +503,10 @@ TEST(IndexFilterCommandsTest, SetFilterAcceptsIndexNames) {
{IndexEntry(fromjson("{a: 1}"), false, false, false, "a_1", nullptr, BSONObj()),
collatedIndex});
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 2}", "{}", "{}", "{}");
+ addQueryShapeToPlanCache(opCtx.get(), &planCache, "{a: 2}", "{}", "{}", "{}");
ASSERT_TRUE(planCacheContains(planCache, "{a: 2}", "{}", "{}", "{}"));
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
diff --git a/src/mongo/db/commands/isself.cpp b/src/mongo/db/commands/isself.cpp
index 0db7ba01440..6e8c1509b5f 100644
--- a/src/mongo/db/commands/isself.cpp
+++ b/src/mongo/db/commands/isself.cpp
@@ -54,7 +54,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/commands/kill_op.cpp b/src/mongo/db/commands/kill_op.cpp
index f96f3692ecd..1d16113ca93 100644
--- a/src/mongo/db/commands/kill_op.cpp
+++ b/src/mongo/db/commands/kill_op.cpp
@@ -128,7 +128,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -138,12 +138,12 @@ public:
log() << "going to kill op: " << opId;
result.append("info", "attempting to kill op");
- auto swLkAndOp = _findOp(txn->getClient(), opId);
+ auto swLkAndOp = _findOp(opCtx->getClient(), opId);
if (swLkAndOp.isOK()) {
stdx::unique_lock<Client> lk;
OperationContext* opCtx;
std::tie(lk, opCtx) = std::move(swLkAndOp.getValue());
- txn->getServiceContext()->killOperation(opCtx);
+ opCtx->getServiceContext()->killOperation(opCtx);
}
return true;
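One hunk here deserves a closer look: the command's parameter is now named opCtx, but the body already declares a local OperationContext* opCtx for the operation being killed, so the renamed call opCtx->getServiceContext()->killOperation(opCtx) resolves both names to the inner local. Both contexts belong to the same global service context, so behavior should be unchanged, but the shadowing is easy to misread. A sketch of an equivalent, unambiguous spelling; opCtxToKill is a hypothetical name, not part of the patch:

    // Sketch: avoid shadowing the command's own opCtx (hypothetical local name).
    auto swLkAndOp = _findOp(opCtx->getClient(), opId);
    if (swLkAndOp.isOK()) {
        stdx::unique_lock<Client> lk;
        OperationContext* opCtxToKill;  // the patch keeps the name 'opCtx' here
        std::tie(lk, opCtxToKill) = std::move(swLkAndOp.getValue());
        opCtx->getServiceContext()->killOperation(opCtxToKill);
    }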
diff --git a/src/mongo/db/commands/killcursors_cmd.cpp b/src/mongo/db/commands/killcursors_cmd.cpp
index 5831d3b2cc0..e51e4d65f8a 100644
--- a/src/mongo/db/commands/killcursors_cmd.cpp
+++ b/src/mongo/db/commands/killcursors_cmd.cpp
@@ -45,7 +45,9 @@ public:
KillCursorsCmd() = default;
private:
- Status _killCursor(OperationContext* txn, const NamespaceString& nss, CursorId cursorId) final {
+ Status _killCursor(OperationContext* opCtx,
+ const NamespaceString& nss,
+ CursorId cursorId) final {
std::unique_ptr<AutoGetCollectionOrViewForRead> ctx;
CursorManager* cursorManager;
@@ -55,22 +57,22 @@ private:
// data within a collection.
cursorManager = CursorManager::getGlobalCursorManager();
} else {
- ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(txn, nss);
+ ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(opCtx, nss);
Collection* collection = ctx->getCollection();
ViewDefinition* view = ctx->getView();
if (view) {
Database* db = ctx->getDb();
- auto resolved = db->getViewCatalog()->resolveView(txn, nss);
+ auto resolved = db->getViewCatalog()->resolveView(opCtx, nss);
if (!resolved.isOK()) {
return resolved.getStatus();
}
ctx->releaseLocksForView();
- Status status = _killCursor(txn, resolved.getValue().getNamespace(), cursorId);
+ Status status = _killCursor(opCtx, resolved.getValue().getNamespace(), cursorId);
{
// Set the namespace of the curop back to the view namespace so ctx records
// stats on this view namespace on destruction.
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(nss.ns());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(nss.ns());
}
return status;
}
@@ -82,7 +84,7 @@ private:
}
invariant(cursorManager);
- return cursorManager->eraseCursor(txn, cursorId, true /*shouldAudit*/);
+ return cursorManager->eraseCursor(opCtx, cursorId, true /*shouldAudit*/);
}
} killCursorsCmd;
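The view branch above follows the same shape as getMore's: resolve the view, drop the view locks, recurse on the resolved namespace, then restore the original namespace on CurOp so stats are recorded against the view. A condensed sketch of that flow, drawn from the hunk above:

    // Sketch: kill a cursor registered under a view's backing namespace.
    auto resolved = db->getViewCatalog()->resolveView(opCtx, nss);
    if (!resolved.isOK()) {
        return resolved.getStatus();
    }
    ctx->releaseLocksForView();
    Status status = _killCursor(opCtx, resolved.getValue().getNamespace(), cursorId);
    {
        // Restore the view namespace so ctx records stats against it on destruction.
        stdx::lock_guard<Client> lk(*opCtx->getClient());
        CurOp::get(opCtx)->setNS_inlock(nss.ns());
    }
    return status;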
diff --git a/src/mongo/db/commands/killcursors_common.cpp b/src/mongo/db/commands/killcursors_common.cpp
index 194882feee2..570c1e1df0e 100644
--- a/src/mongo/db/commands/killcursors_common.cpp
+++ b/src/mongo/db/commands/killcursors_common.cpp
@@ -63,7 +63,7 @@ Status KillCursorsCmdBase::checkAuthForCommand(Client* client,
return Status::OK();
}
-bool KillCursorsCmdBase::run(OperationContext* txn,
+bool KillCursorsCmdBase::run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -81,7 +81,7 @@ bool KillCursorsCmdBase::run(OperationContext* txn,
std::vector<CursorId> cursorsUnknown;
for (CursorId id : killCursorsRequest.cursorIds) {
- Status status = _killCursor(txn, killCursorsRequest.nss, id);
+ Status status = _killCursor(opCtx, killCursorsRequest.nss, id);
if (status.isOK()) {
cursorsKilled.push_back(id);
} else if (status.code() == ErrorCodes::CursorNotFound) {
@@ -91,7 +91,7 @@ bool KillCursorsCmdBase::run(OperationContext* txn,
}
audit::logKillCursorsAuthzCheck(
- txn->getClient(), killCursorsRequest.nss, id, status.code());
+ opCtx->getClient(), killCursorsRequest.nss, id, status.code());
}
KillCursorsResponse killCursorsResponse(
diff --git a/src/mongo/db/commands/killcursors_common.h b/src/mongo/db/commands/killcursors_common.h
index 3f66f845ef0..c5b6e9db31d 100644
--- a/src/mongo/db/commands/killcursors_common.h
+++ b/src/mongo/db/commands/killcursors_common.h
@@ -70,7 +70,7 @@ public:
const std::string& dbname,
const BSONObj& cmdObj) final;
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -79,12 +79,12 @@ public:
private:
/**
- * Kill the cursor with id 'cursorId' in namespace 'nss'. Use 'txn' if necessary.
+ * Kill the cursor with id 'cursorId' in namespace 'nss'. Use 'opCtx' if necessary.
*
* Returns Status::OK() if the cursor was killed, or ErrorCodes::CursorNotFound if there is no
* such cursor, or ErrorCodes::OperationFailed if the cursor cannot be killed.
*/
- virtual Status _killCursor(OperationContext* txn,
+ virtual Status _killCursor(OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId) = 0;
};
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 0309abe8c7c..68a6337f548 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -111,7 +111,7 @@ boost::optional<vector<StringData>> _getExactNameMatches(const MatchExpression*
*
* Does not add any information about the system.namespaces collection, or non-existent collections.
*/
-void _addWorkingSetMember(OperationContext* txn,
+void _addWorkingSetMember(OperationContext* opCtx,
const BSONObj& maybe,
const MatchExpression* matcher,
WorkingSet* ws,
@@ -147,7 +147,7 @@ BSONObj buildViewBson(const ViewDefinition& view) {
return b.obj();
}
-BSONObj buildCollectionBson(OperationContext* txn, const Collection* collection) {
+BSONObj buildCollectionBson(OperationContext* opCtx, const Collection* collection) {
if (!collection) {
return {};
@@ -162,13 +162,13 @@ BSONObj buildCollectionBson(OperationContext* txn, const Collection* collection)
b.append("name", collectionName);
b.append("type", "collection");
- CollectionOptions options = collection->getCatalogEntry()->getCollectionOptions(txn);
+ CollectionOptions options = collection->getCatalogEntry()->getCollectionOptions(opCtx);
b.append("options", options.toBSON());
BSONObj info = BSON("readOnly" << storageGlobalParams.readOnly);
b.append("info", info);
- auto idIndex = collection->getIndexCatalog()->findIdIndex(txn);
+ auto idIndex = collection->getIndexCatalog()->findIdIndex(opCtx);
if (idIndex) {
b.append("idIndex", idIndex->infoObj());
}
@@ -216,7 +216,7 @@ public:
CmdListCollections() : Command("listCollections") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -247,29 +247,29 @@ public:
return appendCommandStatus(result, parseCursorStatus);
}
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, dbname, MODE_S);
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetDb autoDb(opCtx, dbname, MODE_S);
Database* db = autoDb.getDb();
auto ws = make_unique<WorkingSet>();
- auto root = make_unique<QueuedDataStage>(txn, ws.get());
+ auto root = make_unique<QueuedDataStage>(opCtx, ws.get());
if (db) {
if (auto collNames = _getExactNameMatches(matcher.get())) {
for (auto&& collName : *collNames) {
auto nss = NamespaceString(db->name(), collName);
Collection* collection = db->getCollection(nss);
- BSONObj collBson = buildCollectionBson(txn, collection);
+ BSONObj collBson = buildCollectionBson(opCtx, collection);
if (!collBson.isEmpty()) {
- _addWorkingSetMember(txn, collBson, matcher.get(), ws.get(), root.get());
+ _addWorkingSetMember(opCtx, collBson, matcher.get(), ws.get(), root.get());
}
}
} else {
for (auto&& collection : *db) {
- BSONObj collBson = buildCollectionBson(txn, collection);
+ BSONObj collBson = buildCollectionBson(opCtx, collection);
if (!collBson.isEmpty()) {
- _addWorkingSetMember(txn, collBson, matcher.get(), ws.get(), root.get());
+ _addWorkingSetMember(opCtx, collBson, matcher.get(), ws.get(), root.get());
}
}
}
@@ -279,10 +279,10 @@ public:
SimpleBSONObjComparator::kInstance.evaluate(
filterElt.Obj() == ListCollectionsFilter::makeTypeCollectionFilter());
if (!skipViews) {
- db->getViewCatalog()->iterate(txn, [&](const ViewDefinition& view) {
+ db->getViewCatalog()->iterate(opCtx, [&](const ViewDefinition& view) {
BSONObj viewBson = buildViewBson(view);
if (!viewBson.isEmpty()) {
- _addWorkingSetMember(txn, viewBson, matcher.get(), ws.get(), root.get());
+ _addWorkingSetMember(opCtx, viewBson, matcher.get(), ws.get(), root.get());
}
});
}
@@ -291,7 +291,7 @@ public:
const NamespaceString cursorNss = NamespaceString::makeListCollectionsNSS(dbname);
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(ws), std::move(root), cursorNss.ns(), PlanExecutor::YIELD_MANUAL);
+ opCtx, std::move(ws), std::move(root), cursorNss.ns(), PlanExecutor::YIELD_MANUAL);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
@@ -323,7 +323,7 @@ public:
auto pinnedCursor = CursorManager::getGlobalCursorManager()->registerCursor(
{exec.release(),
cursorNss.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
cursorId = pinnedCursor.getCursor()->cursorid();
}
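
The list_collections.cpp hunks above rename the context argument consistently at both the helper declarations (buildCollectionBson, _addWorkingSetMember) and every call site, since the same pointer is threaded from run() down through each helper. The following is a minimal standalone sketch of that threading pattern; the OperationContext struct and helper names here are hypothetical stand-ins, not MongoDB's real types.

#include <iostream>
#include <string>
#include <vector>

struct OperationContext {            // hypothetical stand-in for mongo::OperationContext
    std::string clientName;
};

// Helper takes the context first, mirroring buildCollectionBson(opCtx, collection).
std::string describeCollection(OperationContext* opCtx, const std::string& collName) {
    return collName + " (requested by " + opCtx->clientName + ")";
}

void listCollectionNames(OperationContext* opCtx, const std::vector<std::string>& names) {
    for (const auto& name : names) {
        // The same opCtx flows to every helper, so a whole-word rename of the
        // parameter keeps declarations and call sites consistent.
        std::cout << describeCollection(opCtx, name) << '\n';
    }
}

int main() {
    OperationContext opCtx{"listCollections example"};
    listCollectionNames(&opCtx, {"foo", "bar"});
    return 0;
}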
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 08de6f6cd6f..ccc2f82cc49 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -83,7 +83,7 @@ public:
CmdListDatabases() : Command("listDatabases", true) {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -114,8 +114,8 @@ public:
vector<string> dbNames;
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
{
- ScopedTransaction transaction(txn, MODE_IS);
- Lock::GlobalLock lk(txn->lockState(), MODE_IS, UINT_MAX);
+ ScopedTransaction transaction(opCtx, MODE_IS);
+ Lock::GlobalLock lk(opCtx->lockState(), MODE_IS, UINT_MAX);
storageEngine->listDatabases(&dbNames);
}
@@ -135,17 +135,17 @@ public:
if (filterNameOnly && !filter->matchesBSON(b.asTempObj()))
continue;
- ScopedTransaction transaction(txn, MODE_IS);
- Lock::DBLock dbLock(txn->lockState(), dbname, MODE_IS);
+ ScopedTransaction transaction(opCtx, MODE_IS);
+ Lock::DBLock dbLock(opCtx->lockState(), dbname, MODE_IS);
- Database* db = dbHolder().get(txn, dbname);
+ Database* db = dbHolder().get(opCtx, dbname);
if (!db)
continue;
const DatabaseCatalogEntry* entry = db->getDatabaseCatalogEntry();
invariant(entry);
- size = entry->sizeOnDisk(txn);
+ size = entry->sizeOnDisk(opCtx);
b.append("sizeOnDisk", static_cast<double>(size));
b.appendBool("empty", entry->isEmpty());
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 59528d8bf11..940edb13eda 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -116,7 +116,7 @@ public:
CmdListIndexes() : Command("listIndexes") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -132,7 +132,7 @@ public:
return appendCommandStatus(result, parseCursorStatus);
}
- AutoGetCollectionForRead autoColl(txn, ns);
+ AutoGetCollectionForRead autoColl(opCtx, ns);
if (!autoColl.getDb()) {
return appendCommandStatus(result,
Status(ErrorCodes::NamespaceNotFound, "no database"));
@@ -150,19 +150,19 @@ public:
vector<string> indexNames;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
indexNames.clear();
- cce->getAllIndexes(txn, &indexNames);
+ cce->getAllIndexes(opCtx, &indexNames);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "listIndexes", ns.ns());
auto ws = make_unique<WorkingSet>();
- auto root = make_unique<QueuedDataStage>(txn, ws.get());
+ auto root = make_unique<QueuedDataStage>(opCtx, ws.get());
for (size_t i = 0; i < indexNames.size(); i++) {
BSONObj indexSpec;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- indexSpec = cce->getIndexSpec(txn, indexNames[i]);
+ indexSpec = cce->getIndexSpec(opCtx, indexNames[i]);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "listIndexes", ns.ns());
if (ns.ns() == FeatureCompatibilityVersion::kCollection &&
indexNames[i] == FeatureCompatibilityVersion::k32IncompatibleIndexName) {
@@ -198,7 +198,7 @@ public:
dassert(ns == cursorNss.getTargetNSForListIndexes());
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(ws), std::move(root), cursorNss.ns(), PlanExecutor::YIELD_MANUAL);
+ opCtx, std::move(ws), std::move(root), cursorNss.ns(), PlanExecutor::YIELD_MANUAL);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
@@ -230,7 +230,7 @@ public:
auto pinnedCursor = CursorManager::getGlobalCursorManager()->registerCursor(
{exec.release(),
cursorNss.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
cursorId = pinnedCursor.getCursor()->cursorid();
}
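
The list_indexes.cpp hunks wrap catalog reads in MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END and clear indexNames before each attempt, so the body can be re-run cleanly after a conflict. Here is a standalone sketch of that retry shape under stated assumptions: the exception type and writeConflictRetry helper are illustrative only, not the real macro expansion.

#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

struct WriteConflictException : std::runtime_error {
    WriteConflictException() : std::runtime_error("write conflict") {}
};

template <typename Body>
void writeConflictRetry(const std::string& opName, Body body) {
    for (int attempt = 0;; ++attempt) {
        try {
            body();
            return;                               // success: leave the loop
        } catch (const WriteConflictException&) {
            std::cout << opName << ": retrying after conflict (attempt "
                      << attempt + 1 << ")\n";     // log, then run the body again
        }
    }
}

int main() {
    int failuresLeft = 2;
    std::vector<std::string> indexNames;
    writeConflictRetry("listIndexes", [&] {
        indexNames.clear();                        // start from scratch each attempt
        if (failuresLeft-- > 0)
            throw WriteConflictException();
        indexNames = {"_id_", "a_1"};
    });
    std::cout << "loaded " << indexNames.size() << " index names\n";
    return 0;
}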
diff --git a/src/mongo/db/commands/lock_info.cpp b/src/mongo/db/commands/lock_info.cpp
index 950533ae333..69dd15a6c0b 100644
--- a/src/mongo/db/commands/lock_info.cpp
+++ b/src/mongo/db/commands/lock_info.cpp
@@ -79,7 +79,7 @@ public:
CmdLockInfo() : Command("lockInfo", true) {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -87,7 +87,7 @@ public:
BSONObjBuilder& result) {
std::map<LockerId, BSONObj> lockToClientMap;
- for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
+ for (ServiceContext::LockedClientsCursor cursor(opCtx->getClient()->getServiceContext());
Client* client = cursor.next();) {
invariant(client);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 781c0d1d5af..25e9590a6d9 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -372,40 +372,40 @@ Config::Config(const string& _dbname, const BSONObj& cmdObj) {
void State::dropTempCollections() {
if (!_config.tempNamespace.isEmpty()) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(_txn, MODE_IX);
- AutoGetDb autoDb(_txn, _config.tempNamespace.db(), MODE_X);
+ ScopedTransaction scopedXact(_opCtx, MODE_IX);
+ AutoGetDb autoDb(_opCtx, _config.tempNamespace.db(), MODE_X);
if (auto db = autoDb.getDb()) {
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(_opCtx);
uassert(ErrorCodes::PrimarySteppedDown,
"no longer primary",
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(
- _txn, _config.tempNamespace));
- db->dropCollection(_txn, _config.tempNamespace.ns());
+ _opCtx, _config.tempNamespace));
+ db->dropCollection(_opCtx, _config.tempNamespace.ns());
wunit.commit();
}
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- _txn, "M/R dropTempCollections", _config.tempNamespace.ns())
+ _opCtx, "M/R dropTempCollections", _config.tempNamespace.ns())
// Always forget about temporary namespaces, so we don't cache lots of them
ShardConnection::forgetNS(_config.tempNamespace.ns());
}
if (_useIncremental && !_config.incLong.isEmpty()) {
// We don't want to log the deletion of incLong as it isn't replicated. While
// harmless, this would lead to a scary looking warning on the secondaries.
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+ bool shouldReplicateWrites = _opCtx->writesAreReplicated();
+ _opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _opCtx, shouldReplicateWrites);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(_txn, MODE_IX);
- Lock::DBLock lk(_txn->lockState(), _config.incLong.db(), MODE_X);
- if (Database* db = dbHolder().get(_txn, _config.incLong.ns())) {
- WriteUnitOfWork wunit(_txn);
- db->dropCollection(_txn, _config.incLong.ns());
+ ScopedTransaction scopedXact(_opCtx, MODE_IX);
+ Lock::DBLock lk(_opCtx->lockState(), _config.incLong.db(), MODE_X);
+ if (Database* db = dbHolder().get(_opCtx, _config.incLong.ns())) {
+ WriteUnitOfWork wunit(_opCtx);
+ db->dropCollection(_opCtx, _config.incLong.ns());
wunit.commit();
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R dropTempCollections", _config.incLong.ns())
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "M/R dropTempCollections", _config.incLong.ns())
ShardConnection::forgetNS(_config.incLong.ns());
}
@@ -422,20 +422,20 @@ void State::prepTempCollection() {
if (_useIncremental) {
// Create the inc collection and make sure we have index on "0" key.
// Intentionally not replicating the inc collection to secondaries.
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+ bool shouldReplicateWrites = _opCtx->writesAreReplicated();
+ _opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _opCtx, shouldReplicateWrites);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- OldClientWriteContext incCtx(_txn, _config.incLong.ns());
- WriteUnitOfWork wuow(_txn);
+ OldClientWriteContext incCtx(_opCtx, _config.incLong.ns());
+ WriteUnitOfWork wuow(_opCtx);
Collection* incColl = incCtx.getCollection();
invariant(!incColl);
CollectionOptions options;
options.setNoIdIndex();
options.temp = true;
- incColl = incCtx.db()->createCollection(_txn, _config.incLong.ns(), options);
+ incColl = incCtx.db()->createCollection(_opCtx, _config.incLong.ns(), options);
invariant(incColl);
// We explicitly create a v=2 index on the "0" field so that it is always possible for a
@@ -448,7 +448,7 @@ void State::prepTempCollection() {
<< "v"
<< static_cast<int>(IndexVersion::kV2));
Status status = incColl->getIndexCatalog()
- ->createIndexOnEmptyCollection(_txn, indexSpec)
+ ->createIndexOnEmptyCollection(_opCtx, indexSpec)
.getStatus();
if (!status.isOK()) {
uasserted(17305,
@@ -459,7 +459,7 @@ void State::prepTempCollection() {
}
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R prepTempCollection", _config.incLong.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "M/R prepTempCollection", _config.incLong.ns());
}
CollectionOptions finalOptions;
@@ -467,13 +467,13 @@ void State::prepTempCollection() {
{
// copy indexes and collection options into temporary storage
- OldClientWriteContext finalCtx(_txn, _config.outputOptions.finalNamespace.ns());
+ OldClientWriteContext finalCtx(_opCtx, _config.outputOptions.finalNamespace.ns());
Collection* const finalColl = finalCtx.getCollection();
if (finalColl) {
- finalOptions = finalColl->getCatalogEntry()->getCollectionOptions(_txn);
+ finalOptions = finalColl->getCatalogEntry()->getCollectionOptions(_opCtx);
IndexCatalog::IndexIterator ii =
- finalColl->getIndexCatalog()->getIndexIterator(_txn, true);
+ finalColl->getIndexCatalog()->getIndexIterator(_opCtx, true);
// Iterate over finalColl's indexes.
while (ii.more()) {
IndexDescriptor* currIndex = ii.next();
@@ -495,23 +495,23 @@ void State::prepTempCollection() {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
// create temp collection and insert the indexes from temporary storage
- OldClientWriteContext tempCtx(_txn, _config.tempNamespace.ns());
- WriteUnitOfWork wuow(_txn);
+ OldClientWriteContext tempCtx(_opCtx, _config.tempNamespace.ns());
+ WriteUnitOfWork wuow(_opCtx);
uassert(ErrorCodes::PrimarySteppedDown,
"no longer primary",
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(_txn,
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(_opCtx,
_config.tempNamespace));
Collection* tempColl = tempCtx.getCollection();
invariant(!tempColl);
CollectionOptions options = finalOptions;
options.temp = true;
- tempColl = tempCtx.db()->createCollection(_txn, _config.tempNamespace.ns(), options);
+ tempColl = tempCtx.db()->createCollection(_opCtx, _config.tempNamespace.ns(), options);
for (vector<BSONObj>::iterator it = indexesToInsert.begin(); it != indexesToInsert.end();
++it) {
Status status =
- tempColl->getIndexCatalog()->createIndexOnEmptyCollection(_txn, *it).getStatus();
+ tempColl->getIndexCatalog()->createIndexOnEmptyCollection(_opCtx, *it).getStatus();
if (!status.isOK()) {
if (status.code() == ErrorCodes::IndexAlreadyExists) {
continue;
@@ -520,11 +520,12 @@ void State::prepTempCollection() {
}
// Log the createIndex operation.
string logNs = _config.tempNamespace.db() + ".system.indexes";
- getGlobalServiceContext()->getOpObserver()->onCreateIndex(_txn, logNs, *it, false);
+ getGlobalServiceContext()->getOpObserver()->onCreateIndex(_opCtx, logNs, *it, false);
}
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R prepTempCollection", _config.tempNamespace.ns())
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
+ _opCtx, "M/R prepTempCollection", _config.tempNamespace.ns())
}
/**
@@ -605,7 +606,7 @@ void State::appendResults(BSONObjBuilder& final) {
* Does post processing on output collection.
* This may involve replacing, merging or reducing.
*/
-long long State::postProcessCollection(OperationContext* txn,
+long long State::postProcessCollection(OperationContext* opCtx,
CurOp* curOp,
ProgressMeterHolder& pm) {
if (_onDisk == false || _config.outputOptions.outType == Config::INMEMORY)
@@ -613,22 +614,22 @@ long long State::postProcessCollection(OperationContext* txn,
bool holdingGlobalLock = false;
if (_config.outputOptions.outNonAtomic)
- return postProcessCollectionNonAtomic(txn, curOp, pm, holdingGlobalLock);
+ return postProcessCollectionNonAtomic(opCtx, curOp, pm, holdingGlobalLock);
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
- ScopedTransaction transaction(txn, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_X);
// This must be global because we may write across different databases.
- Lock::GlobalWrite lock(txn->lockState());
+ Lock::GlobalWrite lock(opCtx->lockState());
holdingGlobalLock = true;
- return postProcessCollectionNonAtomic(txn, curOp, pm, holdingGlobalLock);
+ return postProcessCollectionNonAtomic(opCtx, curOp, pm, holdingGlobalLock);
}
namespace {
// Runs a count against the namespace specified by 'ns'. If the caller holds the global write lock,
// then this function does not acquire any additional locks.
-unsigned long long _collectionCount(OperationContext* txn,
+unsigned long long _collectionCount(OperationContext* opCtx,
const NamespaceString& nss,
bool callerHoldsGlobalLock) {
Collection* coll = nullptr;
@@ -637,32 +638,32 @@ unsigned long long _collectionCount(OperationContext* txn,
// If the global write lock is held, we must avoid using AutoGetCollectionForRead as it may lead
// to deadlock when waiting for a majority snapshot to be committed. See SERVER-24596.
if (callerHoldsGlobalLock) {
- Database* db = dbHolder().get(txn, nss.ns());
+ Database* db = dbHolder().get(opCtx, nss.ns());
if (db) {
coll = db->getCollection(nss);
}
} else {
- ctx.emplace(txn, nss);
+ ctx.emplace(opCtx, nss);
coll = ctx->getCollection();
}
- return coll ? coll->numRecords(txn) : 0;
+ return coll ? coll->numRecords(opCtx) : 0;
}
} // namespace
-long long State::postProcessCollectionNonAtomic(OperationContext* txn,
+long long State::postProcessCollectionNonAtomic(OperationContext* opCtx,
CurOp* curOp,
ProgressMeterHolder& pm,
bool callerHoldsGlobalLock) {
if (_config.outputOptions.finalNamespace == _config.tempNamespace)
- return _collectionCount(txn, _config.outputOptions.finalNamespace, callerHoldsGlobalLock);
+ return _collectionCount(opCtx, _config.outputOptions.finalNamespace, callerHoldsGlobalLock);
if (_config.outputOptions.outType == Config::REPLACE ||
- _collectionCount(txn, _config.outputOptions.finalNamespace, callerHoldsGlobalLock) == 0) {
- ScopedTransaction transaction(txn, MODE_X);
+ _collectionCount(opCtx, _config.outputOptions.finalNamespace, callerHoldsGlobalLock) == 0) {
+ ScopedTransaction transaction(opCtx, MODE_X);
// This must be global because we may write across different databases.
- Lock::GlobalWrite lock(txn->lockState());
+ Lock::GlobalWrite lock(opCtx->lockState());
// replace: just rename from temp to final collection name, dropping previous collection
_db.dropCollection(_config.outputOptions.finalNamespace.ns());
BSONObj info;
@@ -680,17 +681,19 @@ long long State::postProcessCollectionNonAtomic(OperationContext* txn,
} else if (_config.outputOptions.outType == Config::MERGE) {
// merge: upsert new docs into old collection
{
- const auto count = _collectionCount(txn, _config.tempNamespace, callerHoldsGlobalLock);
- stdx::lock_guard<Client> lk(*txn->getClient());
+ const auto count =
+ _collectionCount(opCtx, _config.tempNamespace, callerHoldsGlobalLock);
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setMessage_inlock(
"m/r: merge post processing", "M/R Merge Post Processing Progress", count);
}
unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace.ns(), BSONObj());
while (cursor->more()) {
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::DBLock lock(txn->lockState(), _config.outputOptions.finalNamespace.db(), MODE_X);
+ ScopedTransaction scopedXact(opCtx, MODE_X);
+ Lock::DBLock lock(
+ opCtx->lockState(), _config.outputOptions.finalNamespace.db(), MODE_X);
BSONObj o = cursor->nextSafe();
- Helpers::upsert(txn, _config.outputOptions.finalNamespace.ns(), o);
+ Helpers::upsert(opCtx, _config.outputOptions.finalNamespace.ns(), o);
pm.hit();
}
_db.dropCollection(_config.tempNamespace.ns());
@@ -700,25 +703,26 @@ long long State::postProcessCollectionNonAtomic(OperationContext* txn,
BSONList values;
{
- const auto count = _collectionCount(txn, _config.tempNamespace, callerHoldsGlobalLock);
- stdx::lock_guard<Client> lk(*txn->getClient());
+ const auto count =
+ _collectionCount(opCtx, _config.tempNamespace, callerHoldsGlobalLock);
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setMessage_inlock(
"m/r: reduce post processing", "M/R Reduce Post Processing Progress", count);
}
unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace.ns(), BSONObj());
while (cursor->more()) {
- ScopedTransaction transaction(txn, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_X);
// This must be global because we may write across different databases.
- Lock::GlobalWrite lock(txn->lockState());
+ Lock::GlobalWrite lock(opCtx->lockState());
BSONObj temp = cursor->nextSafe();
BSONObj old;
bool found;
{
- OldClientContext tx(txn, _config.outputOptions.finalNamespace.ns());
+ OldClientContext tx(opCtx, _config.outputOptions.finalNamespace.ns());
Collection* coll =
getCollectionOrUassert(tx.db(), _config.outputOptions.finalNamespace);
- found = Helpers::findOne(txn, coll, temp["_id"].wrap(), old, true);
+ found = Helpers::findOne(opCtx, coll, temp["_id"].wrap(), old, true);
}
if (found) {
@@ -726,18 +730,18 @@ long long State::postProcessCollectionNonAtomic(OperationContext* txn,
values.clear();
values.push_back(temp);
values.push_back(old);
- Helpers::upsert(txn,
+ Helpers::upsert(opCtx,
_config.outputOptions.finalNamespace.ns(),
_config.reducer->finalReduce(values, _config.finalizer.get()));
} else {
- Helpers::upsert(txn, _config.outputOptions.finalNamespace.ns(), temp);
+ Helpers::upsert(opCtx, _config.outputOptions.finalNamespace.ns(), temp);
}
pm.hit();
}
pm.finished();
}
- return _collectionCount(txn, _config.outputOptions.finalNamespace, callerHoldsGlobalLock);
+ return _collectionCount(opCtx, _config.outputOptions.finalNamespace, callerHoldsGlobalLock);
}
/**
@@ -747,11 +751,11 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) {
verify(_onDisk);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- OldClientWriteContext ctx(_txn, nss.ns());
- WriteUnitOfWork wuow(_txn);
+ OldClientWriteContext ctx(_opCtx, nss.ns());
+ WriteUnitOfWork wuow(_opCtx);
uassert(ErrorCodes::PrimarySteppedDown,
"no longer primary",
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(_txn, nss));
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(_opCtx, nss));
Collection* coll = getCollectionOrUassert(ctx.db(), nss);
BSONObjBuilder b;
@@ -761,7 +765,7 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) {
b.appendElements(o);
BSONObj bo = b.obj();
- StatusWith<BSONObj> res = fixDocumentForInsert(_txn->getServiceContext(), bo);
+ StatusWith<BSONObj> res = fixDocumentForInsert(_opCtx->getServiceContext(), bo);
uassertStatusOK(res.getStatus());
if (!res.getValue().isEmpty()) {
bo = res.getValue();
@@ -769,10 +773,10 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) {
// TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
OpDebug* const nullOpDebug = nullptr;
- uassertStatusOK(coll->insertDocument(_txn, bo, nullOpDebug, true));
+ uassertStatusOK(coll->insertDocument(_opCtx, bo, nullOpDebug, true));
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R insert", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "M/R insert", nss.ns());
}
/**
@@ -782,12 +786,12 @@ void State::_insertToInc(BSONObj& o) {
verify(_onDisk);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- OldClientWriteContext ctx(_txn, _config.incLong.ns());
- WriteUnitOfWork wuow(_txn);
+ OldClientWriteContext ctx(_opCtx, _config.incLong.ns());
+ WriteUnitOfWork wuow(_opCtx);
Collection* coll = getCollectionOrUassert(ctx.db(), _config.incLong);
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+ bool shouldReplicateWrites = _opCtx->writesAreReplicated();
+ _opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _opCtx, shouldReplicateWrites);
// The documents inserted into the incremental collection are of the form
// {"0": <key>, "1": <value>}, so we cannot call fixDocumentForInsert(o) here because the
@@ -804,14 +808,20 @@ void State::_insertToInc(BSONObj& o) {
// TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
OpDebug* const nullOpDebug = nullptr;
- uassertStatusOK(coll->insertDocument(_txn, o, nullOpDebug, true, false));
+ uassertStatusOK(coll->insertDocument(_opCtx, o, nullOpDebug, true, false));
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R insertToInc", _config.incLong.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "M/R insertToInc", _config.incLong.ns());
}
-State::State(OperationContext* txn, const Config& c)
- : _config(c), _db(txn), _useIncremental(true), _txn(txn), _size(0), _dupCount(0), _numEmits(0) {
+State::State(OperationContext* opCtx, const Config& c)
+ : _config(c),
+ _db(opCtx),
+ _useIncremental(true),
+ _opCtx(opCtx),
+ _size(0),
+ _dupCount(0),
+ _numEmits(0) {
_temp.reset(new InMemory());
_onDisk = _config.outputOptions.outType != Config::INMEMORY;
}
@@ -849,9 +859,9 @@ void State::init() {
const string userToken =
AuthorizationSession::get(Client::getCurrent())->getAuthenticatedUserNamesToken();
_scope.reset(getGlobalScriptEngine()->newScopeForCurrentThread());
- _scope->registerOperation(_txn);
+ _scope->registerOperation(_opCtx);
_scope->setLocalDB(_config.dbname);
- _scope->loadStored(_txn, true);
+ _scope->loadStored(_opCtx, true);
if (!_config.scopeSetup.isEmpty())
_scope->init(&_config.scopeSetup);
@@ -1027,7 +1037,7 @@ BSONObj _nativeToTemp(const BSONObj& args, void* data) {
* After calling this method, the temp collection will be completed.
* If inline, the results will be in the in memory map
*/
-void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder& pm) {
+void State::finalReduce(OperationContext* opCtx, CurOp* curOp, ProgressMeterHolder& pm) {
if (_jsMode) {
// apply the reduce within JS
if (_onDisk) {
@@ -1066,12 +1076,12 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
BSONObj sortKey = BSON("0" << 1);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- OldClientWriteContext incCtx(_txn, _config.incLong.ns());
- WriteUnitOfWork wuow(_txn);
+ OldClientWriteContext incCtx(_opCtx, _config.incLong.ns());
+ WriteUnitOfWork wuow(_opCtx);
Collection* incColl = getCollectionOrUassert(incCtx.db(), _config.incLong);
bool foundIndex = false;
- IndexCatalog::IndexIterator ii = incColl->getIndexCatalog()->getIndexIterator(_txn, true);
+ IndexCatalog::IndexIterator ii = incColl->getIndexCatalog()->getIndexIterator(_opCtx, true);
// Iterate over incColl's indexes.
while (ii.more()) {
IndexDescriptor* currIndex = ii.next();
@@ -1085,28 +1095,28 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
verify(foundIndex);
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "finalReduce", _config.incLong.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "finalReduce", _config.incLong.ns());
- unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(_txn, _config.incLong));
+ unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(_opCtx, _config.incLong));
BSONObj prev;
BSONList all;
{
const auto count = _db.count(_config.incLong.ns(), BSONObj(), QueryOption_SlaveOk);
- stdx::lock_guard<Client> lk(*_txn->getClient());
+ stdx::lock_guard<Client> lk(*_opCtx->getClient());
verify(pm ==
curOp->setMessage_inlock("m/r: (3/3) final reduce to collection",
"M/R: (3/3) Final Reduce Progress",
count));
}
- const ExtensionsCallbackReal extensionsCallback(_txn, &_config.incLong);
+ const ExtensionsCallbackReal extensionsCallback(_opCtx, &_config.incLong);
auto qr = stdx::make_unique<QueryRequest>(_config.incLong);
qr->setSort(sortKey);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
verify(statusWithCQ.isOK());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -1114,7 +1124,7 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
invariant(coll);
auto statusWithPlanExecutor = getExecutor(
- _txn, coll, std::move(cq), PlanExecutor::YIELD_AUTO, QueryPlannerParams::NO_TABLE_SCAN);
+ _opCtx, coll, std::move(cq), PlanExecutor::YIELD_AUTO, QueryPlannerParams::NO_TABLE_SCAN);
verify(statusWithPlanExecutor.isOK());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -1130,7 +1140,7 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
// object is same as previous, add to array
all.push_back(o);
if (pm->hits() % 100 == 0) {
- _txn->checkForInterrupt();
+ _opCtx->checkForInterrupt();
}
continue;
}
@@ -1142,7 +1152,7 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
// reduce a finalize array
finalReduce(all);
- ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
+ ctx.reset(new AutoGetCollectionForRead(_opCtx, _config.incLong));
all.clear();
prev = o;
@@ -1152,7 +1162,7 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
uasserted(34375, "Plan executor killed during mapReduce final reduce");
}
- _txn->checkForInterrupt();
+ _opCtx->checkForInterrupt();
}
uassert(34428,
@@ -1162,7 +1172,7 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
ctx.reset();
// reduce and finalize last array
finalReduce(all);
- ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
+ ctx.reset(new AutoGetCollectionForRead(_opCtx, _config.incLong));
pm.finished();
}
@@ -1247,7 +1257,7 @@ int State::_add(InMemory* im, const BSONObj& a) {
void State::reduceAndSpillInMemoryStateIfNeeded() {
// Make sure no DB locks are held, because this method manages its own locking and
// write units of work.
- invariant(!_txn->lockState()->isLocked());
+ invariant(!_opCtx->lockState()->isLocked());
if (_jsMode) {
// try to reduce if it is beneficial
@@ -1362,7 +1372,7 @@ public:
addPrivilegesRequiredForMapReduce(this, dbname, cmdObj, out);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmd,
int,
@@ -1372,9 +1382,9 @@ public:
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmd))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
if (client->isInDirectClient()) {
return appendCommandStatus(
@@ -1382,7 +1392,7 @@ public:
Status(ErrorCodes::IllegalOperation, "Cannot run mapReduce command from eval()"));
}
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
const Config config(dbname, cmd);
@@ -1404,7 +1414,7 @@ public:
unique_ptr<RangePreserver> rangePreserver;
ScopedCollectionMetadata collMetadata;
{
- AutoGetCollectionForRead ctx(txn, config.nss);
+ AutoGetCollectionForRead ctx(opCtx, config.nss);
Collection* collection = ctx.getCollection();
if (collection) {
@@ -1413,19 +1423,19 @@ public:
// Get metadata before we check our version, to make sure it doesn't increment
// in the meantime. Need to do this in the same lock scope as the block.
- if (ShardingState::get(txn)->needCollectionMetadata(txn, config.nss.ns())) {
- collMetadata = CollectionShardingState::get(txn, config.nss)->getMetadata();
+ if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, config.nss.ns())) {
+ collMetadata = CollectionShardingState::get(opCtx, config.nss)->getMetadata();
}
}
// Ensure that the RangePreserver is freed under the lock. This is necessary since the
// RangePreserver's destructor unpins a ClientCursor, and access to the CursorManager must
// be done under the lock.
- ON_BLOCK_EXIT([txn, &config, &rangePreserver] {
+ ON_BLOCK_EXIT([opCtx, &config, &rangePreserver] {
if (rangePreserver) {
// Be sure not to use AutoGetCollectionForRead here, since that has side-effects
// other than lock acquisition.
- AutoGetCollection ctx(txn, config.nss, MODE_IS);
+ AutoGetCollection ctx(opCtx, config.nss, MODE_IS);
rangePreserver.reset();
}
});
@@ -1434,7 +1444,7 @@ public:
BSONObjBuilder countsBuilder;
BSONObjBuilder timingBuilder;
- State state(txn, config);
+ State state(opCtx, config);
if (!state.sourceExists()) {
return appendCommandStatus(
result,
@@ -1444,7 +1454,7 @@ public:
if (state.isOnDisk()) {
// this means that it will be doing a write operation, make sure we are on Master
// ideally this check should be in slaveOk(), but at that point config is not known
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor_UNSAFE(txn,
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor_UNSAFE(opCtx,
config.nss)) {
errmsg = "not master";
return false;
@@ -1460,7 +1470,7 @@ public:
bool showTotal = true;
if (state.config().filter.isEmpty()) {
const bool holdingGlobalLock = false;
- const auto count = _collectionCount(txn, config.nss, holdingGlobalLock);
+ const auto count = _collectionCount(opCtx, config.nss, holdingGlobalLock);
progressTotal =
(config.limit && (unsigned)config.limit < count) ? config.limit : count;
} else {
@@ -1469,7 +1479,7 @@ public:
progressTotal = 1;
}
- stdx::unique_lock<Client> lk(*txn->getClient());
+ stdx::unique_lock<Client> lk(*opCtx->getClient());
ProgressMeter& progress(curOp->setMessage_inlock(
"m/r: (1/3) emit phase", "M/R: (1/3) Emit Progress", progressTotal));
lk.unlock();
@@ -1488,18 +1498,18 @@ public:
// useful cursor.
// Need lock and context to use it
- unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(txn, MODE_IS));
- unique_ptr<AutoGetDb> scopedAutoDb(new AutoGetDb(txn, config.nss.db(), MODE_S));
+ unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(opCtx, MODE_IS));
+ unique_ptr<AutoGetDb> scopedAutoDb(new AutoGetDb(opCtx, config.nss.db(), MODE_S));
auto qr = stdx::make_unique<QueryRequest>(config.nss);
qr->setFilter(config.filter);
qr->setSort(config.sort);
qr->setCollation(config.collation);
- const ExtensionsCallbackReal extensionsCallback(txn, &config.nss);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &config.nss);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
uasserted(17238, "Can't canonicalize query " + config.filter.toString());
return 0;
@@ -1513,7 +1523,7 @@ public:
invariant(coll);
auto statusWithPlanExecutor =
- getExecutor(txn, coll, std::move(cq), PlanExecutor::YIELD_AUTO);
+ getExecutor(opCtx, coll, std::move(cq), PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
uasserted(17239,
"Can't get executor for query " + config.filter.toString());
@@ -1524,8 +1534,8 @@ public:
}
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
Timer mt;
@@ -1568,8 +1578,8 @@ public:
state.reduceAndSpillInMemoryStateIfNeeded();
- scopedXact.reset(new ScopedTransaction(txn, MODE_IS));
- scopedAutoDb.reset(new AutoGetDb(txn, config.nss.db(), MODE_S));
+ scopedXact.reset(new ScopedTransaction(opCtx, MODE_IS));
+ scopedAutoDb.reset(new AutoGetDb(opCtx, config.nss.db(), MODE_S));
if (!exec->restoreState()) {
return appendCommandStatus(
@@ -1581,7 +1591,7 @@ public:
reduceTime += t.micros();
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
pm.hit();
@@ -1608,7 +1618,7 @@ public:
Collection* coll = scopedAutoDb->getDb()->getCollection(config.nss);
invariant(coll); // 'exec' hasn't been killed, so collection must be alive.
- coll->infoCache()->notifyOfQuery(txn, stats.indexesUsed);
+ coll->infoCache()->notifyOfQuery(opCtx, stats.indexesUsed);
if (curOp->shouldDBProfile()) {
BSONObjBuilder execStatsBob;
@@ -1618,7 +1628,7 @@ public:
}
pm.finished();
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
// update counters
countsBuilder.appendNumber("input", numInputs);
@@ -1630,7 +1640,7 @@ public:
timingBuilder.append("emitLoop", t.millis());
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setMessage_inlock("m/r: (2/3) final reduce in memory",
"M/R: (2/3) Final In-Memory Reduce Progress");
}
@@ -1641,13 +1651,13 @@ public:
// if not inline: dump the in memory map to inc collection, all data is on disk
state.dumpToInc();
// final reduce
- state.finalReduce(txn, curOp, pm);
+ state.finalReduce(opCtx, curOp, pm);
reduceTime += rt.micros();
// Ensure the profile shows the source namespace. If the output was not inline, the
// active namespace will be the temporary collection we inserted into.
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setNS_inlock(config.nss.ns());
}
@@ -1655,7 +1665,7 @@ public:
timingBuilder.appendNumber("reduceTime", reduceTime / 1000);
timingBuilder.append("mode", state.jsMode() ? "js" : "mixed");
- long long finalCount = state.postProcessCollection(txn, curOp, pm);
+ long long finalCount = state.postProcessCollection(opCtx, curOp, pm);
state.appendResults(result);
timingBuilder.appendNumber("total", t.millis());
@@ -1718,7 +1728,7 @@ public:
actions.addAction(ActionType::internal);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -1734,7 +1744,7 @@ public:
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
ShardedConnectionInfo::addHook();
@@ -1754,10 +1764,10 @@ public:
inputNS = NamespaceString(dbname, shardedOutputCollection).ns();
}
- CurOp* curOp = CurOp::get(txn);
+ CurOp* curOp = CurOp::get(opCtx);
Config config(dbname, cmdObj.firstElement().embeddedObjectUserCheck());
- State state(txn, config);
+ State state(opCtx, config);
state.init();
// no need for incremental collection because records are already sorted
@@ -1767,7 +1777,7 @@ public:
BSONObj shardCounts = cmdObj["shardCounts"].embeddedObjectUserCheck();
BSONObj counts = cmdObj["counts"].embeddedObjectUserCheck();
- stdx::unique_lock<Client> lk(*txn->getClient());
+ stdx::unique_lock<Client> lk(*opCtx->getClient());
ProgressMeterHolder pm(curOp->setMessage_inlock("m/r: merge sort and reduce",
"M/R Merge Sort and Reduce Progress"));
lk.unlock();
@@ -1781,7 +1791,7 @@ public:
std::string server = e.fieldName();
servers.insert(server);
- uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, server));
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, server));
}
}
@@ -1801,7 +1811,7 @@ public:
result.append("result", config.outputOptions.collectionName);
}
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, dbname);
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(opCtx, dbname);
if (!scopedDbStatus.isOK()) {
return appendCommandStatus(result, scopedDbStatus.getStatus());
}
@@ -1812,11 +1822,11 @@ public:
if (confOut->isSharded(config.outputOptions.finalNamespace.ns())) {
shared_ptr<ChunkManager> cm =
- confOut->getChunkManager(txn, config.outputOptions.finalNamespace.ns());
+ confOut->getChunkManager(opCtx, config.outputOptions.finalNamespace.ns());
// Fetch result from other shards 1 chunk at a time. It would be better to do just one
// big $or query, but then the sorting would not be efficient.
- const string shardName = ShardingState::get(txn)->getShardName();
+ const string shardName = ShardingState::get(opCtx)->getShardName();
const ChunkMap& chunkMap = cm->getChunkMap();
for (ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it) {
@@ -1846,7 +1856,7 @@ public:
BSONObj sortKey = BSON("_id" << 1);
ParallelSortClusteredCursor cursor(
servers, inputNS, Query(query).sort(sortKey), QueryOption_NoCursorTimeout);
- cursor.init(txn);
+ cursor.init(opCtx);
int chunkSize = 0;
while (cursor.more() || !values.empty()) {
@@ -1890,7 +1900,7 @@ public:
result.append("chunkSizes", chunkSizes.arr());
- long long outputCount = state.postProcessCollection(txn, curOp, pm);
+ long long outputCount = state.postProcessCollection(opCtx, curOp, pm);
state.appendResults(result);
BSONObjBuilder countsB(32);
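
Several mr.cpp hunks above save writesAreReplicated(), disable replication for the incremental collection work, and rely on ON_BLOCK_EXIT to restore the previous setting when the scope unwinds. The sketch below shows that save/disable/restore-on-exit pattern with a minimal scope guard; OperationContext and ScopeGuard are hypothetical stand-ins rather than the real ON_BLOCK_EXIT machinery (C++17 for class template argument deduction).

#include <iostream>
#include <utility>

struct OperationContext {                    // hypothetical stand-in
    bool writesAreReplicated() const { return _replicated; }
    void setReplicatedWrites(bool v) { _replicated = v; }
private:
    bool _replicated = true;
};

template <typename F>
class ScopeGuard {                           // minimal ON_BLOCK_EXIT-style guard
public:
    explicit ScopeGuard(F f) : _f(std::move(f)) {}
    ~ScopeGuard() { _f(); }                  // runs on every scope exit
private:
    F _f;
};

void dropTempCollections(OperationContext* opCtx) {
    const bool shouldReplicateWrites = opCtx->writesAreReplicated();
    opCtx->setReplicatedWrites(false);       // don't replicate the cleanup
    ScopeGuard restore([=] { opCtx->setReplicatedWrites(shouldReplicateWrites); });

    std::cout << "dropping temp collection without replication\n";
}                                            // guard restores the previous setting here

int main() {
    OperationContext opCtx;
    dropTempCollections(&opCtx);
    std::cout << "replicated afterwards: " << std::boolalpha
              << opCtx.writesAreReplicated() << '\n';
    return 0;
}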
diff --git a/src/mongo/db/commands/mr.h b/src/mongo/db/commands/mr.h
index aa729f49e7f..15baf5e8fb9 100644
--- a/src/mongo/db/commands/mr.h
+++ b/src/mongo/db/commands/mr.h
@@ -260,9 +260,9 @@ public:
class State {
public:
/**
- * txn must outlive this State.
+ * opCtx must outlive this State.
*/
- State(OperationContext* txn, const Config& c);
+ State(OperationContext* opCtx, const Config& c);
~State();
void init();
@@ -305,7 +305,7 @@ public:
void finalReduce(BSONList& values);
- void finalReduce(OperationContext* txn, CurOp* op, ProgressMeterHolder& pm);
+ void finalReduce(OperationContext* opCtx, CurOp* op, ProgressMeterHolder& pm);
// ------- cleanup/data positioning ----------
@@ -317,8 +317,8 @@ public:
/**
@return number objects in collection
*/
- long long postProcessCollection(OperationContext* txn, CurOp* op, ProgressMeterHolder& pm);
- long long postProcessCollectionNonAtomic(OperationContext* txn,
+ long long postProcessCollection(OperationContext* opCtx, CurOp* op, ProgressMeterHolder& pm);
+ long long postProcessCollectionNonAtomic(OperationContext* opCtx,
CurOp* op,
ProgressMeterHolder& pm,
bool callerHoldsGlobalLock);
@@ -388,7 +388,7 @@ protected:
*/
int _add(InMemory* im, const BSONObj& a);
- OperationContext* _txn;
+ OperationContext* _opCtx;
std::unique_ptr<Scope> _scope;
bool _onDisk; // if the end result of this map reduce is disk or not
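
The mr.h hunk keeps the existing lifetime contract ("opCtx must outlive this State") while renaming the stored member to _opCtx; State holds a raw, non-owning pointer to the context. A minimal standalone sketch of that convention follows, using a hypothetical OperationContext and a trimmed-down State.

#include <iostream>
#include <string>

struct OperationContext {                    // hypothetical stand-in
    std::string opName;
};

class State {
public:
    // opCtx must outlive this State; the pointer is stored, not copied or owned.
    explicit State(OperationContext* opCtx) : _opCtx(opCtx) {}

    void run() const { std::cout << "running " << _opCtx->opName << '\n'; }

private:
    OperationContext* _opCtx;                // non-owning
};

int main() {
    OperationContext opCtx{"mapReduce"};     // outlives 'state' below
    State state(&opCtx);
    state.run();
    return 0;
}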
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index 35062313941..39d3d175ff0 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -69,7 +69,7 @@ public:
}
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -87,11 +87,11 @@ public:
return appendCommandStatus(result, status);
}
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWrite(txn->lockState());
+ ScopedTransaction scopedXact(opCtx, MODE_X);
+ Lock::GlobalWrite globalWrite(opCtx->lockState());
- WriteUnitOfWork wuow(txn);
- getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, dataElement.Obj());
+ WriteUnitOfWork wuow(opCtx);
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(opCtx, dataElement.Obj());
wuow.commit();
return true;
}
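
The oplog_note.cpp hunk above takes the global write lock through opCtx, opens a WriteUnitOfWork, records the oplog message, and commits. Below is a standalone sketch of the write-unit-of-work shape, where work is rolled back on destruction unless commit() is called first; the types here are simplified stand-ins and do not model MongoDB's real recovery unit behavior.

#include <iostream>

struct OperationContext {};                  // hypothetical stand-in

class WriteUnitOfWork {
public:
    explicit WriteUnitOfWork(OperationContext*) {}
    ~WriteUnitOfWork() {
        if (!_committed)
            std::cout << "rolling back uncommitted writes\n";
    }
    void commit() {
        _committed = true;
        std::cout << "writes committed\n";
    }

private:
    bool _committed = false;
};

int main() {
    OperationContext opCtx;
    {
        WriteUnitOfWork wuow(&opCtx);
        std::cout << "appending oplog note\n";
        wuow.commit();                        // without this the dtor would roll back
    }
    return 0;
}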
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 41689016961..df783b46062 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -85,7 +85,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -93,7 +93,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString ns(parseNsCollectionRequired(dbname, cmdObj));
- AutoGetCollectionForRead ctx(txn, ns);
+ AutoGetCollectionForRead ctx(opCtx, ns);
Collection* collection = ctx.getCollection();
if (!collection)
@@ -111,7 +111,7 @@ public:
<< " was: "
<< numCursors));
- auto iterators = collection->getManyCursors(txn);
+ auto iterators = collection->getManyCursors(opCtx);
if (iterators.size() < numCursors) {
numCursors = iterators.size();
}
@@ -120,11 +120,11 @@ public:
for (size_t i = 0; i < numCursors; i++) {
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
unique_ptr<MultiIteratorStage> mis =
- make_unique<MultiIteratorStage>(txn, ws.get(), collection);
+ make_unique<MultiIteratorStage>(opCtx, ws.get(), collection);
// Takes ownership of 'ws' and 'mis'.
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(ws), std::move(mis), collection, PlanExecutor::YIELD_AUTO);
+ opCtx, std::move(ws), std::move(mis), collection, PlanExecutor::YIELD_AUTO);
invariant(statusWithPlanExecutor.isOK());
execs.push_back(std::move(statusWithPlanExecutor.getValue()));
}
@@ -152,9 +152,9 @@ public:
auto pinnedCursor = collection->getCursorManager()->registerCursor(
{exec.release(),
ns.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(
- txn->getRemainingMaxTimeMicros());
+ opCtx->getRemainingMaxTimeMicros());
BSONObjBuilder threadResult;
appendCursorResponseObject(
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index a3c252d050a..f2e10dac2b5 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -88,7 +88,7 @@ public:
appendParameterNames(help);
help << "{ getParameter:'*' } to get everything\n";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -101,7 +101,7 @@ public:
const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
for (ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i) {
if (all || cmdObj.hasElement(i->first.c_str())) {
- i->second->append(txn, result, i->second->name());
+ i->second->append(opCtx, result, i->second->name());
}
}
@@ -137,7 +137,7 @@ public:
help << "{ setParameter:1, <param>:<value> }\n";
appendParameterNames(help);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -213,7 +213,7 @@ public:
}
if (numSet == 0) {
- foundParameter->second->append(txn, result, "was");
+ foundParameter->second->append(opCtx, result, "was");
}
Status status = foundParameter->second->set(parameter);
@@ -247,7 +247,7 @@ class LogLevelSetting : public ServerParameter {
public:
LogLevelSetting() : ServerParameter(ServerParameterSet::getGlobal(), "logLevel") {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
b << name << globalLogDomain()->getMinimumLogSeverity().toInt();
}
@@ -290,7 +290,7 @@ public:
LogComponentVerbositySetting()
: ServerParameter(ServerParameterSet::getGlobal(), "logComponentVerbosity") {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
BSONObj currentSettings;
_get(&currentSettings);
b << name << currentSettings;
@@ -459,7 +459,7 @@ public:
}
}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
b << name << sslModeStr();
}
@@ -530,7 +530,7 @@ public:
}
}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
b << name << clusterAuthModeStr();
}
@@ -607,7 +607,7 @@ public:
AutomationServiceDescriptor()
: ServerParameter(ServerParameterSet::getGlobal(), kName.toString(), true, true) {}
- virtual void append(OperationContext* txn,
+ virtual void append(OperationContext* opCtx,
BSONObjBuilder& builder,
const std::string& name) override {
const stdx::lock_guard<stdx::mutex> lock(_mutex);
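
The parameters.cpp hunks rename the first argument of every virtual append(OperationContext*, ...) override, which is why the change repeats across LogLevelSetting, LogComponentVerbositySetting, and the other parameter classes. The sketch below shows that override shape with hypothetical stand-ins; it is not the real ServerParameter interface.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct OperationContext {};                  // hypothetical stand-in

class ServerParameter {                      // stand-in interface
public:
    virtual ~ServerParameter() = default;
    virtual void append(OperationContext* opCtx,
                        std::ostream& out,
                        const std::string& name) = 0;
};

class LogLevelSetting : public ServerParameter {
public:
    void append(OperationContext* /*opCtx*/,
                std::ostream& out,
                const std::string& name) override {
        out << name << ": " << _level << '\n';   // context unused here, as in the hunk above
    }

private:
    int _level = 0;
};

int main() {
    OperationContext opCtx;
    std::vector<std::unique_ptr<ServerParameter>> params;
    params.push_back(std::make_unique<LogLevelSetting>());
    for (auto& p : params)
        p->append(&opCtx, std::cout, "logLevel");
    return 0;
}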
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 0f07e38c830..4fcfb97f574 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -88,7 +88,7 @@ namespace {
* namespace used in the returned cursor. In the case of views, this can be different from that
* in 'request'.
*/
-bool handleCursorCommand(OperationContext* txn,
+bool handleCursorCommand(OperationContext* opCtx,
const string& nsForCursor,
ClientCursor* cursor,
PlanExecutor* exec,
@@ -150,16 +150,16 @@ bool handleCursorCommand(OperationContext* txn,
if (cursor) {
// If a time limit was set on the pipeline, remaining time is "rolled over" to the
// cursor (for use by future getmore ops).
- cursor->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
+ cursor->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
- CurOp::get(txn)->debug().cursorid = cursor->cursorid();
+ CurOp::get(opCtx)->debug().cursorid = cursor->cursorid();
// Cursor needs to be in a saved state while we yield locks for getmore. State
// will be restored in getMore().
exec->saveState();
exec->detachFromOperationContext();
} else {
- CurOp::get(txn)->debug().cursorExhausted = true;
+ CurOp::get(opCtx)->debug().cursorExhausted = true;
}
const long long cursorId = cursor ? cursor->cursorid() : 0LL;
@@ -169,12 +169,12 @@ bool handleCursorCommand(OperationContext* txn,
}
StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNamespaces(
- OperationContext* txn, const AggregationRequest& request) {
+ OperationContext* opCtx, const AggregationRequest& request) {
// We intentionally do not drop and reacquire our DB lock after resolving the view definition in
// order to prevent the definition for any view namespaces we've already resolved from changing.
// This is necessary to prevent a cycle from being formed among the view definitions cached in
// 'resolvedNamespaces' because we won't re-resolve a view namespace we've already encountered.
- AutoGetDb autoDb(txn, request.getNamespaceString().db(), MODE_IS);
+ AutoGetDb autoDb(opCtx, request.getNamespaceString().db(), MODE_IS);
Database* const db = autoDb.getDb();
ViewCatalog* viewCatalog = db ? db->getViewCatalog() : nullptr;
@@ -199,9 +199,9 @@ StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNames
// pipeline because 'involvedNs' doesn't refer to a view namespace in our consistent
// snapshot of the view catalog.
resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
- } else if (viewCatalog->lookup(txn, involvedNs.ns())) {
+ } else if (viewCatalog->lookup(opCtx, involvedNs.ns())) {
// If 'involvedNs' refers to a view namespace, then we resolve its definition.
- auto resolvedView = viewCatalog->resolveView(txn, involvedNs);
+ auto resolvedView = viewCatalog->resolveView(opCtx, involvedNs);
if (!resolvedView.isOK()) {
return {ErrorCodes::FailedToParse,
str::stream() << "Failed to resolve view '" << involvedNs.ns() << "': "
@@ -265,7 +265,7 @@ boost::intrusive_ptr<Pipeline> reparsePipeline(
* Returns Status::OK if each view namespace in 'pipeline' has a default collator equivalent to
* 'collator'. Otherwise, returns ErrorCodes::OptionNotSupportedOnView.
*/
-Status collatorCompatibleWithPipeline(OperationContext* txn,
+Status collatorCompatibleWithPipeline(OperationContext* opCtx,
Database* db,
const CollatorInterface* collator,
const intrusive_ptr<Pipeline> pipeline) {
@@ -277,7 +277,7 @@ Status collatorCompatibleWithPipeline(OperationContext* txn,
continue;
}
- auto view = db->getViewCatalog()->lookup(txn, potentialViewNs.ns());
+ auto view = db->getViewCatalog()->lookup(opCtx, potentialViewNs.ns());
if (!view) {
continue;
}
@@ -339,7 +339,7 @@ public:
return AuthorizationSession::get(client)->checkAuthForAggregate(nss, cmdObj);
}
- bool runParsed(OperationContext* txn,
+ bool runParsed(OperationContext* opCtx,
const NamespaceString& origNss,
const AggregationRequest& request,
BSONObj& cmdObj,
@@ -351,14 +351,14 @@ public:
// Parse the user-specified collation, if any.
std::unique_ptr<CollatorInterface> userSpecifiedCollator = request.getCollation().isEmpty()
? nullptr
- : uassertStatusOK(CollatorFactoryInterface::get(txn->getServiceContext())
+ : uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(request.getCollation()));
boost::optional<ClientCursorPin> pin; // either this OR the exec will be non-null
unique_ptr<PlanExecutor> exec;
boost::intrusive_ptr<ExpressionContext> expCtx;
boost::intrusive_ptr<Pipeline> pipeline;
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
{
// This will throw if the sharding version for this connection is out of date. If the
// namespace is a view, the lock will be released before re-running the aggregation.
@@ -367,7 +367,7 @@ public:
// same sharding version that we synchronize on here. This is also why we always need to
// create a ClientCursor even when we aren't outputting to a cursor. See the comment on
// ShardFilterStage for more details.
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
// If this is a view, resolve it by finding the underlying collection and stitching view
@@ -390,7 +390,7 @@ public:
}
auto viewDefinition =
- ViewShardingCheck::getResolvedViewIfSharded(txn, ctx.getDb(), ctx.getView());
+ ViewShardingCheck::getResolvedViewIfSharded(opCtx, ctx.getDb(), ctx.getView());
if (!viewDefinition.isOK()) {
return appendCommandStatus(result, viewDefinition.getStatus());
}
@@ -400,7 +400,7 @@ public:
return false;
}
- auto resolvedView = ctx.getDb()->getViewCatalog()->resolveView(txn, nss);
+ auto resolvedView = ctx.getDb()->getViewCatalog()->resolveView(opCtx, nss);
if (!resolvedView.isOK()) {
return appendCommandStatus(result, resolvedView.getStatus());
}
@@ -425,11 +425,11 @@ public:
newRequest.getValue().setCollation(collationSpec);
bool status = runParsed(
- txn, origNss, newRequest.getValue(), newCmd.getValue(), errmsg, result);
+ opCtx, origNss, newRequest.getValue(), newCmd.getValue(), errmsg, result);
{
// Set the namespace of the curop back to the view namespace so ctx records
// stats on this view namespace on destruction.
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setNS_inlock(nss.ns());
}
return status;
@@ -449,10 +449,10 @@ public:
}
expCtx.reset(
- new ExpressionContext(txn,
+ new ExpressionContext(opCtx,
request,
std::move(collatorToUse),
- uassertStatusOK(resolveInvolvedNamespaces(txn, request))));
+ uassertStatusOK(resolveInvolvedNamespaces(opCtx, request))));
expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
// Parse the pipeline.
@@ -465,7 +465,7 @@ public:
// Check that the view's collation matches the collation of any views involved
// in the pipeline.
auto pipelineCollationStatus =
- collatorCompatibleWithPipeline(txn, ctx.getDb(), expCtx->getCollator(), pipeline);
+ collatorCompatibleWithPipeline(opCtx, ctx.getDb(), expCtx->getCollator(), pipeline);
if (!pipelineCollationStatus.isOK()) {
return appendCommandStatus(result, pipelineCollationStatus);
}
@@ -488,19 +488,22 @@ public:
// ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
// PlanExecutor.
auto ws = make_unique<WorkingSet>();
- auto proxy = make_unique<PipelineProxyStage>(txn, pipeline, ws.get());
+ auto proxy = make_unique<PipelineProxyStage>(opCtx, pipeline, ws.get());
auto statusWithPlanExecutor = (NULL == collection)
? PlanExecutor::make(
- txn, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
- : PlanExecutor::make(
- txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
+ opCtx, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
+ : PlanExecutor::make(opCtx,
+ std::move(ws),
+ std::move(proxy),
+ collection,
+ PlanExecutor::YIELD_MANUAL);
invariant(statusWithPlanExecutor.isOK());
exec = std::move(statusWithPlanExecutor.getValue());
{
auto planSummary = Explain::getPlanSummary(exec.get());
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setPlanSummary_inlock(std::move(planSummary));
}
@@ -509,7 +512,7 @@ public:
pin.emplace(collection->getCursorManager()->registerCursor(
{exec.release(),
nss.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
0,
cmdObj.getOwned(),
isAggCursor}));
@@ -533,7 +536,7 @@ public:
result << "stages" << Value(pipeline->writeExplainOps());
} else {
// Cursor must be specified, if explain is not.
- keepCursor = handleCursorCommand(txn,
+ keepCursor = handleCursorCommand(opCtx,
origNss.ns(),
pin ? pin->getCursor() : nullptr,
pin ? pin->getCursor()->getExecutor() : exec.get(),
@@ -556,8 +559,8 @@ public:
// AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
// sharding version is out of date, and we don't care if the sharding version
// has changed.
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
if (keepCursor) {
pin->release();
} else {
@@ -567,8 +570,8 @@ public:
} catch (...) {
// On our way out of scope, we clean up our ClientCursorPin if needed.
if (pin) {
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
pin->deleteUnderlying();
}
throw;
@@ -577,7 +580,7 @@ public:
return appendCommandStatus(result, Status::OK());
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& db,
BSONObj& cmdObj,
int options,
@@ -607,7 +610,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility."));
}
- return runParsed(txn, nss, request.getValue(), cmdObj, errmsg, result);
+ return runParsed(opCtx, nss, request.getValue(), cmdObj, errmsg, result);
}
};
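
At the end of runParsed() in the pipeline_command.cpp hunks above, any exception thrown after a cursor has been pinned triggers cleanup (pin->deleteUnderlying()) before the exception propagates. Here is a standalone sketch of that cleanup-on-exception pattern; CursorPin and runAggregation are hypothetical stand-ins for illustration only.

#include <iostream>
#include <optional>
#include <stdexcept>

struct CursorPin {                           // hypothetical stand-in
    void release() { std::cout << "cursor kept alive for getMore\n"; }
    void deleteUnderlying() { std::cout << "cursor deleted\n"; }
};

void runAggregation(bool keepCursor, bool failMidway) {
    std::optional<CursorPin> pin;
    try {
        pin.emplace();                       // cursor registered
        if (failMidway)
            throw std::runtime_error("boom");
        if (keepCursor)
            pin->release();
        else
            pin->deleteUnderlying();
    } catch (...) {
        if (pin)
            pin->deleteUnderlying();         // clean up before rethrowing
        throw;
    }
}

int main() {
    runAggregation(/*keepCursor=*/true, /*failMidway=*/false);
    try {
        runAggregation(/*keepCursor=*/true, /*failMidway=*/true);
    } catch (const std::exception& e) {
        std::cout << "caught: " << e.what() << '\n';
    }
    return 0;
}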
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 67b244d6bb3..4cd4ca26992 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -58,7 +58,7 @@ using namespace mongo;
/**
* Retrieves a collection's plan cache from the database.
*/
-static Status getPlanCache(OperationContext* txn,
+static Status getPlanCache(OperationContext* opCtx,
Collection* collection,
const string& ns,
PlanCache** planCacheOut) {
@@ -110,14 +110,14 @@ PlanCacheCommand::PlanCacheCommand(const string& name,
ActionType actionType)
: Command(name), helpText(helpText), actionType(actionType) {}
-bool PlanCacheCommand::run(OperationContext* txn,
+bool PlanCacheCommand::run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- Status status = runPlanCacheCommand(txn, nss.ns(), cmdObj, &result);
+ Status status = runPlanCacheCommand(opCtx, nss.ns(), cmdObj, &result);
return appendCommandStatus(result, status);
}
@@ -152,7 +152,7 @@ Status PlanCacheCommand::checkAuthForCommand(Client* client,
}
// static
-StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationContext* txn,
+StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationContext* opCtx,
const string& ns,
const BSONObj& cmdObj) {
// query - required
@@ -208,8 +208,8 @@ StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationC
qr->setSort(sortObj);
qr->setProj(projObj);
qr->setCollation(collationObj);
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -222,15 +222,15 @@ PlanCacheListQueryShapes::PlanCacheListQueryShapes()
"Displays all query shapes in a collection.",
ActionType::planCacheRead) {}
-Status PlanCacheListQueryShapes::runPlanCacheCommand(OperationContext* txn,
+Status PlanCacheListQueryShapes::runPlanCacheCommand(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
if (!status.isOK()) {
// No collection - return results with empty shapes array.
BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
@@ -274,24 +274,24 @@ PlanCacheClear::PlanCacheClear()
"Drops one or all cached queries in a collection.",
ActionType::planCacheWrite) {}
-Status PlanCacheClear::runPlanCacheCommand(OperationContext* txn,
+Status PlanCacheClear::runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
if (!status.isOK()) {
// No collection - nothing to do. Return OK status.
return Status::OK();
}
- return clear(txn, planCache, ns, cmdObj);
+ return clear(opCtx, planCache, ns, cmdObj);
}
// static
-Status PlanCacheClear::clear(OperationContext* txn,
+Status PlanCacheClear::clear(OperationContext* opCtx,
PlanCache* planCache,
const string& ns,
const BSONObj& cmdObj) {
@@ -302,7 +302,7 @@ Status PlanCacheClear::clear(OperationContext* txn,
// - clear plans for single query shape when a query shape is described in the
// command arguments.
if (cmdObj.hasField("query")) {
- auto statusWithCQ = PlanCacheCommand::canonicalize(txn, ns, cmdObj);
+ auto statusWithCQ = PlanCacheCommand::canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -351,30 +351,30 @@ PlanCacheListPlans::PlanCacheListPlans()
"Displays the cached plans for a query shape.",
ActionType::planCacheRead) {}
-Status PlanCacheListPlans::runPlanCacheCommand(OperationContext* txn,
+Status PlanCacheListPlans::runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
if (!status.isOK()) {
// No collection - return empty plans array.
BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
plansBuilder.doneFast();
return Status::OK();
}
- return list(txn, *planCache, ns, cmdObj, bob);
+ return list(opCtx, *planCache, ns, cmdObj, bob);
}
// static
-Status PlanCacheListPlans::list(OperationContext* txn,
+Status PlanCacheListPlans::list(OperationContext* opCtx,
const PlanCache& planCache,
const std::string& ns,
const BSONObj& cmdObj,
BSONObjBuilder* bob) {
- auto statusWithCQ = canonicalize(txn, ns, cmdObj);
+ auto statusWithCQ = canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
diff --git a/src/mongo/db/commands/plan_cache_commands.h b/src/mongo/db/commands/plan_cache_commands.h
index 1b6afaf2171..881bf475433 100644
--- a/src/mongo/db/commands/plan_cache_commands.h
+++ b/src/mongo/db/commands/plan_cache_commands.h
@@ -57,7 +57,7 @@ public:
* implement plan cache command functionality.
*/
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -86,7 +86,7 @@ public:
* Should contain just enough logic to invoke run*Command() function
* in plan_cache.h
*/
- virtual Status runPlanCacheCommand(OperationContext* txn,
+ virtual Status runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) = 0;
@@ -94,7 +94,7 @@ public:
/**
* Validates query shape from command object and returns canonical query.
*/
- static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(OperationContext* txn,
+ static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(OperationContext* opCtx,
const std::string& ns,
const BSONObj& cmdObj);
@@ -112,7 +112,7 @@ private:
class PlanCacheListQueryShapes : public PlanCacheCommand {
public:
PlanCacheListQueryShapes();
- virtual Status runPlanCacheCommand(OperationContext* txn,
+ virtual Status runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -138,7 +138,7 @@ public:
class PlanCacheClear : public PlanCacheCommand {
public:
PlanCacheClear();
- virtual Status runPlanCacheCommand(OperationContext* txn,
+ virtual Status runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -147,7 +147,7 @@ public:
* Clears collection's plan cache.
* If query shape is provided, clears plans for that single query shape only.
*/
- static Status clear(OperationContext* txn,
+ static Status clear(OperationContext* opCtx,
PlanCache* planCache,
const std::string& ns,
const BSONObj& cmdObj);
@@ -167,7 +167,7 @@ public:
class PlanCacheListPlans : public PlanCacheCommand {
public:
PlanCacheListPlans();
- virtual Status runPlanCacheCommand(OperationContext* txn,
+ virtual Status runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -175,7 +175,7 @@ public:
/**
* Displays the cached plans for a query shape.
*/
- static Status list(OperationContext* txn,
+ static Status list(OperationContext* opCtx,
const PlanCache& planCache,
const std::string& ns,
const BSONObj& cmdObj,
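As the test changes below show, callers of the renamed static plan cache entry points now pass the operation context under its new name but are otherwise untouched. A short sketch of the call sites, with values mirroring the tests ("nss" here stands for the test fixture's NamespaceString):

    PlanCache planCache;
    OperationContextNoop opCtx;  // formerly named "txn"
    BSONObjBuilder bob;
    // Clears every shape; pass a "query" field to clear a single shape instead.
    Status cleared = PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), BSONObj());
    // Lists the cached plans for one query shape.
    Status listed = PlanCacheListPlans::list(
        &opCtx, planCache, nss.ns(), fromjson("{query: {a: 1}}"), &bob);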
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 4975a557443..1ec3611ccdf 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -133,7 +133,7 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesEmpty) {
TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create a canonical query
auto qr = stdx::make_unique<QueryRequest>(nss);
@@ -142,7 +142,7 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
qr->setProj(fromjson("{_id: 0}"));
qr->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -168,13 +168,13 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create a canonical query
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -189,7 +189,7 @@ TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
ASSERT_EQUALS(getShapes(planCache).size(), 1U);
// Clear cache and confirm number of keys afterwards.
- ASSERT_OK(PlanCacheClear::clear(txn.get(), &planCache, nss.ns(), BSONObj()));
+ ASSERT_OK(PlanCacheClear::clear(opCtx.get(), &planCache, nss.ns(), BSONObj()));
ASSERT_EQUALS(getShapes(planCache).size(), 0U);
}
@@ -202,68 +202,69 @@ TEST(PlanCacheCommandsTest, Canonicalize) {
// Invalid parameters
PlanCache planCache;
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Missing query field
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{}")).getStatus());
+ ASSERT_NOT_OK(
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{}")).getStatus());
// Query needs to be an object
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: 1}")).getStatus());
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: 1}")).getStatus());
// Sort needs to be an object
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, sort: 1}"))
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, sort: 1}"))
.getStatus());
// Projection needs to be an object.
- ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, projection: 1}"))
- .getStatus());
+ ASSERT_NOT_OK(PlanCacheCommand::canonicalize(
+ opCtx.get(), nss.ns(), fromjson("{query: {}, projection: 1}"))
+ .getStatus());
// Collation needs to be an object.
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, collation: 1}"))
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, collation: 1}"))
.getStatus());
// Bad query (invalid sort order)
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, sort: {a: 0}}"))
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, sort: {a: 0}}"))
.getStatus());
// Valid parameters
auto statusWithCQ =
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}"));
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> query = std::move(statusWithCQ.getValue());
// Equivalent query should generate same key.
statusWithCQ =
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {b: 1, a: 1}}"));
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {b: 1, a: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> equivQuery = std::move(statusWithCQ.getValue());
ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*equivQuery));
// Sort query should generate different key from unsorted query.
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
+ opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> sortQuery1 = std::move(statusWithCQ.getValue());
ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*sortQuery1));
// Confirm sort arguments are properly delimited (SERVER-17158)
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
+ opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> sortQuery2 = std::move(statusWithCQ.getValue());
ASSERT_NOT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery2));
// Changing order and/or value of predicates should not change key
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"));
+ opCtx.get(), nss.ns(), fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> sortQuery3 = std::move(statusWithCQ.getValue());
ASSERT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery3));
// Projected query should generate different key from unprojected query.
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
+ opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> projectionQuery = std::move(statusWithCQ.getValue());
ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery));
@@ -275,47 +276,47 @@ TEST(PlanCacheCommandsTest, Canonicalize) {
TEST(PlanCacheCommandsTest, planCacheClearInvalidParameter) {
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
// Query field type must be BSON object.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, nss.ns(), fromjson("{query: 12345}")));
+ ASSERT_NOT_OK(PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{query: 12345}")));
ASSERT_NOT_OK(
- PlanCacheClear::clear(&txn, &planCache, nss.ns(), fromjson("{query: /keyisnotregex/}")));
+ PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{query: /keyisnotregex/}")));
// Query must pass canonicalization.
ASSERT_NOT_OK(PlanCacheClear::clear(
- &txn, &planCache, nss.ns(), fromjson("{query: {a: {$no_such_op: 1}}}")));
+ &opCtx, &planCache, nss.ns(), fromjson("{query: {a: {$no_such_op: 1}}}")));
// Sort present without query is an error.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, nss.ns(), fromjson("{sort: {a: 1}}")));
+ ASSERT_NOT_OK(PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{sort: {a: 1}}")));
// Projection present without query is an error.
ASSERT_NOT_OK(PlanCacheClear::clear(
- &txn, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
+ &opCtx, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
// Collation present without query is an error.
ASSERT_NOT_OK(PlanCacheClear::clear(
- &txn, &planCache, nss.ns(), fromjson("{collation: {locale: 'en_US'}}")));
+ &opCtx, &planCache, nss.ns(), fromjson("{collation: {locale: 'en_US'}}")));
}
TEST(PlanCacheCommandsTest, planCacheClearUnknownKey) {
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
+ ASSERT_OK(PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
}
TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create 2 canonical queries.
auto qrA = stdx::make_unique<QueryRequest>(nss);
qrA->setFilter(fromjson("{a: 1}"));
auto statusWithCQA = CanonicalQuery::canonicalize(
- txn.get(), std::move(qrA), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qrA), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQA.getStatus());
auto qrB = stdx::make_unique<QueryRequest>(nss);
qrB->setFilter(fromjson("{b: 1}"));
unique_ptr<CanonicalQuery> cqA = std::move(statusWithCQA.getValue());
auto statusWithCQB = CanonicalQuery::canonicalize(
- txn.get(), std::move(qrB), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qrB), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQB.getStatus());
unique_ptr<CanonicalQuery> cqB = std::move(statusWithCQB.getValue());
@@ -350,7 +351,7 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
BSONObjBuilder bob;
ASSERT_OK(PlanCacheClear::clear(
- txn.get(), &planCache, nss.ns(), BSON("query" << cqB->getQueryObj())));
+ opCtx.get(), &planCache, nss.ns(), BSON("query" << cqB->getQueryObj())));
vector<BSONObj> shapesAfter = getShapes(planCache);
ASSERT_EQUALS(shapesAfter.size(), 1U);
ASSERT_BSONOBJ_EQ(shapesAfter[0], shapeA);
@@ -358,20 +359,20 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create 2 canonical queries, one with collation.
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 'foo'}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
auto qrCollation = stdx::make_unique<QueryRequest>(nss);
qrCollation->setFilter(fromjson("{a: 'foo'}"));
qrCollation->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
auto statusWithCQCollation = CanonicalQuery::canonicalize(
- txn.get(), std::move(qrCollation), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qrCollation), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQCollation.getStatus());
unique_ptr<CanonicalQuery> cqCollation = std::move(statusWithCQCollation.getValue());
@@ -412,7 +413,7 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
// Drop query with collation from cache. Make sure the other query is still in cache afterwards.
BSONObjBuilder bob;
- ASSERT_OK(PlanCacheClear::clear(txn.get(), &planCache, nss.ns(), shapeWithCollation));
+ ASSERT_OK(PlanCacheClear::clear(opCtx.get(), &planCache, nss.ns(), shapeWithCollation));
vector<BSONObj> shapesAfter = getShapes(planCache);
ASSERT_EQUALS(shapesAfter.size(), 1U);
ASSERT_BSONOBJ_EQ(shapesAfter[0], shape);
@@ -464,7 +465,7 @@ vector<BSONObj> getPlans(const PlanCache& planCache,
const BSONObj& projection,
const BSONObj& collation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
BSONObjBuilder bob;
BSONObjBuilder cmdObjBuilder;
@@ -475,7 +476,7 @@ vector<BSONObj> getPlans(const PlanCache& planCache,
cmdObjBuilder.append("collation", collation);
}
BSONObj cmdObj = cmdObjBuilder.obj();
- ASSERT_OK(PlanCacheListPlans::list(txn.get(), planCache, nss.ns(), cmdObj, &bob));
+ ASSERT_OK(PlanCacheListPlans::list(opCtx.get(), planCache, nss.ns(), cmdObj, &bob));
BSONObj resultObj = bob.obj();
BSONElement plansElt = resultObj.getField("plans");
ASSERT_EQUALS(plansElt.type(), mongo::Array);
@@ -489,36 +490,36 @@ vector<BSONObj> getPlans(const PlanCache& planCache,
TEST(PlanCacheCommandsTest, planCacheListPlansInvalidParameter) {
PlanCache planCache;
BSONObjBuilder ignored;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
// Missing query field is not ok.
- ASSERT_NOT_OK(PlanCacheListPlans::list(&txn, planCache, nss.ns(), BSONObj(), &ignored));
+ ASSERT_NOT_OK(PlanCacheListPlans::list(&opCtx, planCache, nss.ns(), BSONObj(), &ignored));
// Query field type must be BSON object.
- ASSERT_NOT_OK(
- PlanCacheListPlans::list(&txn, planCache, nss.ns(), fromjson("{query: 12345}"), &ignored));
ASSERT_NOT_OK(PlanCacheListPlans::list(
- &txn, planCache, nss.ns(), fromjson("{query: /keyisnotregex/}"), &ignored));
+ &opCtx, planCache, nss.ns(), fromjson("{query: 12345}"), &ignored));
+ ASSERT_NOT_OK(PlanCacheListPlans::list(
+ &opCtx, planCache, nss.ns(), fromjson("{query: /keyisnotregex/}"), &ignored));
}
TEST(PlanCacheCommandsTest, planCacheListPlansUnknownKey) {
// Leave the plan cache empty.
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
BSONObjBuilder ignored;
- ASSERT_OK(
- PlanCacheListPlans::list(&txn, planCache, nss.ns(), fromjson("{query: {a: 1}}"), &ignored));
+ ASSERT_OK(PlanCacheListPlans::list(
+ &opCtx, planCache, nss.ns(), fromjson("{query: {a: 1}}"), &ignored));
}
TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create a canonical query
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -540,13 +541,13 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create a canonical query
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -571,20 +572,20 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
TEST(PlanCacheCommandsTest, planCacheListPlansCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create 2 canonical queries, one with collation.
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 'foo'}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
auto qrCollation = stdx::make_unique<QueryRequest>(nss);
qrCollation->setFilter(fromjson("{a: 'foo'}"));
qrCollation->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
auto statusWithCQCollation = CanonicalQuery::canonicalize(
- txn.get(), std::move(qrCollation), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qrCollation), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQCollation.getStatus());
unique_ptr<CanonicalQuery> cqCollation = std::move(statusWithCQCollation.getValue());
diff --git a/src/mongo/db/commands/rename_collection_cmd.cpp b/src/mongo/db/commands/rename_collection_cmd.cpp
index 99f5617cf94..6aaa3b5f744 100644
--- a/src/mongo/db/commands/rename_collection_cmd.cpp
+++ b/src/mongo/db/commands/rename_collection_cmd.cpp
@@ -77,15 +77,15 @@ public:
help << " example: { renameCollection: foo.a, to: bar.b }";
}
- static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
- WriteUnitOfWork wunit(txn);
- if (db->dropCollection(txn, collName).isOK()) {
+ static void dropCollection(OperationContext* opCtx, Database* db, StringData collName) {
+ WriteUnitOfWork wunit(opCtx);
+ if (db->dropCollection(opCtx, collName).isOK()) {
// ignoring failure case
wunit.commit();
}
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -146,7 +146,7 @@ public:
}
return appendCommandStatus(result,
- renameCollection(txn,
+ renameCollection(opCtx,
source,
target,
cmdObj["dropTarget"].trueValue(),
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 4e34e0bbb0f..b5d7c2fde6f 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -67,7 +67,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -75,7 +75,7 @@ public:
BSONObjBuilder& result) {
NamespaceString ns(parseNs(dbname, cmdObj));
- AutoGetCollectionForRead ctx(txn, ns);
+ AutoGetCollectionForRead ctx(opCtx, ns);
Collection* collection = ctx.getCollection();
if (!collection) {
@@ -83,7 +83,7 @@ public:
result, Status(ErrorCodes::NamespaceNotFound, "ns does not exist: " + ns.ns()));
}
- auto cursor = collection->getRecordStore()->getCursorForRepair(txn);
+ auto cursor = collection->getRecordStore()->getCursorForRepair(opCtx);
if (!cursor) {
return appendCommandStatus(
result, Status(ErrorCodes::CommandNotSupported, "repair iterator not supported"));
@@ -91,11 +91,11 @@ public:
std::unique_ptr<WorkingSet> ws(new WorkingSet());
std::unique_ptr<MultiIteratorStage> stage(
- new MultiIteratorStage(txn, ws.get(), collection));
+ new MultiIteratorStage(opCtx, ws.get(), collection));
stage->addIterator(std::move(cursor));
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(ws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
+ opCtx, std::move(ws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
invariant(statusWithPlanExecutor.isOK());
std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -109,7 +109,7 @@ public:
auto pinnedCursor = collection->getCursorManager()->registerCursor(
{exec.release(),
ns.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
appendCursorResponseObject(
pinnedCursor.getCursor()->cursorid(), ns.ns(), BSONArray(), &result);
diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp
index 67716cf7a14..fd429ed21b1 100644
--- a/src/mongo/db/commands/server_status.cpp
+++ b/src/mongo/db/commands/server_status.cpp
@@ -85,7 +85,7 @@ public:
actions.addAction(ActionType::serverStatus);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -93,7 +93,7 @@ public:
BSONObjBuilder& result) {
_runCalled = true;
- const auto service = txn->getServiceContext();
+ const auto service = opCtx->getServiceContext();
const auto clock = service->getFastClockSource();
const auto runStart = clock->now();
BSONObjBuilder timeBuilder(256);
@@ -135,7 +135,7 @@ public:
continue;
}
- section->appendSection(txn, elem, &result);
+ section->appendSection(opCtx, elem, &result);
timeBuilder.appendNumber(
static_cast<string>(str::stream() << "after " << section->getSectionName()),
durationCount<Milliseconds>(clock->now() - runStart));
@@ -201,7 +201,7 @@ OpCounterServerStatusSection::OpCounterServerStatusSection(const string& section
OpCounters* counters)
: ServerStatusSection(sectionName), _counters(counters) {}
-BSONObj OpCounterServerStatusSection::generateSection(OperationContext* txn,
+BSONObj OpCounterServerStatusSection::generateSection(OperationContext* opCtx,
const BSONElement& configElement) const {
return _counters->getObj();
}
@@ -220,9 +220,9 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
BSONObjBuilder bb;
- auto stats = txn->getServiceContext()->getTransportLayer()->sessionStats();
+ auto stats = opCtx->getServiceContext()->getTransportLayer()->sessionStats();
bb.append("current", static_cast<int>(stats.numOpenSessions));
bb.append("available", static_cast<int>(stats.numAvailableSessions));
bb.append("totalCreated", static_cast<int>(stats.numCreatedSessions));
@@ -238,7 +238,7 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
BSONObjBuilder bb;
bb.append("note", "fields vary by platform");
@@ -258,7 +258,7 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
BSONObjBuilder asserts;
asserts.append("regular", assertionCount.regular);
asserts.append("warning", assertionCount.warning);
@@ -278,7 +278,7 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
BSONObjBuilder b;
networkCounter.append(b);
appendMessageCompressionStats(&b);
@@ -295,7 +295,7 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
BSONObj result;
if (getSSLManager()) {
result = getSSLManager()->getSSLConfiguration().getServerStatusBSON();
@@ -334,7 +334,7 @@ public:
return false;
}
- void appendSection(OperationContext* txn,
+ void appendSection(OperationContext* opCtx,
const BSONElement& configElement,
BSONObjBuilder* out) const override {
out->append(
diff --git a/src/mongo/db/commands/server_status.h b/src/mongo/db/commands/server_status.h
index b017688acf2..506c1428629 100644
--- a/src/mongo/db/commands/server_status.h
+++ b/src/mongo/db/commands/server_status.h
@@ -80,7 +80,8 @@ public:
* @param configElement the element from the actual command related to this section
* so if the section is 'foo', this is cmdObj['foo']
*/
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const {
return BSONObj{};
};
@@ -94,10 +95,10 @@ public:
* If you are doing something a bit more complicated, you can implement this and have
* full control over what gets included in the command result.
*/
- virtual void appendSection(OperationContext* txn,
+ virtual void appendSection(OperationContext* opCtx,
const BSONElement& configElement,
BSONObjBuilder* result) const {
- const auto ret = generateSection(txn, configElement);
+ const auto ret = generateSection(opCtx, configElement);
if (ret.isEmpty())
return;
result->append(getSectionName(), ret);
@@ -114,7 +115,8 @@ public:
return true;
}
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const;
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const;
private:
const OpCounters* _counters;
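The mock sections in server_status.cpp above follow the interface described here; a minimal sketch of a custom section against the renamed virtuals (the section name and appended field are illustrative only):

    class ExampleSection : public ServerStatusSection {
    public:
        ExampleSection() : ServerStatusSection("exampleSection") {}
        bool includeByDefault() const {
            return false;
        }
        // Formerly: generateSection(OperationContext* txn, ...).
        BSONObj generateSection(OperationContext* opCtx,
                                const BSONElement& configElement) const {
            BSONObjBuilder bb;
            bb.append("note", "illustrative only");
            return bb.obj();
        }
    };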
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 4d6c7f9867d..6e173143efc 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -81,7 +81,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -90,7 +90,7 @@ public:
const auto version = uassertStatusOK(
FeatureCompatibilityVersionCommandParser::extractVersionFromCommand(getName(), cmdObj));
- FeatureCompatibilityVersion::set(txn, version);
+ FeatureCompatibilityVersion::set(opCtx, version);
return true;
}
diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp
index 5c215c4c6f4..8ab963eb71e 100644
--- a/src/mongo/db/commands/snapshot_management.cpp
+++ b/src/mongo/db/commands/snapshot_management.cpp
@@ -63,7 +63,7 @@ public:
h << "Creates a new named snapshot";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int,
@@ -75,14 +75,15 @@ public:
return appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""});
}
- ScopedTransaction st(txn, MODE_IX);
- Lock::GlobalLock lk(txn->lockState(), MODE_IX, UINT_MAX);
+ ScopedTransaction st(opCtx, MODE_IX);
+ Lock::GlobalLock lk(opCtx->lockState(), MODE_IX, UINT_MAX);
- auto status = snapshotManager->prepareForCreateSnapshot(txn);
+ auto status = snapshotManager->prepareForCreateSnapshot(opCtx);
if (status.isOK()) {
- const auto name = repl::ReplicationCoordinator::get(txn)->reserveSnapshotName(nullptr);
+ const auto name =
+ repl::ReplicationCoordinator::get(opCtx)->reserveSnapshotName(nullptr);
result.append("name", static_cast<long long>(name.asU64()));
- status = snapshotManager->createSnapshot(txn, name);
+ status = snapshotManager->createSnapshot(opCtx, name);
}
return appendCommandStatus(result, status);
}
@@ -113,7 +114,7 @@ public:
h << "Sets the snapshot for {readConcern: {level: 'majority'}}";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int,
@@ -125,8 +126,8 @@ public:
return appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""});
}
- ScopedTransaction st(txn, MODE_IX);
- Lock::GlobalLock lk(txn->lockState(), MODE_IX, UINT_MAX);
+ ScopedTransaction st(opCtx, MODE_IX);
+ Lock::GlobalLock lk(opCtx->lockState(), MODE_IX, UINT_MAX);
auto name = SnapshotName(cmdObj.firstElement().Long());
snapshotManager->setCommittedSnapshot(name);
return true;
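Both snapshot commands take the global lock off the renamed context before touching the SnapshotManager; the snapshot-creation path, condensed from the hunk above (error handling elided, names as in that hunk):

    ScopedTransaction st(opCtx, MODE_IX);
    Lock::GlobalLock lk(opCtx->lockState(), MODE_IX, UINT_MAX);
    auto status = snapshotManager->prepareForCreateSnapshot(opCtx);
    if (status.isOK()) {
        const auto name =
            repl::ReplicationCoordinator::get(opCtx)->reserveSnapshotName(nullptr);
        status = snapshotManager->createSnapshot(opCtx, name);
    }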
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 560e6496688..34bc757d554 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -75,7 +75,7 @@ public:
virtual void help(stringstream& help) const {
help << "internal. for testing only.";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -85,23 +85,23 @@ public:
log() << "test only command godinsert invoked coll:" << nss.coll();
BSONObj obj = cmdObj["obj"].embeddedObjectUserCheck();
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, nss.ns());
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx->lockState(), dbname, MODE_X);
+ OldClientContext ctx(opCtx, nss.ns());
Database* db = ctx.db();
- WriteUnitOfWork wunit(txn);
- UnreplicatedWritesBlock unreplicatedWritesBlock(txn);
+ WriteUnitOfWork wunit(opCtx);
+ UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
Collection* collection = db->getCollection(nss);
if (!collection) {
- collection = db->createCollection(txn, nss.ns());
+ collection = db->createCollection(opCtx, nss.ns());
if (!collection) {
errmsg = "could not create collection";
return false;
}
}
OpDebug* const nullOpDebug = nullptr;
- Status status = collection->insertDocument(txn, obj, nullOpDebug, false);
+ Status status = collection->insertDocument(opCtx, obj, nullOpDebug, false);
if (status.isOK()) {
wunit.commit();
}
@@ -140,20 +140,20 @@ public:
const BSONObj& cmdObj,
std::vector<Privilege>* out) {}
- void _sleepInReadLock(mongo::OperationContext* txn, long long millis) {
- ScopedTransaction transaction(txn, MODE_S);
- Lock::GlobalRead lk(txn->lockState());
+ void _sleepInReadLock(mongo::OperationContext* opCtx, long long millis) {
+ ScopedTransaction transaction(opCtx, MODE_S);
+ Lock::GlobalRead lk(opCtx->lockState());
sleepmillis(millis);
}
- void _sleepInWriteLock(mongo::OperationContext* txn, long long millis) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ void _sleepInWriteLock(mongo::OperationContext* opCtx, long long millis) {
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
sleepmillis(millis);
}
CmdSleep() : Command("sleep") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
int,
@@ -178,9 +178,9 @@ public:
if (!cmdObj["lock"]) {
// Legacy implementation
if (cmdObj.getBoolField("w")) {
- _sleepInWriteLock(txn, millis);
+ _sleepInWriteLock(opCtx, millis);
} else {
- _sleepInReadLock(txn, millis);
+ _sleepInReadLock(opCtx, millis);
}
} else {
uassert(34346, "Only one of 'w' and 'lock' may be set.", !cmdObj["w"]);
@@ -189,15 +189,15 @@ public:
if (lock == "none") {
sleepmillis(millis);
} else if (lock == "w") {
- _sleepInWriteLock(txn, millis);
+ _sleepInWriteLock(opCtx, millis);
} else {
uassert(34347, "'lock' must be one of 'r', 'w', 'none'.", lock == "r");
- _sleepInReadLock(txn, millis);
+ _sleepInReadLock(opCtx, millis);
}
}
// Interrupt point for testing (e.g. maxTimeMS).
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
return true;
}
@@ -217,7 +217,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -232,11 +232,11 @@ public:
{ErrorCodes::BadValue, "n must be a positive integer"});
}
- OldClientWriteContext ctx(txn, fullNs.ns());
+ OldClientWriteContext ctx(opCtx, fullNs.ns());
Collection* collection = ctx.getCollection();
if (!collection) {
- if (ctx.db()->getViewCatalog()->lookup(txn, fullNs.ns())) {
+ if (ctx.db()->getViewCatalog()->lookup(opCtx, fullNs.ns())) {
return appendCommandStatus(
result,
{ErrorCodes::CommandNotSupportedOnView,
@@ -259,7 +259,7 @@ public:
// We will remove 'n' documents, so start truncating from the (n + 1)th document to the
// end.
std::unique_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn,
+ InternalPlanner::collectionScan(opCtx,
fullNs.ns(),
collection,
PlanExecutor::YIELD_MANUAL,
@@ -277,7 +277,7 @@ public:
}
}
- collection->cappedTruncateAfter(txn, end, inc);
+ collection->cappedTruncateAfter(opCtx, end, inc);
return true;
}
@@ -298,7 +298,7 @@ public:
const BSONObj& cmdObj,
std::vector<Privilege>* out) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -306,7 +306,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
- return appendCommandStatus(result, emptyCapped(txn, nss));
+ return appendCommandStatus(result, emptyCapped(opCtx, nss));
}
};
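The godinsert command above exercises the full write path after the rename: the transaction scope, database lock, client context, and write unit of work all hang off the same renamed pointer. A condensed sketch, assuming the same headers as test_commands.cpp; the helper name is hypothetical and the calls are taken from the hunk above:

    // Hypothetical helper illustrating the renamed, unreplicated write path.
    Status exampleUnreplicatedInsert(OperationContext* opCtx,
                                     const NamespaceString& nss,
                                     const BSONObj& obj) {
        ScopedTransaction transaction(opCtx, MODE_IX);
        Lock::DBLock lk(opCtx->lockState(), nss.db(), MODE_X);
        OldClientContext ctx(opCtx, nss.ns());
        WriteUnitOfWork wunit(opCtx);
        UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
        Collection* collection = ctx.db()->getCollection(nss);
        if (!collection) {
            collection = ctx.db()->createCollection(opCtx, nss.ns());
            if (!collection) {
                return Status(ErrorCodes::InternalError, "could not create collection");
            }
        }
        OpDebug* const nullOpDebug = nullptr;
        Status status = collection->insertDocument(opCtx, obj, nullOpDebug, false);
        if (status.isOK()) {
            wunit.commit();
        }
        return status;
    }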
diff --git a/src/mongo/db/commands/top_command.cpp b/src/mongo/db/commands/top_command.cpp
index 6f236de90da..80ef9171efd 100644
--- a/src/mongo/db/commands/top_command.cpp
+++ b/src/mongo/db/commands/top_command.cpp
@@ -65,7 +65,7 @@ public:
actions.addAction(ActionType::top);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -74,7 +74,7 @@ public:
{
BSONObjBuilder b(result.subobjStart("totals"));
b.append("note", "all times in microseconds");
- Top::get(txn->getClient()->getServiceContext()).append(b);
+ Top::get(opCtx->getClient()->getServiceContext()).append(b);
b.done();
}
return true;
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index a1fe53e84d8..1f28da9e3fc 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -82,7 +82,7 @@ public:
}
TouchCmd() : Command("touch") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -102,7 +102,7 @@ public:
return false;
}
- AutoGetCollectionForRead context(txn, nss);
+ AutoGetCollectionForRead context(opCtx, nss);
Collection* collection = context.getCollection();
if (!collection) {
@@ -111,7 +111,7 @@ public:
}
return appendCommandStatus(result,
- collection->touch(txn, touch_data, touch_indexes, &result));
+ collection->touch(opCtx, touch_data, touch_indexes, &result));
}
};
static TouchCmd touchCmd;
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 7d14e2f1416..b6a55727c80 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -135,13 +135,13 @@ Status privilegeVectorToBSONArray(const PrivilegeVector& privileges, BSONArray*
/**
* Used to get all current roles of the user identified by 'userName'.
*/
-Status getCurrentUserRoles(OperationContext* txn,
+Status getCurrentUserRoles(OperationContext* opCtx,
AuthorizationManager* authzManager,
const UserName& userName,
unordered_set<RoleName>* roles) {
User* user;
authzManager->invalidateUserByName(userName); // Need to make sure cache entry is up to date
- Status status = authzManager->acquireUser(txn, userName, &user);
+ Status status = authzManager->acquireUser(opCtx, userName, &user);
if (!status.isOK()) {
return status;
}
@@ -159,7 +159,7 @@ Status getCurrentUserRoles(OperationContext* txn,
* same database as the role it is being added to (or that the role being added to is from the
* "admin" database.
*/
-Status checkOkayToGrantRolesToRole(OperationContext* txn,
+Status checkOkayToGrantRolesToRole(OperationContext* opCtx,
const RoleName& role,
const std::vector<RoleName> rolesToAdd,
AuthorizationManager* authzManager) {
@@ -180,8 +180,8 @@ Status checkOkayToGrantRolesToRole(OperationContext* txn,
}
BSONObj roleToAddDoc;
- Status status =
- authzManager->getRoleDescription(txn, roleToAdd, PrivilegeFormat::kOmit, &roleToAddDoc);
+ Status status = authzManager->getRoleDescription(
+ opCtx, roleToAdd, PrivilegeFormat::kOmit, &roleToAddDoc);
if (status == ErrorCodes::RoleNotFound) {
return Status(ErrorCodes::RoleNotFound,
"Cannot grant nonexistent role " + roleToAdd.toString());
@@ -242,13 +242,13 @@ void appendBSONObjToBSONArrayBuilder(BSONArrayBuilder* array, const BSONObj& obj
* Should only be called on collections with authorization documents in them
* (ie admin.system.users and admin.system.roles).
*/
-Status queryAuthzDocument(OperationContext* txn,
+Status queryAuthzDocument(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& projection,
const stdx::function<void(const BSONObj&)>& resultProcessor) {
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
client.query(resultProcessor, collectionName.ns(), query, &projection);
return Status::OK();
} catch (const DBException& e) {
@@ -263,11 +263,11 @@ Status queryAuthzDocument(OperationContext* txn,
* Should only be called on collections with authorization documents in them
* (ie admin.system.users and admin.system.roles).
*/
-Status insertAuthzDocument(OperationContext* txn,
+Status insertAuthzDocument(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& document) {
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
BatchedInsertRequest req;
req.setNS(collectionName);
@@ -293,7 +293,7 @@ Status insertAuthzDocument(OperationContext* txn,
* Should only be called on collections with authorization documents in them
* (ie admin.system.users and admin.system.roles).
*/
-Status updateAuthzDocuments(OperationContext* txn,
+Status updateAuthzDocuments(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& updatePattern,
@@ -301,7 +301,7 @@ Status updateAuthzDocuments(OperationContext* txn,
bool multi,
long long* nMatched) {
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
auto doc = stdx::make_unique<BatchedUpdateDocument>();
doc->setQuery(query);
@@ -342,14 +342,14 @@ Status updateAuthzDocuments(OperationContext* txn,
* Should only be called on collections with authorization documents in them
* (ie admin.system.users and admin.system.roles).
*/
-Status updateOneAuthzDocument(OperationContext* txn,
+Status updateOneAuthzDocument(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& updatePattern,
bool upsert) {
long long nMatched;
Status status =
- updateAuthzDocuments(txn, collectionName, query, updatePattern, upsert, false, &nMatched);
+ updateAuthzDocuments(opCtx, collectionName, query, updatePattern, upsert, false, &nMatched);
if (!status.isOK()) {
return status;
}
@@ -366,12 +366,12 @@ Status updateOneAuthzDocument(OperationContext* txn,
* Should only be called on collections with authorization documents in them
* (ie admin.system.users and admin.system.roles).
*/
-Status removeAuthzDocuments(OperationContext* txn,
+Status removeAuthzDocuments(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
long long* numRemoved) {
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
auto doc = stdx::make_unique<BatchedDeleteDocument>();
doc->setQuery(query);
@@ -401,9 +401,9 @@ Status removeAuthzDocuments(OperationContext* txn,
/**
* Creates the given role object in the given database.
*/
-Status insertRoleDocument(OperationContext* txn, const BSONObj& roleObj) {
+Status insertRoleDocument(OperationContext* opCtx, const BSONObj& roleObj) {
Status status =
- insertAuthzDocument(txn, AuthorizationManager::rolesCollectionNamespace, roleObj);
+ insertAuthzDocument(opCtx, AuthorizationManager::rolesCollectionNamespace, roleObj);
if (status.isOK()) {
return status;
}
@@ -422,8 +422,8 @@ Status insertRoleDocument(OperationContext* txn, const BSONObj& roleObj) {
/**
* Updates the given role object with the given update modifier.
*/
-Status updateRoleDocument(OperationContext* txn, const RoleName& role, const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(txn,
+Status updateRoleDocument(OperationContext* opCtx, const RoleName& role, const BSONObj& updateObj) {
+ Status status = updateOneAuthzDocument(opCtx,
AuthorizationManager::rolesCollectionNamespace,
BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< role.getRole()
@@ -448,9 +448,9 @@ Status updateRoleDocument(OperationContext* txn, const RoleName& role, const BSO
* Removes roles matching the given query.
* Writes into *numRemoved the number of role documents that were modified.
*/
-Status removeRoleDocuments(OperationContext* txn, const BSONObj& query, long long* numRemoved) {
+Status removeRoleDocuments(OperationContext* opCtx, const BSONObj& query, long long* numRemoved) {
Status status = removeAuthzDocuments(
- txn, AuthorizationManager::rolesCollectionNamespace, query, numRemoved);
+ opCtx, AuthorizationManager::rolesCollectionNamespace, query, numRemoved);
if (status.code() == ErrorCodes::UnknownError) {
return Status(ErrorCodes::RoleModificationFailed, status.reason());
}
@@ -460,9 +460,9 @@ Status removeRoleDocuments(OperationContext* txn, const BSONObj& query, long lon
/**
* Creates the given user object in the given database.
*/
-Status insertPrivilegeDocument(OperationContext* txn, const BSONObj& userObj) {
+Status insertPrivilegeDocument(OperationContext* opCtx, const BSONObj& userObj) {
Status status =
- insertAuthzDocument(txn, AuthorizationManager::usersCollectionNamespace, userObj);
+ insertAuthzDocument(opCtx, AuthorizationManager::usersCollectionNamespace, userObj);
if (status.isOK()) {
return status;
}
@@ -481,10 +481,10 @@ Status insertPrivilegeDocument(OperationContext* txn, const BSONObj& userObj) {
/**
* Updates the given user object with the given update modifier.
*/
-Status updatePrivilegeDocument(OperationContext* txn,
+Status updatePrivilegeDocument(OperationContext* opCtx,
const UserName& user,
const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(txn,
+ Status status = updateOneAuthzDocument(opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< user.getUser()
@@ -509,11 +509,11 @@ Status updatePrivilegeDocument(OperationContext* txn,
* Removes users for the given database matching the given query.
* Writes into *numRemoved the number of user documents that were modified.
*/
-Status removePrivilegeDocuments(OperationContext* txn,
+Status removePrivilegeDocuments(OperationContext* opCtx,
const BSONObj& query,
long long* numRemoved) {
Status status = removeAuthzDocuments(
- txn, AuthorizationManager::usersCollectionNamespace, query, numRemoved);
+ opCtx, AuthorizationManager::usersCollectionNamespace, query, numRemoved);
if (status.code() == ErrorCodes::UnknownError) {
return Status(ErrorCodes::UserModificationFailed, status.reason());
}
@@ -524,11 +524,11 @@ Status removePrivilegeDocuments(OperationContext* txn,
* Updates the auth schema version document to reflect the current state of the system.
* 'foundSchemaVersion' is the authSchemaVersion to update with.
*/
-Status writeAuthSchemaVersionIfNeeded(OperationContext* txn,
+Status writeAuthSchemaVersionIfNeeded(OperationContext* opCtx,
AuthorizationManager* authzManager,
int foundSchemaVersion) {
Status status = updateOneAuthzDocument(
- txn,
+ opCtx,
AuthorizationManager::versionCollectionNamespace,
AuthorizationManager::versionDocumentQuery,
BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName << foundSchemaVersion)),
@@ -546,9 +546,10 @@ Status writeAuthSchemaVersionIfNeeded(OperationContext* txn,
* for the MongoDB 2.6 and 3.0 MongoDB-CR/SCRAM mixed auth mode.
* Returns an error otherwise.
*/
-Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManager* authzManager) {
+Status requireAuthSchemaVersion26Final(OperationContext* opCtx,
+ AuthorizationManager* authzManager) {
int foundSchemaVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
+ Status status = authzManager->getAuthorizationVersion(opCtx, &foundSchemaVersion);
if (!status.isOK()) {
return status;
}
@@ -562,7 +563,7 @@ Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManag
<< " but found "
<< foundSchemaVersion);
}
- return writeAuthSchemaVersionIfNeeded(txn, authzManager, foundSchemaVersion);
+ return writeAuthSchemaVersionIfNeeded(opCtx, authzManager, foundSchemaVersion);
}
/**
@@ -570,10 +571,10 @@ Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManag
* for MongoDB 2.6 during the upgrade process.
* Returns an error otherwise.
*/
-Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* txn,
+Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* opCtx,
AuthorizationManager* authzManager) {
int foundSchemaVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
+ Status status = authzManager->getAuthorizationVersion(opCtx, &foundSchemaVersion);
if (!status.isOK()) {
return status;
}
@@ -614,7 +615,7 @@ public:
return auth::checkAuthForCreateUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -670,10 +671,10 @@ public:
userObjBuilder.append(AuthorizationManager::USER_NAME_FIELD_NAME, args.userName.getUser());
userObjBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, args.userName.getDB());
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
int authzVersion;
- status = authzManager->getAuthorizationVersion(txn, &authzVersion);
+ status = authzManager->getAuthorizationVersion(opCtx, &authzVersion);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -708,7 +709,7 @@ public:
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -717,7 +718,7 @@ public:
for (size_t i = 0; i < args.roles.size(); ++i) {
BSONObj ignored;
status = authzManager->getRoleDescription(
- txn, args.roles[i], PrivilegeFormat::kOmit, &ignored);
+ opCtx, args.roles[i], PrivilegeFormat::kOmit, &ignored);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -728,7 +729,7 @@ public:
args.hasHashedPassword,
args.hasCustomData ? &args.customData : NULL,
args.roles);
- status = insertPrivilegeDocument(txn, userObj);
+ status = insertPrivilegeDocument(opCtx, userObj);
return appendCommandStatus(result, status);
}
@@ -760,7 +761,7 @@ public:
return auth::checkAuthForUpdateUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -793,7 +794,7 @@ public:
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
int authzVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &authzVersion);
+ Status status = authzManager->getAuthorizationVersion(opCtx, &authzVersion);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -815,11 +816,11 @@ public:
updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -830,7 +831,7 @@ public:
for (size_t i = 0; i < args.roles.size(); ++i) {
BSONObj ignored;
status = authzManager->getRoleDescription(
- txn, args.roles[i], PrivilegeFormat::kOmit, &ignored);
+ opCtx, args.roles[i], PrivilegeFormat::kOmit, &ignored);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -844,7 +845,7 @@ public:
args.hasRoles ? &args.roles : NULL);
status =
- updatePrivilegeDocument(txn, args.userName, BSON("$set" << updateSetBuilder.done()));
+ updatePrivilegeDocument(opCtx, args.userName, BSON("$set" << updateSetBuilder.done()));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(args.userName);
return appendCommandStatus(result, status);
@@ -878,7 +879,7 @@ public:
return auth::checkAuthForDropUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -890,10 +891,10 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -901,7 +902,7 @@ public:
audit::logDropUser(Client::getCurrent(), userName);
long long nMatched;
- status = removePrivilegeDocuments(txn,
+ status = removePrivilegeDocuments(opCtx,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser()
<< AuthorizationManager::USER_DB_FIELD_NAME
@@ -947,7 +948,7 @@ public:
return auth::checkAuthForDropAllUsersFromDatabaseCommand(client, dbname);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -957,11 +958,11 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -970,7 +971,7 @@ public:
long long numRemoved;
status = removePrivilegeDocuments(
- txn, BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname), &numRemoved);
+ opCtx, BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname), &numRemoved);
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUsersFromDB(dbname);
if (!status.isOK()) {
@@ -1005,7 +1006,7 @@ public:
return auth::checkAuthForGrantRolesToUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1019,18 +1020,18 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
- status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
+ status = getCurrentUserRoles(opCtx, authzManager, userName, &userRoles);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1039,7 +1040,7 @@ public:
RoleName& roleName = *it;
BSONObj roleDoc;
status =
- authzManager->getRoleDescription(txn, roleName, PrivilegeFormat::kOmit, &roleDoc);
+ authzManager->getRoleDescription(opCtx, roleName, PrivilegeFormat::kOmit, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1050,7 +1051,7 @@ public:
audit::logGrantRolesToUser(Client::getCurrent(), userName, roles);
BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
status = updatePrivilegeDocument(
- txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
+ opCtx, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
return appendCommandStatus(result, status);
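// Illustration, not part of this commit: the run() above is reached by a
// grantRolesToUser command of roughly the following shape. The user and role
// names are made up; the BSON-macro construction is only a sketch.
BSONObj grantCmd = BSON("grantRolesToUser"
                        << "appUser"
                        << "roles"
                        << BSON_ARRAY(BSON("role"
                                           << "readWrite"
                                           << "db"
                                           << "test")));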
@@ -1080,7 +1081,7 @@ public:
return auth::checkAuthForRevokeRolesFromUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1094,18 +1095,18 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
- status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
+ status = getCurrentUserRoles(opCtx, authzManager, userName, &userRoles);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1114,7 +1115,7 @@ public:
RoleName& roleName = *it;
BSONObj roleDoc;
status =
- authzManager->getRoleDescription(txn, roleName, PrivilegeFormat::kOmit, &roleDoc);
+ authzManager->getRoleDescription(opCtx, roleName, PrivilegeFormat::kOmit, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1125,7 +1126,7 @@ public:
audit::logRevokeRolesFromUser(Client::getCurrent(), userName, roles);
BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
status = updatePrivilegeDocument(
- txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
+ opCtx, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
return appendCommandStatus(result, status);
@@ -1159,7 +1160,7 @@ public:
return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1171,7 +1172,7 @@ public:
return appendCommandStatus(result, status);
}
- status = requireAuthSchemaVersion26UpgradeOrFinal(txn, getGlobalAuthorizationManager());
+ status = requireAuthSchemaVersion26UpgradeOrFinal(opCtx, getGlobalAuthorizationManager());
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1190,7 +1191,7 @@ public:
for (size_t i = 0; i < args.userNames.size(); ++i) {
BSONObj userDetails;
status = getGlobalAuthorizationManager()->getUserDescription(
- txn, args.userNames[i], &userDetails);
+ opCtx, args.userNames[i], &userDetails);
if (status.code() == ErrorCodes::UserNotFound) {
continue;
}
@@ -1236,7 +1237,7 @@ public:
}
const stdx::function<void(const BSONObj&)> function = stdx::bind(
appendBSONObjToBSONArrayBuilder, &usersArrayBuilder, stdx::placeholders::_1);
- queryAuthzDocument(txn,
+ queryAuthzDocument(opCtx,
AuthorizationManager::usersCollectionNamespace,
queryBuilder.done(),
projection,
@@ -1270,7 +1271,7 @@ public:
return auth::checkAuthForCreateRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1334,17 +1335,17 @@ public:
roleObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
- status = checkOkayToGrantRolesToRole(txn, args.roleName, args.roles, authzManager);
+ status = checkOkayToGrantRolesToRole(opCtx, args.roleName, args.roles, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1356,7 +1357,7 @@ public:
audit::logCreateRole(Client::getCurrent(), args.roleName, args.roles, args.privileges);
- status = insertRoleDocument(txn, roleObjBuilder.done());
+ status = insertRoleDocument(opCtx, roleObjBuilder.done());
return appendCommandStatus(result, status);
}
@@ -1384,7 +1385,7 @@ public:
return auth::checkAuthForUpdateRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1418,25 +1419,25 @@ public:
updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
BSONObj ignored;
- status =
- authzManager->getRoleDescription(txn, args.roleName, PrivilegeFormat::kOmit, &ignored);
+ status = authzManager->getRoleDescription(
+ opCtx, args.roleName, PrivilegeFormat::kOmit, &ignored);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
if (args.hasRoles) {
- status = checkOkayToGrantRolesToRole(txn, args.roleName, args.roles, authzManager);
+ status = checkOkayToGrantRolesToRole(opCtx, args.roleName, args.roles, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1454,7 +1455,7 @@ public:
args.hasRoles ? &args.roles : NULL,
args.hasPrivileges ? &args.privileges : NULL);
- status = updateRoleDocument(txn, args.roleName, BSON("$set" << updateSetBuilder.done()));
+ status = updateRoleDocument(opCtx, args.roleName, BSON("$set" << updateSetBuilder.done()));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
@@ -1483,7 +1484,7 @@ public:
return auth::checkAuthForGrantPrivilegesToRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1498,11 +1499,11 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1522,7 +1523,7 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(
- txn, roleName, PrivilegeFormat::kShowSeparate, &roleDoc);
+ opCtx, roleName, PrivilegeFormat::kShowSeparate, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1562,7 +1563,7 @@ public:
audit::logGrantPrivilegesToRole(Client::getCurrent(), roleName, privilegesToAdd);
- status = updateRoleDocument(txn, roleName, updateBSONBuilder.done());
+ status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
@@ -1592,7 +1593,7 @@ public:
return auth::checkAuthForRevokePrivilegesFromRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1606,11 +1607,11 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1625,7 +1626,7 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(
- txn, roleName, PrivilegeFormat::kShowSeparate, &roleDoc);
+ opCtx, roleName, PrivilegeFormat::kShowSeparate, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1673,7 +1674,7 @@ public:
BSONObjBuilder updateBSONBuilder;
updateObj.writeTo(&updateBSONBuilder);
- status = updateRoleDocument(txn, roleName, updateBSONBuilder.done());
+ status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
@@ -1703,7 +1704,7 @@ public:
return auth::checkAuthForGrantRolesToRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1726,24 +1727,25 @@ public:
<< " is a built-in role and cannot be modified."));
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
BSONObj roleDoc;
- status = authzManager->getRoleDescription(txn, roleName, PrivilegeFormat::kOmit, &roleDoc);
+ status =
+ authzManager->getRoleDescription(opCtx, roleName, PrivilegeFormat::kOmit, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
// Check for cycles
- status = checkOkayToGrantRolesToRole(txn, roleName, rolesToAdd, authzManager);
+ status = checkOkayToGrantRolesToRole(opCtx, roleName, rolesToAdd, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1764,7 +1766,7 @@ public:
audit::logGrantRolesToRole(Client::getCurrent(), roleName, rolesToAdd);
status = updateRoleDocument(
- txn, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))));
+ opCtx, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
@@ -1794,7 +1796,7 @@ public:
return auth::checkAuthForRevokeRolesFromRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1808,11 +1810,11 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1827,7 +1829,8 @@ public:
}
BSONObj roleDoc;
- status = authzManager->getRoleDescription(txn, roleName, PrivilegeFormat::kOmit, &roleDoc);
+ status =
+ authzManager->getRoleDescription(opCtx, roleName, PrivilegeFormat::kOmit, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1850,7 +1853,7 @@ public:
audit::logRevokeRolesFromRole(Client::getCurrent(), roleName, rolesToRemove);
status = updateRoleDocument(
- txn, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))));
+ opCtx, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
@@ -1884,7 +1887,7 @@ public:
return auth::checkAuthForDropRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1896,11 +1899,11 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1914,7 +1917,8 @@ public:
}
BSONObj roleDoc;
- status = authzManager->getRoleDescription(txn, roleName, PrivilegeFormat::kOmit, &roleDoc);
+ status =
+ authzManager->getRoleDescription(opCtx, roleName, PrivilegeFormat::kOmit, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1922,7 +1926,7 @@ public:
// Remove this role from all users
long long nMatched;
status = updateAuthzDocuments(
- txn,
+ opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON("roles" << BSON("$elemMatch" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
@@ -1951,7 +1955,7 @@ public:
// Remove this role from all other roles
status = updateAuthzDocuments(
- txn,
+ opCtx,
AuthorizationManager::rolesCollectionNamespace,
BSON("roles" << BSON("$elemMatch" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
@@ -1980,7 +1984,7 @@ public:
audit::logDropRole(Client::getCurrent(), roleName);
// Finally, remove the actual role document
- status = removeRoleDocuments(txn,
+ status = removeRoleDocuments(opCtx,
BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
<< AuthorizationManager::ROLE_DB_FIELD_NAME
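// Sketch, not code from this file: the two updateAuthzDocuments calls above
// strip the dropped role out of every user and role document with a $pull of
// roughly this shape; only the query fragments are visible in this hunk, so the
// exact builder code is an assumption.
BSONObj pullUpdate =
    BSON("$pull" << BSON("roles"
                         << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
                                 << roleName.getRole()
                                 << AuthorizationManager::ROLE_DB_FIELD_NAME
                                 << roleName.getDB())));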
@@ -2038,7 +2042,7 @@ public:
return auth::checkAuthForDropAllRolesFromDatabaseCommand(client, dbname);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2049,11 +2053,11 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2061,7 +2065,7 @@ public:
// Remove these roles from all users
long long nMatched;
status = updateAuthzDocuments(
- txn,
+ opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON("roles" << BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname)),
BSON("$pull" << BSON("roles"
@@ -2087,7 +2091,7 @@ public:
std::string sourceFieldName = str::stream() << "roles."
<< AuthorizationManager::ROLE_DB_FIELD_NAME;
status = updateAuthzDocuments(
- txn,
+ opCtx,
AuthorizationManager::rolesCollectionNamespace,
BSON(sourceFieldName << dbname),
BSON("$pull" << BSON("roles"
@@ -2112,7 +2116,7 @@ public:
audit::logDropAllRolesFromDatabase(Client::getCurrent(), dbname);
// Finally, remove the actual role documents
status = removeRoleDocuments(
- txn, BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname), &nMatched);
+ opCtx, BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname), &nMatched);
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
@@ -2182,7 +2186,7 @@ public:
return auth::checkAuthForRolesInfoCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2194,7 +2198,7 @@ public:
return appendCommandStatus(result, status);
}
- status = requireAuthSchemaVersion26UpgradeOrFinal(txn, getGlobalAuthorizationManager());
+ status = requireAuthSchemaVersion26UpgradeOrFinal(opCtx, getGlobalAuthorizationManager());
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2202,7 +2206,7 @@ public:
if (args.allForDB) {
std::vector<BSONObj> rolesDocs;
status = getGlobalAuthorizationManager()->getRoleDescriptionsForDB(
- txn, dbname, args.privilegeFormat, args.showBuiltinRoles, &rolesDocs);
+ opCtx, dbname, args.privilegeFormat, args.showBuiltinRoles, &rolesDocs);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2221,7 +2225,7 @@ public:
} else {
BSONObj roleDetails;
status = getGlobalAuthorizationManager()->getRolesDescription(
- txn, args.roleNames, args.privilegeFormat, &roleDetails);
+ opCtx, args.roleNames, args.privilegeFormat, &roleDetails);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2264,7 +2268,7 @@ public:
return auth::checkAuthForInvalidateUserCacheCommand(client);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2303,7 +2307,7 @@ public:
return auth::checkAuthForGetUserCacheGenerationCommand(client);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2451,7 +2455,7 @@ public:
* admin.system.users collection.
* Also removes any users it encounters from the usersToDrop set.
*/
- static void addUser(OperationContext* txn,
+ static void addUser(OperationContext* opCtx,
AuthorizationManager* authzManager,
StringData db,
bool update,
@@ -2464,7 +2468,7 @@ public:
if (update && usersToDrop->count(userName)) {
auditCreateOrUpdateUser(userObj, false);
- Status status = updatePrivilegeDocument(txn, userName, userObj);
+ Status status = updatePrivilegeDocument(opCtx, userName, userObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
warning() << "Could not update user " << userName
@@ -2472,7 +2476,7 @@ public:
}
} else {
auditCreateOrUpdateUser(userObj, true);
- Status status = insertPrivilegeDocument(txn, userObj);
+ Status status = insertPrivilegeDocument(opCtx, userObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
warning() << "Could not insert user " << userName
@@ -2489,7 +2493,7 @@ public:
* admin.system.roles collection.
* Also removes any roles it encounters from the rolesToDrop set.
*/
- static void addRole(OperationContext* txn,
+ static void addRole(OperationContext* opCtx,
AuthorizationManager* authzManager,
StringData db,
bool update,
@@ -2502,7 +2506,7 @@ public:
if (update && rolesToDrop->count(roleName)) {
auditCreateOrUpdateRole(roleObj, false);
- Status status = updateRoleDocument(txn, roleName, roleObj);
+ Status status = updateRoleDocument(opCtx, roleName, roleObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
warning() << "Could not update role " << roleName
@@ -2510,7 +2514,7 @@ public:
}
} else {
auditCreateOrUpdateRole(roleObj, true);
- Status status = insertRoleDocument(txn, roleObj);
+ Status status = insertRoleDocument(opCtx, roleObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
warning() << "Could not insert role " << roleName
@@ -2524,7 +2528,7 @@ public:
* Moves all user objects from usersCollName into admin.system.users. If drop is true,
* removes any users that were in admin.system.users but not in usersCollName.
*/
- Status processUsers(OperationContext* txn,
+ Status processUsers(OperationContext* opCtx,
AuthorizationManager* authzManager,
StringData usersCollName,
StringData db,
@@ -2550,7 +2554,7 @@ public:
<< 1);
Status status =
- queryAuthzDocument(txn,
+ queryAuthzDocument(opCtx,
AuthorizationManager::usersCollectionNamespace,
query,
fields,
@@ -2563,12 +2567,12 @@ public:
}
Status status = queryAuthzDocument(
- txn,
+ opCtx,
NamespaceString(usersCollName),
db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db),
BSONObj(),
stdx::bind(&CmdMergeAuthzCollections::addUser,
- txn,
+ opCtx,
authzManager,
db,
drop,
@@ -2585,7 +2589,7 @@ public:
++it) {
const UserName& userName = *it;
audit::logDropUser(Client::getCurrent(), userName);
- status = removePrivilegeDocuments(txn,
+ status = removePrivilegeDocuments(opCtx,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser().toString()
<< AuthorizationManager::USER_DB_FIELD_NAME
@@ -2605,7 +2609,7 @@ public:
* Moves all role objects from rolesCollName into admin.system.roles. If drop is true,
* removes any roles that were in admin.system.roles but not in rolesCollName.
*/
- Status processRoles(OperationContext* txn,
+ Status processRoles(OperationContext* opCtx,
AuthorizationManager* authzManager,
StringData rolesCollName,
StringData db,
@@ -2630,7 +2634,7 @@ public:
<< 1);
Status status =
- queryAuthzDocument(txn,
+ queryAuthzDocument(opCtx,
AuthorizationManager::rolesCollectionNamespace,
query,
fields,
@@ -2643,12 +2647,12 @@ public:
}
Status status = queryAuthzDocument(
- txn,
+ opCtx,
NamespaceString(rolesCollName),
db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db),
BSONObj(),
stdx::bind(&CmdMergeAuthzCollections::addRole,
- txn,
+ opCtx,
authzManager,
db,
drop,
@@ -2665,7 +2669,7 @@ public:
++it) {
const RoleName& roleName = *it;
audit::logDropRole(Client::getCurrent(), roleName);
- status = removeRoleDocuments(txn,
+ status = removeRoleDocuments(opCtx,
BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole().toString()
<< AuthorizationManager::ROLE_DB_FIELD_NAME
@@ -2681,7 +2685,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2701,24 +2705,26 @@ public:
"\"tempRolescollection\""));
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
if (!args.usersCollName.empty()) {
- Status status = processUsers(txn, authzManager, args.usersCollName, args.db, args.drop);
+ Status status =
+ processUsers(opCtx, authzManager, args.usersCollName, args.db, args.drop);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
}
if (!args.rolesCollName.empty()) {
- Status status = processRoles(txn, authzManager, args.rolesCollName, args.db, args.drop);
+ Status status =
+ processRoles(opCtx, authzManager, args.rolesCollName, args.db, args.drop);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
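// For context (not in the diff): _mergeAuthzCollections is the command
// mongorestore issues to install dumped users and roles. A hypothetical
// invocation, with field names inferred from the argument handling above and
// collection names invented for the example:
BSONObj mergeCmd = BSON("_mergeAuthzCollections" << 1
                        << "tempUsersCollection" << "admin.tempusers"
                        << "tempRolesCollection" << "admin.temproles"
                        << "db" << ""
                        << "drop" << true);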
@@ -2742,7 +2748,7 @@ Status logUpgradeFailed(const Status& status) {
*
* Throws a DBException on errors.
*/
-void updateUserCredentials(OperationContext* txn,
+void updateUserCredentials(OperationContext* opCtx,
const StringData& sourceDB,
const BSONObj& userDoc) {
// Skip users in $external, SERVER-18475
@@ -2789,7 +2795,7 @@ void updateUserCredentials(OperationContext* txn,
}
uassertStatusOK(updateOneAuthzDocument(
- txn, NamespaceString("admin", "system.users"), query, updateBuilder.obj(), true));
+ opCtx, NamespaceString("admin", "system.users"), query, updateBuilder.obj(), true));
}
/** Loop through all the user documents in the admin.system.users collection.
@@ -2798,20 +2804,20 @@ void updateUserCredentials(OperationContext* txn,
* 2. Remove the MONGODB-CR hash
* 3. Add SCRAM credentials to the user document credentials section
*/
-Status updateCredentials(OperationContext* txn) {
+Status updateCredentials(OperationContext* opCtx) {
// Loop through and update the user documents in admin.system.users.
- Status status =
- queryAuthzDocument(txn,
- NamespaceString("admin", "system.users"),
- BSONObj(),
- BSONObj(),
- stdx::bind(updateUserCredentials, txn, "admin", stdx::placeholders::_1));
+ Status status = queryAuthzDocument(
+ opCtx,
+ NamespaceString("admin", "system.users"),
+ BSONObj(),
+ BSONObj(),
+ stdx::bind(updateUserCredentials, opCtx, "admin", stdx::placeholders::_1));
if (!status.isOK())
return logUpgradeFailed(status);
// Update the schema version document.
status =
- updateOneAuthzDocument(txn,
+ updateOneAuthzDocument(opCtx,
AuthorizationManager::versionCollectionNamespace,
AuthorizationManager::versionDocumentQuery,
BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName
@@ -2836,11 +2842,11 @@ Status updateCredentials(OperationContext* txn) {
* On failure, returns a status other than Status::OK(). In this case, it is typically safe
* to try again.
*/
-Status upgradeAuthSchemaStep(OperationContext* txn,
+Status upgradeAuthSchemaStep(OperationContext* opCtx,
AuthorizationManager* authzManager,
bool* isDone) {
int authzVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &authzVersion);
+ Status status = authzManager->getAuthorizationVersion(opCtx, &authzVersion);
if (!status.isOK()) {
return status;
}
@@ -2848,7 +2854,7 @@ Status upgradeAuthSchemaStep(OperationContext* txn,
switch (authzVersion) {
case AuthorizationManager::schemaVersion26Final:
case AuthorizationManager::schemaVersion28SCRAM: {
- Status status = updateCredentials(txn);
+ Status status = updateCredentials(opCtx);
if (status.isOK())
*isDone = true;
return status;
@@ -2874,7 +2880,9 @@ Status upgradeAuthSchemaStep(OperationContext* txn,
* progress performing the upgrade, and the specific code and message in the returned status
* may provide additional information.
*/
-Status upgradeAuthSchema(OperationContext* txn, AuthorizationManager* authzManager, int maxSteps) {
+Status upgradeAuthSchema(OperationContext* opCtx,
+ AuthorizationManager* authzManager,
+ int maxSteps) {
if (maxSteps < 1) {
return Status(ErrorCodes::BadValue,
"Minimum value for maxSteps parameter to upgradeAuthSchema is 1");
@@ -2882,7 +2890,7 @@ Status upgradeAuthSchema(OperationContext* txn, AuthorizationManager* authzManag
authzManager->invalidateUserCache();
for (int i = 0; i < maxSteps; ++i) {
bool isDone;
- Status status = upgradeAuthSchemaStep(txn, authzManager, &isDone);
+ Status status = upgradeAuthSchemaStep(opCtx, authzManager, &isDone);
authzManager->invalidateUserCache();
if (!status.isOK() || isDone) {
return status;
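// A minimal calling sketch (not in the patch) for the driver loop above; the
// authSchemaUpgrade command handler below does essentially this with the
// user-supplied maxSteps while holding the authz data mutex. The step bound of
// 5 here is arbitrary.
AuthorizationManager* schemaAuthzManager = getGlobalAuthorizationManager();
Status upgradeStatus = upgradeAuthSchema(opCtx, schemaAuthzManager, /*maxSteps*/ 5);
if (!upgradeStatus.isOK()) {
    warning() << "authSchemaUpgrade did not complete: " << upgradeStatus.reason();
}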
@@ -2919,7 +2927,7 @@ public:
return auth::checkAuthForAuthSchemaUpgradeCommand(client);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2931,12 +2939,12 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
- status = upgradeAuthSchema(txn, authzManager, parsedArgs.maxSteps);
+ status = upgradeAuthSchema(opCtx, authzManager, parsedArgs.maxSteps);
if (status.isOK())
result.append("done", true);
return appendCommandStatus(result, status);
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 02c577da9c8..2fc05974986 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -75,7 +75,7 @@ public:
}
// { validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] [, full: <bool>] }
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -109,11 +109,11 @@ public:
LOG(0) << "CMD: validate " << nss.ns();
}
- AutoGetDb ctx(txn, nss.db(), MODE_IX);
- Lock::CollectionLock collLk(txn->lockState(), nss.ns(), MODE_X);
+ AutoGetDb ctx(opCtx, nss.db(), MODE_IX);
+ Lock::CollectionLock collLk(opCtx->lockState(), nss.ns(), MODE_X);
Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(nss) : NULL;
if (!collection) {
- if (ctx.getDb() && ctx.getDb()->getViewCatalog()->lookup(txn, nss.ns())) {
+ if (ctx.getDb() && ctx.getDb()->getViewCatalog()->lookup(opCtx, nss.ns())) {
errmsg = "Cannot validate a view";
return appendCommandStatus(result, {ErrorCodes::CommandNotSupportedOnView, errmsg});
}
@@ -125,7 +125,7 @@ public:
result.append("ns", nss.ns());
ValidateResults results;
- Status status = collection->validate(txn, level, &results, &result);
+ Status status = collection->validate(opCtx, level, &results, &result);
if (!status.isOK())
return appendCommandStatus(result, status);
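// Orientation only (not part of this file's changes): run() above serves a
// validate command of roughly this shape; the collection name is hypothetical.
BSONObj validateCmd = BSON("validate"
                           << "mycoll"  // collection name without the db part
                           << "full" << true);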
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 506709096b8..4093bcf083e 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -87,21 +87,21 @@ Status checkAuthForWriteCommand(Client* client,
return status;
}
-bool shouldSkipOutput(OperationContext* txn) {
- const WriteConcernOptions& writeConcern = txn->getWriteConcern();
+bool shouldSkipOutput(OperationContext* opCtx) {
+ const WriteConcernOptions& writeConcern = opCtx->getWriteConcern();
return writeConcern.wMode.empty() && writeConcern.wNumNodes == 0 &&
(writeConcern.syncMode == WriteConcernOptions::SyncMode::NONE ||
writeConcern.syncMode == WriteConcernOptions::SyncMode::UNSET);
}
enum class ReplyStyle { kUpdate, kNotUpdate }; // update has extra fields.
-void serializeReply(OperationContext* txn,
+void serializeReply(OperationContext* opCtx,
ReplyStyle replyStyle,
bool continueOnError,
size_t opsInBatch,
const WriteResult& result,
BSONObjBuilder* out) {
- if (shouldSkipOutput(txn))
+ if (shouldSkipOutput(opCtx))
return;
long long n = 0;
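// Illustration of the renamed helper above (a sketch, not from the commit): an
// unacknowledged write concern is exactly the case serializeReply() returns
// early on. Assumes OperationContext::setWriteConcern exists on this branch.
WriteConcernOptions wc;
wc.wNumNodes = 0;                                    // client sent {w: 0}
wc.syncMode = WriteConcernOptions::SyncMode::UNSET;  // no j/fsync requested
opCtx->setWriteConcern(wc);
invariant(shouldSkipOutput(opCtx));                  // nothing will be appended to the reply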
@@ -170,10 +170,10 @@ void serializeReply(OperationContext* txn,
{
// Undocumented repl fields that mongos depends on.
- auto* replCoord = repl::ReplicationCoordinator::get(txn->getServiceContext());
+ auto* replCoord = repl::ReplicationCoordinator::get(opCtx->getServiceContext());
const auto replMode = replCoord->getReplicationMode();
if (replMode != repl::ReplicationCoordinator::modeNone) {
- const auto lastOp = repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
+ const auto lastOp = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
if (lastOp.getTerm() == repl::OpTime::kUninitializedTerm) {
out->append("opTime", lastOp.getTimestamp());
} else {
@@ -207,22 +207,22 @@ public:
return ReadWriteType::kWrite;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) final {
try {
- runImpl(txn, dbname, cmdObj, result);
+ runImpl(opCtx, dbname, cmdObj, result);
return true;
} catch (const DBException& ex) {
- LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
+ LastError::get(opCtx->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
throw;
}
}
- virtual void runImpl(OperationContext* txn,
+ virtual void runImpl(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) = 0;
@@ -251,13 +251,13 @@ public:
cmdObj);
}
- void runImpl(OperationContext* txn,
+ void runImpl(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) final {
const auto batch = parseInsertCommand(dbname, cmdObj);
- const auto reply = performInserts(txn, batch);
- serializeReply(txn,
+ const auto reply = performInserts(opCtx, batch);
+ serializeReply(opCtx,
ReplyStyle::kNotUpdate,
batch.continueOnError,
batch.documents.size(),
@@ -287,17 +287,21 @@ public:
cmdObj);
}
- void runImpl(OperationContext* txn,
+ void runImpl(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) final {
const auto batch = parseUpdateCommand(dbname, cmdObj);
- const auto reply = performUpdates(txn, batch);
- serializeReply(
- txn, ReplyStyle::kUpdate, batch.continueOnError, batch.updates.size(), reply, &result);
+ const auto reply = performUpdates(opCtx, batch);
+ serializeReply(opCtx,
+ ReplyStyle::kUpdate,
+ batch.continueOnError,
+ batch.updates.size(),
+ reply,
+ &result);
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -319,16 +323,16 @@ public:
updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
updateRequest.setExplain();
- ParsedUpdate parsedUpdate(txn, &updateRequest);
+ ParsedUpdate parsedUpdate(opCtx, &updateRequest);
uassertStatusOK(parsedUpdate.parseRequest());
// Explains of write commands are read-only, but we take write locks so that timing
// info is more accurate.
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection collection(txn, batch.ns, MODE_IX);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection collection(opCtx, batch.ns, MODE_IX);
auto exec = uassertStatusOK(getExecutorUpdate(
- txn, &CurOp::get(txn)->debug(), collection.getCollection(), &parsedUpdate));
+ opCtx, &CurOp::get(opCtx)->debug(), collection.getCollection(), &parsedUpdate));
Explain::explainStages(exec.get(), collection.getCollection(), verbosity, out);
return Status::OK();
}
@@ -355,13 +359,13 @@ public:
cmdObj);
}
- void runImpl(OperationContext* txn,
+ void runImpl(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) final {
const auto batch = parseDeleteCommand(dbname, cmdObj);
- const auto reply = performDeletes(txn, batch);
- serializeReply(txn,
+ const auto reply = performDeletes(opCtx, batch);
+ serializeReply(opCtx,
ReplyStyle::kNotUpdate,
batch.continueOnError,
batch.deletes.size(),
@@ -369,7 +373,7 @@ public:
&result);
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -387,17 +391,17 @@ public:
deleteRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
deleteRequest.setExplain();
- ParsedDelete parsedDelete(txn, &deleteRequest);
+ ParsedDelete parsedDelete(opCtx, &deleteRequest);
uassertStatusOK(parsedDelete.parseRequest());
// Explains of write commands are read-only, but we take write locks so that timing
// info is more accurate.
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection collection(txn, batch.ns, MODE_IX);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection collection(opCtx, batch.ns, MODE_IX);
// Explain the plan tree.
auto exec = uassertStatusOK(getExecutorDelete(
- txn, &CurOp::get(txn)->debug(), collection.getCollection(), &parsedDelete));
+ opCtx, &CurOp::get(opCtx)->debug(), collection.getCollection(), &parsedDelete));
Explain::explainStages(exec.get(), collection.getCollection(), verbosity, out);
return Status::OK();
}