summary | refs | log | tree | commit | diff
path: root/src/mongo/db/ops
diff options
context:
space:
mode:
author: Maria van Keulen <maria@mongodb.com> 2017-03-07 12:00:08 -0500
committer: Maria van Keulen <maria@mongodb.com> 2017-03-07 12:00:08 -0500
commit: 589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79 (patch)
tree: c7a090ffdd56a91ae677e2492c61b820af44f964 /src/mongo/db/ops
parent: 3cba97198638df3750e3b455e2ad57af7ee536ae (diff)
download: mongo-589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79.tar.gz
SERVER-27938 Rename all OperationContext variables to opCtx
This commit is an automated rename of all whole word instances of txn, _txn, and txnPtr to opCtx, _opCtx, and opCtxPtr, respectively in all .cpp and .h files in src/mongo.
Diffstat (limited to 'src/mongo/db/ops')
-rw-r--r--  src/mongo/db/ops/delete.cpp             |  10
-rw-r--r--  src/mongo/db/ops/delete.h               |   2
-rw-r--r--  src/mongo/db/ops/parsed_delete.cpp      |   8
-rw-r--r--  src/mongo/db/ops/parsed_delete.h        |   4
-rw-r--r--  src/mongo/db/ops/parsed_update.cpp      |  16
-rw-r--r--  src/mongo/db/ops/parsed_update.h        |   4
-rw-r--r--  src/mongo/db/ops/update.cpp             |  26
-rw-r--r--  src/mongo/db/ops/update.h               |   2
-rw-r--r--  src/mongo/db/ops/update_driver.cpp      |   5
-rw-r--r--  src/mongo/db/ops/update_driver.h        |   2
-rw-r--r--  src/mongo/db/ops/update_driver_test.cpp |  60
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp     | 218
-rw-r--r--  src/mongo/db/ops/write_ops_exec.h       |   6
13 files changed, 182 insertions, 181 deletions
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index 7f509308ad4..636a9aa17ee 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -44,7 +44,7 @@ namespace mongo {
justOne: stop after 1 match
god: allow access to system namespaces, and don't yield
*/
-long long deleteObjects(OperationContext* txn,
+long long deleteObjects(OperationContext* opCtx,
Collection* collection,
StringData ns,
BSONObj pattern,
@@ -60,20 +60,20 @@ long long deleteObjects(OperationContext* txn,
request.setFromMigrate(fromMigrate);
request.setYieldPolicy(policy);
- ParsedDelete parsedDelete(txn, &request);
+ ParsedDelete parsedDelete(opCtx, &request);
uassertStatusOK(parsedDelete.parseRequest());
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
auto lastOpAtOperationStart = repl::ReplClientInfo::forClient(client).getLastOp();
std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
- getExecutorDelete(txn, &CurOp::get(txn)->debug(), collection, &parsedDelete));
+ getExecutorDelete(opCtx, &CurOp::get(opCtx)->debug(), collection, &parsedDelete));
uassertStatusOK(exec->executePlan());
// No-ops need to reset lastOp in the client, for write concern.
if (repl::ReplClientInfo::forClient(client).getLastOp() == lastOpAtOperationStart) {
- repl::ReplClientInfo::forClient(client).setLastOpToSystemLastOpTime(txn);
+ repl::ReplClientInfo::forClient(client).setLastOpToSystemLastOpTime(opCtx);
}
return DeleteStage::getNumDeleted(*exec);
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
index f45a2674cb4..bead641a0b8 100644
--- a/src/mongo/db/ops/delete.h
+++ b/src/mongo/db/ops/delete.h
@@ -39,7 +39,7 @@ namespace mongo {
class Database;
class OperationContext;
-long long deleteObjects(OperationContext* txn,
+long long deleteObjects(OperationContext* opCtx,
Collection* collection,
StringData ns,
BSONObj pattern,
diff --git a/src/mongo/db/ops/parsed_delete.cpp b/src/mongo/db/ops/parsed_delete.cpp
index 6508c99c120..854ff7bb480 100644
--- a/src/mongo/db/ops/parsed_delete.cpp
+++ b/src/mongo/db/ops/parsed_delete.cpp
@@ -48,8 +48,8 @@
namespace mongo {
-ParsedDelete::ParsedDelete(OperationContext* txn, const DeleteRequest* request)
- : _txn(txn), _request(request) {}
+ParsedDelete::ParsedDelete(OperationContext* opCtx, const DeleteRequest* request)
+ : _opCtx(opCtx), _request(request) {}
Status ParsedDelete::parseRequest() {
dassert(!_canonicalQuery.get());
@@ -79,7 +79,7 @@ Status ParsedDelete::parseRequest() {
Status ParsedDelete::parseQueryToCQ() {
dassert(!_canonicalQuery.get());
- const ExtensionsCallbackReal extensionsCallback(_txn, &_request->getNamespaceString());
+ const ExtensionsCallbackReal extensionsCallback(_opCtx, &_request->getNamespaceString());
// The projection needs to be applied after the delete operation, so we do not specify a
// projection during canonicalization.
@@ -99,7 +99,7 @@ Status ParsedDelete::parseQueryToCQ() {
qr->setLimit(1);
}
- auto statusWithCQ = CanonicalQuery::canonicalize(_txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(_opCtx, std::move(qr), extensionsCallback);
if (statusWithCQ.isOK()) {
_canonicalQuery = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/ops/parsed_delete.h b/src/mongo/db/ops/parsed_delete.h
index 1a36b9b78ea..9cda64718a1 100644
--- a/src/mongo/db/ops/parsed_delete.h
+++ b/src/mongo/db/ops/parsed_delete.h
@@ -63,7 +63,7 @@ public:
* The object pointed to by "request" must stay in scope for the life of the constructed
* ParsedDelete.
*/
- ParsedDelete(OperationContext* txn, const DeleteRequest* request);
+ ParsedDelete(OperationContext* opCtx, const DeleteRequest* request);
/**
* Parses the delete request to a canonical query. On success, the parsed delete can be
@@ -106,7 +106,7 @@ public:
private:
// Transactional context. Not owned by us.
- OperationContext* _txn;
+ OperationContext* _opCtx;
// Unowned pointer to the request object that this executor will process.
const DeleteRequest* const _request;
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index da9eba4bd93..6df14116c1f 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -39,8 +39,8 @@
namespace mongo {
-ParsedUpdate::ParsedUpdate(OperationContext* txn, const UpdateRequest* request)
- : _txn(txn), _request(request), _driver(UpdateDriver::Options()), _canonicalQuery() {}
+ParsedUpdate::ParsedUpdate(OperationContext* opCtx, const UpdateRequest* request)
+ : _opCtx(opCtx), _request(request), _driver(UpdateDriver::Options()), _canonicalQuery() {}
Status ParsedUpdate::parseRequest() {
// It is invalid to request that the UpdateStage return the prior or newly-updated version
@@ -59,7 +59,7 @@ Status ParsedUpdate::parseRequest() {
"http://dochub.mongodb.org/core/3.4-feature-compatibility.");
}
- auto collator = CollatorFactoryInterface::get(_txn->getServiceContext())
+ auto collator = CollatorFactoryInterface::get(_opCtx->getServiceContext())
->makeFromBSON(_request->getCollation());
if (!collator.isOK()) {
return collator.getStatus();
@@ -93,7 +93,7 @@ Status ParsedUpdate::parseQuery() {
Status ParsedUpdate::parseQueryToCQ() {
dassert(!_canonicalQuery.get());
- const ExtensionsCallbackReal extensionsCallback(_txn, &_request->getNamespaceString());
+ const ExtensionsCallbackReal extensionsCallback(_opCtx, &_request->getNamespaceString());
// The projection needs to be applied after the update operation, so we do not specify a
// projection during canonicalization.
@@ -113,7 +113,7 @@ Status ParsedUpdate::parseQueryToCQ() {
qr->setLimit(1);
}
- auto statusWithCQ = CanonicalQuery::canonicalize(_txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(_opCtx, std::move(qr), extensionsCallback);
if (statusWithCQ.isOK()) {
_canonicalQuery = std::move(statusWithCQ.getValue());
}
@@ -129,11 +129,11 @@ Status ParsedUpdate::parseUpdate() {
// Config db docs shouldn't get checked for valid field names since the shard key can have
// a dot (".") in it.
const bool shouldValidate =
- !(!_txn->writesAreReplicated() || ns.isConfigDB() || _request->isFromMigration());
+ !(!_opCtx->writesAreReplicated() || ns.isConfigDB() || _request->isFromMigration());
_driver.setLogOp(true);
- _driver.setModOptions(
- ModifierInterface::Options(!_txn->writesAreReplicated(), shouldValidate, _collator.get()));
+ _driver.setModOptions(ModifierInterface::Options(
+ !_opCtx->writesAreReplicated(), shouldValidate, _collator.get()));
return _driver.parse(_request->getUpdates(), _request->isMulti());
}
diff --git a/src/mongo/db/ops/parsed_update.h b/src/mongo/db/ops/parsed_update.h
index 5547cfc8cfd..7f844a1a166 100644
--- a/src/mongo/db/ops/parsed_update.h
+++ b/src/mongo/db/ops/parsed_update.h
@@ -64,7 +64,7 @@ public:
* The object pointed to by "request" must stay in scope for the life of the constructed
* ParsedUpdate.
*/
- ParsedUpdate(OperationContext* txn, const UpdateRequest* request);
+ ParsedUpdate(OperationContext* opCtx, const UpdateRequest* request);
/**
* Parses the update request to a canonical query and an update driver. On success, the
@@ -138,7 +138,7 @@ private:
Status parseUpdate();
// Unowned pointer to the transactional context.
- OperationContext* _txn;
+ OperationContext* _opCtx;
// Unowned pointer to the request object to process.
const UpdateRequest* const _request;
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 54e66be6133..4a02a753a78 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -57,17 +57,17 @@
namespace mongo {
-UpdateResult update(OperationContext* txn, Database* db, const UpdateRequest& request) {
+UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest& request) {
invariant(db);
// Explain should never use this helper.
invariant(!request.isExplain());
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
auto lastOpAtOperationStart = repl::ReplClientInfo::forClient(client).getLastOp();
ScopeGuard lastOpSetterGuard = MakeObjGuard(repl::ReplClientInfo::forClient(client),
&repl::ReplClientInfo::setLastOpToSystemLastOpTime,
- txn);
+ opCtx);
const NamespaceString& nsString = request.getNamespaceString();
Collection* collection = db->getCollection(nsString.ns());
@@ -82,16 +82,16 @@ UpdateResult update(OperationContext* txn, Database* db, const UpdateRequest& re
if (!collection && request.isUpsert()) {
// We have to have an exclusive lock on the db to be allowed to create the collection.
// Callers should either get an X or create the collection.
- const Locker* locker = txn->lockState();
+ const Locker* locker = opCtx->lockState();
invariant(locker->isW() ||
locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()), MODE_X));
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx->lockState(), nsString.db(), MODE_X);
- const bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nsString);
+ const bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nsString);
if (userInitiatedWritesAndNotPrimary) {
uassertStatusOK(Status(ErrorCodes::PrimarySteppedDown,
@@ -99,21 +99,21 @@ UpdateResult update(OperationContext* txn, Database* db, const UpdateRequest& re
<< nsString.ns()
<< " during upsert"));
}
- WriteUnitOfWork wuow(txn);
- collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
+ WriteUnitOfWork wuow(opCtx);
+ collection = db->createCollection(opCtx, nsString.ns(), CollectionOptions());
invariant(collection);
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", nsString.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", nsString.ns());
}
// Parse the update, get an executor for it, run the executor, get stats out.
- ParsedUpdate parsedUpdate(txn, &request);
+ ParsedUpdate parsedUpdate(opCtx, &request);
uassertStatusOK(parsedUpdate.parseRequest());
OpDebug* const nullOpDebug = nullptr;
std::unique_ptr<PlanExecutor> exec =
- uassertStatusOK(getExecutorUpdate(txn, nullOpDebug, collection, &parsedUpdate));
+ uassertStatusOK(getExecutorUpdate(opCtx, nullOpDebug, collection, &parsedUpdate));
uassertStatusOK(exec->executePlan());
if (repl::ReplClientInfo::forClient(client).getLastOp() != lastOpAtOperationStart) {
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index 8ff64538a9d..2c5e0fc0f97 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -47,7 +47,7 @@ class UpdateDriver;
*
* Caller must hold the appropriate database locks.
*/
-UpdateResult update(OperationContext* txn, Database* db, const UpdateRequest& request);
+UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest& request);
/**
* takes the from document and returns a new document
diff --git a/src/mongo/db/ops/update_driver.cpp b/src/mongo/db/ops/update_driver.cpp
index e5a63d64ccb..f94f520f032 100644
--- a/src/mongo/db/ops/update_driver.cpp
+++ b/src/mongo/db/ops/update_driver.cpp
@@ -173,7 +173,7 @@ inline Status UpdateDriver::addAndParse(const modifiertable::ModifierType type,
return Status::OK();
}
-Status UpdateDriver::populateDocumentWithQueryFields(OperationContext* txn,
+Status UpdateDriver::populateDocumentWithQueryFields(OperationContext* opCtx,
const BSONObj& query,
const vector<FieldRef*>* immutablePaths,
mutablebson::Document& doc) const {
@@ -182,7 +182,8 @@ Status UpdateDriver::populateDocumentWithQueryFields(OperationContext* txn,
// $where/$text clauses do not make sense, hence empty ExtensionsCallback.
auto qr = stdx::make_unique<QueryRequest>(NamespaceString(""));
qr->setFilter(query);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
+ auto statusWithCQ =
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop());
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
diff --git a/src/mongo/db/ops/update_driver.h b/src/mongo/db/ops/update_driver.h
index a9579a89719..73f231f9764 100644
--- a/src/mongo/db/ops/update_driver.h
+++ b/src/mongo/db/ops/update_driver.h
@@ -70,7 +70,7 @@ public:
* Returns Status::OK() if the document can be used. If there are any error or
* conflicts along the way then those errors will be returned.
*/
- Status populateDocumentWithQueryFields(OperationContext* txn,
+ Status populateDocumentWithQueryFields(OperationContext* opCtx,
const BSONObj& query,
const std::vector<FieldRef*>* immutablePaths,
mutablebson::Document& doc) const;
diff --git a/src/mongo/db/ops/update_driver_test.cpp b/src/mongo/db/ops/update_driver_test.cpp
index 794ede90844..c7ef14b3e9f 100644
--- a/src/mongo/db/ops/update_driver_test.cpp
+++ b/src/mongo/db/ops/update_driver_test.cpp
@@ -181,7 +181,7 @@ public:
return *_driverRepl;
}
- OperationContext* txn() {
+ OperationContext* opCtx() {
return _opCtx.get();
}
@@ -254,139 +254,139 @@ static void assertSameFields(const BSONObj& docA, const BSONObj& docB) {
TEST_F(CreateFromQuery, BasicOp) {
BSONObj query = fromjson("{a:1,b:2}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(query, doc().getObject());
}
TEST_F(CreateFromQuery, BasicOpEq) {
BSONObj query = fromjson("{a:{$eq:1}}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{a:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, BasicOpWithId) {
BSONObj query = fromjson("{_id:1,a:1,b:2}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(query, doc().getObject());
}
TEST_F(CreateFromQuery, BasicRepl) {
BSONObj query = fromjson("{a:1,b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{}"), doc().getObject());
}
TEST_F(CreateFromQuery, BasicReplWithId) {
BSONObj query = fromjson("{_id:1,a:1,b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, BasicReplWithIdEq) {
BSONObj query = fromjson("{_id:{$eq:1},a:1,b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, NoRootIdOp) {
BSONObj query = fromjson("{'_id.a':1,'_id.b':2}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:{a:1,b:2}}"), doc().getObject());
}
TEST_F(CreateFromQuery, NoRootIdRepl) {
BSONObj query = fromjson("{'_id.a':1,'_id.b':2}");
- ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, NestedSharedRootOp) {
BSONObj query = fromjson("{'a.c':1,'a.b':{$eq:2}}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{a:{c:1,b:2}}"), doc().getObject());
}
TEST_F(CreateFromQuery, OrQueryOp) {
BSONObj query = fromjson("{$or:[{a:1}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{a:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, OrQueryIdRepl) {
BSONObj query = fromjson("{$or:[{_id:1}]}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, OrQueryNoExtractOps) {
BSONObj query = fromjson("{$or:[{a:1}, {b:2}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(BSONObj(), doc().getObject());
}
TEST_F(CreateFromQuery, OrQueryNoExtractIdRepl) {
BSONObj query = fromjson("{$or:[{_id:1}, {_id:2}]}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(BSONObj(), doc().getObject());
}
TEST_F(CreateFromQuery, AndQueryOp) {
BSONObj query = fromjson("{$and:[{'a.c':1},{'a.b':{$eq:2}}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{a:{c:1,b:2}}"), doc().getObject());
}
TEST_F(CreateFromQuery, AndQueryIdRepl) {
BSONObj query = fromjson("{$and:[{_id:1},{a:{$eq:2}}]}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, AllArrayOp) {
BSONObj query = fromjson("{a:{$all:[1]}}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{a:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, AllArrayIdRepl) {
BSONObj query = fromjson("{_id:{$all:[1]}, b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, ConflictFieldsFailOp) {
BSONObj query = fromjson("{a:1,'a.b':1}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, ConflictFieldsFailSameValueOp) {
BSONObj query = fromjson("{a:{b:1},'a.b':1}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, ConflictWithIdRepl) {
BSONObj query = fromjson("{_id:1,'_id.a':1}");
- ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, ConflictAndQueryOp) {
BSONObj query = fromjson("{$and:[{a:{b:1}},{'a.b':{$eq:1}}]}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, ConflictAllMultipleValsOp) {
BSONObj query = fromjson("{a:{$all:[1, 2]}}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, NoConflictOrQueryOp) {
BSONObj query = fromjson("{$or:[{a:{b:1}},{'a.b':{$eq:1}}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(BSONObj(), doc().getObject());
}
TEST_F(CreateFromQuery, ImmutableFieldsOp) {
BSONObj query = fromjson("{$or:[{a:{b:1}},{'a.b':{$eq:1}}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(BSONObj(), doc().getObject());
}
@@ -395,7 +395,7 @@ TEST_F(CreateFromQuery, ShardKeyRepl) {
OwnedPointerVector<FieldRef> immutablePaths;
immutablePaths.push_back(new FieldRef("a"));
ASSERT_OK(driverRepl().populateDocumentWithQueryFields(
- txn(), query, &immutablePaths.vector(), doc()));
+ opCtx(), query, &immutablePaths.vector(), doc()));
assertSameFields(fromjson("{a:1}"), doc().getObject());
}
@@ -405,7 +405,7 @@ TEST_F(CreateFromQuery, NestedShardKeyRepl) {
immutablePaths.push_back(new FieldRef("a"));
immutablePaths.push_back(new FieldRef("b.c"));
ASSERT_OK(driverRepl().populateDocumentWithQueryFields(
- txn(), query, &immutablePaths.vector(), doc()));
+ opCtx(), query, &immutablePaths.vector(), doc()));
assertSameFields(fromjson("{a:1,b:{c:2}}"), doc().getObject());
}
@@ -414,8 +414,8 @@ TEST_F(CreateFromQuery, NestedShardKeyOp) {
OwnedPointerVector<FieldRef> immutablePaths;
immutablePaths.push_back(new FieldRef("a"));
immutablePaths.push_back(new FieldRef("b.c"));
- ASSERT_OK(
- driverOps().populateDocumentWithQueryFields(txn(), query, &immutablePaths.vector(), doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(
+ opCtx(), query, &immutablePaths.vector(), doc()));
assertSameFields(fromjson("{a:1,b:{c:2},d:3}"), doc().getObject());
}
@@ -425,7 +425,7 @@ TEST_F(CreateFromQuery, NotFullShardKeyRepl) {
immutablePaths.push_back(new FieldRef("a"));
immutablePaths.push_back(new FieldRef("b"));
ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(
- txn(), query, &immutablePaths.vector(), doc()));
+ opCtx(), query, &immutablePaths.vector(), doc()));
}
} // unnamed namespace
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index f3f183a60fe..a0d0f067333 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -83,15 +83,15 @@ MONGO_FP_DECLARE(failAllInserts);
MONGO_FP_DECLARE(failAllUpdates);
MONGO_FP_DECLARE(failAllRemoves);
-void finishCurOp(OperationContext* txn, CurOp* curOp) {
+void finishCurOp(OperationContext* opCtx, CurOp* curOp) {
try {
curOp->done();
long long executionTimeMicros = curOp->totalTimeMicros();
curOp->debug().executionTimeMicros = executionTimeMicros;
- recordCurOpMetrics(txn);
- Top::get(txn->getServiceContext())
- .record(txn,
+ recordCurOpMetrics(opCtx);
+ Top::get(opCtx->getServiceContext())
+ .record(opCtx,
curOp->getNS(),
curOp->getLogicalOp(),
1, // "write locked"
@@ -111,16 +111,16 @@ void finishCurOp(OperationContext* txn, CurOp* curOp) {
const bool shouldSample = serverGlobalParams.sampleRate == 1.0
? true
- : txn->getClient()->getPrng().nextCanonicalDouble() < serverGlobalParams.sampleRate;
+ : opCtx->getClient()->getPrng().nextCanonicalDouble() < serverGlobalParams.sampleRate;
if (logAll || (shouldSample && logSlow)) {
Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
- log() << curOp->debug().report(txn->getClient(), *curOp, lockerInfo.stats);
+ opCtx->lockState()->getLockerInfo(&lockerInfo);
+ log() << curOp->debug().report(opCtx->getClient(), *curOp, lockerInfo.stats);
}
if (shouldSample && curOp->shouldDBProfile()) {
- profile(txn, CurOp::get(txn)->getNetworkOp());
+ profile(opCtx, CurOp::get(opCtx)->getNetworkOp());
}
} catch (const DBException& ex) {
// We need to ignore all errors here. We don't want a successful op to fail because of a
@@ -135,8 +135,8 @@ void finishCurOp(OperationContext* txn, CurOp* curOp) {
*/
class LastOpFixer {
public:
- LastOpFixer(OperationContext* txn, const NamespaceString& ns)
- : _txn(txn), _isOnLocalDb(ns.isLocal()) {}
+ LastOpFixer(OperationContext* opCtx, const NamespaceString& ns)
+ : _opCtx(opCtx), _isOnLocalDb(ns.isLocal()) {}
~LastOpFixer() {
if (_needToFixLastOp && !_isOnLocalDb) {
@@ -144,7 +144,7 @@ public:
// here. No-op updates will not generate a new lastOp, so we still need the
// guard to fire in that case. Operations on the local DB aren't replicated, so they
// don't need to bump the lastOp.
- replClientInfo().setLastOpToSystemLastOpTime(_txn);
+ replClientInfo().setLastOpToSystemLastOpTime(_opCtx);
}
}
@@ -161,45 +161,45 @@ public:
private:
repl::ReplClientInfo& replClientInfo() {
- return repl::ReplClientInfo::forClient(_txn->getClient());
+ return repl::ReplClientInfo::forClient(_opCtx->getClient());
}
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
bool _needToFixLastOp = true;
const bool _isOnLocalDb;
repl::OpTime _opTimeAtLastOpStart;
};
-void assertCanWrite_inlock(OperationContext* txn, const NamespaceString& ns) {
- uassert(
- ErrorCodes::PrimarySteppedDown,
- str::stream() << "Not primary while writing to " << ns.ns(),
- repl::ReplicationCoordinator::get(txn->getServiceContext())->canAcceptWritesFor(txn, ns));
- CollectionShardingState::get(txn, ns)->checkShardVersionOrThrow(txn);
+void assertCanWrite_inlock(OperationContext* opCtx, const NamespaceString& ns) {
+ uassert(ErrorCodes::PrimarySteppedDown,
+ str::stream() << "Not primary while writing to " << ns.ns(),
+ repl::ReplicationCoordinator::get(opCtx->getServiceContext())
+ ->canAcceptWritesFor(opCtx, ns));
+ CollectionShardingState::get(opCtx, ns)->checkShardVersionOrThrow(opCtx);
}
-void makeCollection(OperationContext* txn, const NamespaceString& ns) {
+void makeCollection(OperationContext* opCtx, const NamespaceString& ns) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- AutoGetOrCreateDb db(txn, ns.db(), MODE_X);
- assertCanWrite_inlock(txn, ns);
+ AutoGetOrCreateDb db(opCtx, ns.db(), MODE_X);
+ assertCanWrite_inlock(opCtx, ns);
if (!db.getDb()->getCollection(ns.ns())) { // someone else may have beat us to it.
- WriteUnitOfWork wuow(txn);
- uassertStatusOK(userCreateNS(txn, db.getDb(), ns.ns(), BSONObj()));
+ WriteUnitOfWork wuow(opCtx);
+ uassertStatusOK(userCreateNS(opCtx, db.getDb(), ns.ns(), BSONObj()));
wuow.commit();
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "implicit collection creation", ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "implicit collection creation", ns.ns());
}
/**
* Returns true if the operation can continue.
*/
-bool handleError(OperationContext* txn,
+bool handleError(OperationContext* opCtx,
const DBException& ex,
const ParsedWriteOp& wholeOp,
WriteResult* out) {
- LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
- auto& curOp = *CurOp::get(txn);
+ LastError::get(opCtx->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
+ auto& curOp = *CurOp::get(opCtx);
curOp.debug().exceptionInfo = ex.getInfo();
if (ErrorCodes::isInterruption(ErrorCodes::Error(ex.getCode()))) {
@@ -221,8 +221,8 @@ bool handleError(OperationContext* txn,
<< demangleName(typeid(ex)));
}
- ShardingState::get(txn)->onStaleShardVersion(
- txn, wholeOp.ns, staleConfigException->getVersionReceived());
+ ShardingState::get(opCtx)->onStaleShardVersion(
+ opCtx, wholeOp.ns, staleConfigException->getVersionReceived());
out->staleConfigException =
stdx::make_unique<SendStaleConfigException>(*staleConfigException);
return false;
@@ -234,7 +234,7 @@ bool handleError(OperationContext* txn,
} // namespace
-static WriteResult::SingleResult createIndex(OperationContext* txn,
+static WriteResult::SingleResult createIndex(OperationContext* opCtx,
const NamespaceString& systemIndexes,
const BSONObj& spec) {
BSONElement nsElement = spec["ns"];
@@ -264,7 +264,7 @@ static WriteResult::SingleResult createIndex(OperationContext* txn,
.done();
rpc::CommandRequest cmdRequest(&cmdRequestMsg);
rpc::CommandReplyBuilder cmdReplyBuilder;
- Command::findCommand("createIndexes")->run(txn, cmdRequest, &cmdReplyBuilder);
+ Command::findCommand("createIndexes")->run(opCtx, cmdRequest, &cmdReplyBuilder);
auto cmdReplyMsg = cmdReplyBuilder.done();
rpc::CommandReply cmdReply(&cmdReplyMsg);
auto cmdResult = cmdReply.getCommandReply();
@@ -273,12 +273,12 @@ static WriteResult::SingleResult createIndex(OperationContext* txn,
// Unlike normal inserts, it is not an error to "insert" a duplicate index.
long long n =
cmdResult["numIndexesAfter"].numberInt() - cmdResult["numIndexesBefore"].numberInt();
- CurOp::get(txn)->debug().ninserted += n;
+ CurOp::get(opCtx)->debug().ninserted += n;
return {n};
}
-static WriteResult performCreateIndexes(OperationContext* txn, const InsertOp& wholeOp) {
+static WriteResult performCreateIndexes(OperationContext* opCtx, const InsertOp& wholeOp) {
// Currently this creates each index independently. We could pass multiple indexes to
// createIndexes, but there is a lot of complexity involved in doing it correctly. For one
// thing, createIndexes only takes indexes to a single collection, but this batch could include
@@ -287,15 +287,15 @@ static WriteResult performCreateIndexes(OperationContext* txn, const InsertOp& w
// errors or stops at the first one. These could theoretically be worked around, but it doesn't
// seem worth it since users that want faster index builds should just use the createIndexes
// command rather than a legacy emulation.
- LastOpFixer lastOpFixer(txn, wholeOp.ns);
+ LastOpFixer lastOpFixer(opCtx, wholeOp.ns);
WriteResult out;
for (auto&& spec : wholeOp.documents) {
try {
lastOpFixer.startingOp();
- out.results.emplace_back(createIndex(txn, wholeOp.ns, spec));
+ out.results.emplace_back(createIndex(opCtx, wholeOp.ns, spec));
lastOpFixer.finishedOpSuccessfully();
} catch (const DBException& ex) {
- const bool canContinue = handleError(txn, ex, wholeOp, &out);
+ const bool canContinue = handleError(opCtx, ex, wholeOp, &out);
if (!canContinue)
break;
}
@@ -303,22 +303,22 @@ static WriteResult performCreateIndexes(OperationContext* txn, const InsertOp& w
return out;
}
-static void insertDocuments(OperationContext* txn,
+static void insertDocuments(OperationContext* opCtx,
Collection* collection,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end) {
// Intentionally not using a WRITE_CONFLICT_RETRY_LOOP. That is handled by the caller so it can
// react to oversized batches.
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
uassertStatusOK(collection->insertDocuments(
- txn, begin, end, &CurOp::get(txn)->debug(), /*enforceQuota*/ true));
+ opCtx, begin, end, &CurOp::get(opCtx)->debug(), /*enforceQuota*/ true));
wuow.commit();
}
/**
* Returns true if caller should try to insert more documents. Does nothing else if batch is empty.
*/
-static bool insertBatchAndHandleErrors(OperationContext* txn,
+static bool insertBatchAndHandleErrors(OperationContext* opCtx,
const InsertOp& wholeOp,
const std::vector<BSONObj>& batch,
LastOpFixer* lastOpFixer,
@@ -326,27 +326,27 @@ static bool insertBatchAndHandleErrors(OperationContext* txn,
if (batch.empty())
return true;
- auto& curOp = *CurOp::get(txn);
+ auto& curOp = *CurOp::get(opCtx);
boost::optional<AutoGetCollection> collection;
auto acquireCollection = [&] {
while (true) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
if (MONGO_FAIL_POINT(failAllInserts)) {
uasserted(ErrorCodes::InternalError, "failAllInserts failpoint active!");
}
- collection.emplace(txn, wholeOp.ns, MODE_IX);
+ collection.emplace(opCtx, wholeOp.ns, MODE_IX);
if (collection->getCollection())
break;
collection.reset(); // unlock.
- makeCollection(txn, wholeOp.ns);
+ makeCollection(opCtx, wholeOp.ns);
}
curOp.raiseDbProfileLevel(collection->getDb()->getProfilingLevel());
- assertCanWrite_inlock(txn, wholeOp.ns);
+ assertCanWrite_inlock(opCtx, wholeOp.ns);
};
try {
@@ -355,7 +355,7 @@ static bool insertBatchAndHandleErrors(OperationContext* txn,
// First try doing it all together. If all goes well, this is all we need to do.
// See Collection::_insertDocuments for why we do all capped inserts one-at-a-time.
lastOpFixer->startingOp();
- insertDocuments(txn, collection->getCollection(), batch.begin(), batch.end());
+ insertDocuments(opCtx, collection->getCollection(), batch.begin(), batch.end());
lastOpFixer->finishedOpSuccessfully();
globalOpCounters.gotInserts(batch.size());
std::fill_n(
@@ -379,7 +379,7 @@ static bool insertBatchAndHandleErrors(OperationContext* txn,
if (!collection)
acquireCollection();
lastOpFixer->startingOp();
- insertDocuments(txn, collection->getCollection(), it, it + 1);
+ insertDocuments(opCtx, collection->getCollection(), it, it + 1);
lastOpFixer->finishedOpSuccessfully();
out->results.emplace_back(WriteResult::SingleResult{1});
curOp.debug().ninserted++;
@@ -390,9 +390,9 @@ static bool insertBatchAndHandleErrors(OperationContext* txn,
throw;
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "insert", wholeOp.ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "insert", wholeOp.ns.ns());
} catch (const DBException& ex) {
- bool canContinue = handleError(txn, ex, wholeOp, out);
+ bool canContinue = handleError(opCtx, ex, wholeOp, out);
if (!canContinue)
return false;
}
@@ -401,15 +401,15 @@ static bool insertBatchAndHandleErrors(OperationContext* txn,
return true;
}
-WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
- invariant(!txn->lockState()->inAWriteUnitOfWork()); // Does own retries.
- auto& curOp = *CurOp::get(txn);
+WriteResult performInserts(OperationContext* opCtx, const InsertOp& wholeOp) {
+ invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // Does own retries.
+ auto& curOp = *CurOp::get(opCtx);
ON_BLOCK_EXIT([&] {
// This is the only part of finishCurOp we need to do for inserts because they reuse the
// top-level curOp. The rest is handled by the top-level entrypoint.
curOp.done();
- Top::get(txn->getServiceContext())
- .record(txn,
+ Top::get(opCtx->getServiceContext())
+ .record(opCtx,
wholeOp.ns.ns(),
LogicalOp::opInsert,
1 /* write locked*/,
@@ -420,7 +420,7 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
});
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setNS_inlock(wholeOp.ns.ns());
curOp.setLogicalOp_inlock(LogicalOp::opInsert);
curOp.ensureStarted();
@@ -430,11 +430,11 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
uassertStatusOK(userAllowedWriteNS(wholeOp.ns));
if (wholeOp.ns.isSystemDotIndexes()) {
- return performCreateIndexes(txn, wholeOp);
+ return performCreateIndexes(opCtx, wholeOp);
}
- DisableDocumentValidationIfTrue docValidationDisabler(txn, wholeOp.bypassDocumentValidation);
- LastOpFixer lastOpFixer(txn, wholeOp.ns);
+ DisableDocumentValidationIfTrue docValidationDisabler(opCtx, wholeOp.bypassDocumentValidation);
+ LastOpFixer lastOpFixer(opCtx, wholeOp.ns);
WriteResult out;
out.results.reserve(wholeOp.documents.size());
@@ -446,7 +446,7 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
for (auto&& doc : wholeOp.documents) {
const bool isLastDoc = (&doc == &wholeOp.documents.back());
- auto fixedDoc = fixDocumentForInsert(txn->getServiceContext(), doc);
+ auto fixedDoc = fixDocumentForInsert(opCtx->getServiceContext(), doc);
if (!fixedDoc.isOK()) {
// Handled after we insert anything in the batch to be sure we report errors in the
// correct order. In an ordered insert, if one of the docs ahead of us fails, we should
@@ -458,14 +458,14 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
continue; // Add more to batch before inserting.
}
- bool canContinue = insertBatchAndHandleErrors(txn, wholeOp, batch, &lastOpFixer, &out);
+ bool canContinue = insertBatchAndHandleErrors(opCtx, wholeOp, batch, &lastOpFixer, &out);
batch.clear(); // We won't need the current batch any more.
bytesInBatch = 0;
if (canContinue && !fixedDoc.isOK()) {
globalOpCounters.gotInsert();
canContinue = handleError(
- txn,
+ opCtx,
UserException(fixedDoc.getStatus().code(), fixedDoc.getStatus().reason()),
wholeOp,
&out);
@@ -478,13 +478,13 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
return out;
}
-static WriteResult::SingleResult performSingleUpdateOp(OperationContext* txn,
+static WriteResult::SingleResult performSingleUpdateOp(OperationContext* opCtx,
const NamespaceString& ns,
const UpdateOp::SingleUpdate& op) {
globalOpCounters.gotUpdate();
- auto& curOp = *CurOp::get(txn);
+ auto& curOp = *CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setNS_inlock(ns.ns());
curOp.setNetworkOp_inlock(dbUpdate);
curOp.setLogicalOp_inlock(LogicalOp::opUpdate);
@@ -503,18 +503,18 @@ static WriteResult::SingleResult performSingleUpdateOp(OperationContext* txn,
request.setUpsert(op.upsert);
request.setYieldPolicy(PlanExecutor::YIELD_AUTO); // ParsedUpdate overrides this for $isolated.
- ParsedUpdate parsedUpdate(txn, &request);
+ ParsedUpdate parsedUpdate(opCtx, &request);
uassertStatusOK(parsedUpdate.parseRequest());
- ScopedTransaction scopedXact(txn, MODE_IX);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
boost::optional<AutoGetCollection> collection;
while (true) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
if (MONGO_FAIL_POINT(failAllUpdates)) {
uasserted(ErrorCodes::InternalError, "failAllUpdates failpoint active!");
}
- collection.emplace(txn,
+ collection.emplace(opCtx,
ns,
MODE_IX, // DB is always IX, even if collection is X.
parsedUpdate.isIsolated() ? MODE_X : MODE_IX);
@@ -522,21 +522,21 @@ static WriteResult::SingleResult performSingleUpdateOp(OperationContext* txn,
break;
collection.reset(); // unlock.
- makeCollection(txn, ns);
+ makeCollection(opCtx, ns);
}
if (collection->getDb()) {
curOp.raiseDbProfileLevel(collection->getDb()->getProfilingLevel());
}
- assertCanWrite_inlock(txn, ns);
+ assertCanWrite_inlock(opCtx, ns);
auto exec = uassertStatusOK(
- getExecutorUpdate(txn, &curOp.debug(), collection->getCollection(), &parsedUpdate));
+ getExecutorUpdate(opCtx, &curOp.debug(), collection->getCollection(), &parsedUpdate));
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
uassertStatusOK(exec->executePlan());
@@ -544,7 +544,7 @@ static WriteResult::SingleResult performSingleUpdateOp(OperationContext* txn,
PlanSummaryStats summary;
Explain::getSummaryStats(*exec, &summary);
if (collection->getCollection()) {
- collection->getCollection()->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+ collection->getCollection()->infoCache()->notifyOfQuery(opCtx, summary.indexesUsed);
}
if (curOp.shouldDBProfile()) {
@@ -560,37 +560,37 @@ static WriteResult::SingleResult performSingleUpdateOp(OperationContext* txn,
const bool didInsert = !res.upserted.isEmpty();
const long long nMatchedOrInserted = didInsert ? 1 : res.numMatched;
- LastError::get(txn->getClient()).recordUpdate(res.existing, nMatchedOrInserted, res.upserted);
+ LastError::get(opCtx->getClient()).recordUpdate(res.existing, nMatchedOrInserted, res.upserted);
return {nMatchedOrInserted, res.numDocsModified, res.upserted};
}
-WriteResult performUpdates(OperationContext* txn, const UpdateOp& wholeOp) {
- invariant(!txn->lockState()->inAWriteUnitOfWork()); // Does own retries.
+WriteResult performUpdates(OperationContext* opCtx, const UpdateOp& wholeOp) {
+ invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // Does own retries.
uassertStatusOK(userAllowedWriteNS(wholeOp.ns));
- DisableDocumentValidationIfTrue docValidationDisabler(txn, wholeOp.bypassDocumentValidation);
- LastOpFixer lastOpFixer(txn, wholeOp.ns);
+ DisableDocumentValidationIfTrue docValidationDisabler(opCtx, wholeOp.bypassDocumentValidation);
+ LastOpFixer lastOpFixer(opCtx, wholeOp.ns);
WriteResult out;
out.results.reserve(wholeOp.updates.size());
for (auto&& singleOp : wholeOp.updates) {
// TODO: don't create nested CurOp for legacy writes.
// Add Command pointer to the nested CurOp.
- auto& parentCurOp = *CurOp::get(txn);
+ auto& parentCurOp = *CurOp::get(opCtx);
Command* cmd = parentCurOp.getCommand();
- CurOp curOp(txn);
+ CurOp curOp(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setCommand_inlock(cmd);
}
- ON_BLOCK_EXIT([&] { finishCurOp(txn, &curOp); });
+ ON_BLOCK_EXIT([&] { finishCurOp(opCtx, &curOp); });
try {
lastOpFixer.startingOp();
- out.results.emplace_back(performSingleUpdateOp(txn, wholeOp.ns, singleOp));
+ out.results.emplace_back(performSingleUpdateOp(opCtx, wholeOp.ns, singleOp));
lastOpFixer.finishedOpSuccessfully();
} catch (const DBException& ex) {
- const bool canContinue = handleError(txn, ex, wholeOp, &out);
+ const bool canContinue = handleError(opCtx, ex, wholeOp, &out);
if (!canContinue)
break;
}
@@ -599,13 +599,13 @@ WriteResult performUpdates(OperationContext* txn, const UpdateOp& wholeOp) {
return out;
}
-static WriteResult::SingleResult performSingleDeleteOp(OperationContext* txn,
+static WriteResult::SingleResult performSingleDeleteOp(OperationContext* opCtx,
const NamespaceString& ns,
const DeleteOp::SingleDelete& op) {
globalOpCounters.gotDelete();
- auto& curOp = *CurOp::get(txn);
+ auto& curOp = *CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setNS_inlock(ns.ns());
curOp.setNetworkOp_inlock(dbDelete);
curOp.setLogicalOp_inlock(LogicalOp::opDelete);
@@ -622,17 +622,17 @@ static WriteResult::SingleResult performSingleDeleteOp(OperationContext* txn,
request.setMulti(op.multi);
request.setYieldPolicy(PlanExecutor::YIELD_AUTO); // ParsedDelete overrides this for $isolated.
- ParsedDelete parsedDelete(txn, &request);
+ ParsedDelete parsedDelete(opCtx, &request);
uassertStatusOK(parsedDelete.parseRequest());
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
if (MONGO_FAIL_POINT(failAllRemoves)) {
uasserted(ErrorCodes::InternalError, "failAllRemoves failpoint active!");
}
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection collection(txn,
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection collection(opCtx,
ns,
MODE_IX, // DB is always IX, even if collection is X.
parsedDelete.isIsolated() ? MODE_X : MODE_IX);
@@ -640,14 +640,14 @@ static WriteResult::SingleResult performSingleDeleteOp(OperationContext* txn,
curOp.raiseDbProfileLevel(collection.getDb()->getProfilingLevel());
}
- assertCanWrite_inlock(txn, ns);
+ assertCanWrite_inlock(opCtx, ns);
auto exec = uassertStatusOK(
- getExecutorDelete(txn, &curOp.debug(), collection.getCollection(), &parsedDelete));
+ getExecutorDelete(opCtx, &curOp.debug(), collection.getCollection(), &parsedDelete));
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
uassertStatusOK(exec->executePlan());
@@ -657,7 +657,7 @@ static WriteResult::SingleResult performSingleDeleteOp(OperationContext* txn,
PlanSummaryStats summary;
Explain::getSummaryStats(*exec, &summary);
if (collection.getCollection()) {
- collection.getCollection()->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+ collection.getCollection()->infoCache()->notifyOfQuery(opCtx, summary.indexesUsed);
}
curOp.debug().setPlanSummaryMetrics(summary);
@@ -667,37 +667,37 @@ static WriteResult::SingleResult performSingleDeleteOp(OperationContext* txn,
curOp.debug().execStats = execStatsBob.obj();
}
- LastError::get(txn->getClient()).recordDelete(n);
+ LastError::get(opCtx->getClient()).recordDelete(n);
return {n};
}
-WriteResult performDeletes(OperationContext* txn, const DeleteOp& wholeOp) {
- invariant(!txn->lockState()->inAWriteUnitOfWork()); // Does own retries.
+WriteResult performDeletes(OperationContext* opCtx, const DeleteOp& wholeOp) {
+ invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // Does own retries.
uassertStatusOK(userAllowedWriteNS(wholeOp.ns));
- DisableDocumentValidationIfTrue docValidationDisabler(txn, wholeOp.bypassDocumentValidation);
- LastOpFixer lastOpFixer(txn, wholeOp.ns);
+ DisableDocumentValidationIfTrue docValidationDisabler(opCtx, wholeOp.bypassDocumentValidation);
+ LastOpFixer lastOpFixer(opCtx, wholeOp.ns);
WriteResult out;
out.results.reserve(wholeOp.deletes.size());
for (auto&& singleOp : wholeOp.deletes) {
// TODO: don't create nested CurOp for legacy writes.
// Add Command pointer to the nested CurOp.
- auto& parentCurOp = *CurOp::get(txn);
+ auto& parentCurOp = *CurOp::get(opCtx);
Command* cmd = parentCurOp.getCommand();
- CurOp curOp(txn);
+ CurOp curOp(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setCommand_inlock(cmd);
}
- ON_BLOCK_EXIT([&] { finishCurOp(txn, &curOp); });
+ ON_BLOCK_EXIT([&] { finishCurOp(opCtx, &curOp); });
try {
lastOpFixer.startingOp();
- out.results.emplace_back(performSingleDeleteOp(txn, wholeOp.ns, singleOp));
+ out.results.emplace_back(performSingleDeleteOp(opCtx, wholeOp.ns, singleOp));
lastOpFixer.finishedOpSuccessfully();
} catch (const DBException& ex) {
- const bool canContinue = handleError(txn, ex, wholeOp, &out);
+ const bool canContinue = handleError(opCtx, ex, wholeOp, &out);
if (!canContinue)
break;
}
diff --git a/src/mongo/db/ops/write_ops_exec.h b/src/mongo/db/ops/write_ops_exec.h
index 49d3d2e0cf1..f67a8cee657 100644
--- a/src/mongo/db/ops/write_ops_exec.h
+++ b/src/mongo/db/ops/write_ops_exec.h
@@ -76,8 +76,8 @@ struct WriteResult {
* exception being thrown from these functions. Callers are responsible for managing LastError in
* that case. This should generally be combined with LastError handling from parse failures.
*/
-WriteResult performInserts(OperationContext* txn, const InsertOp& op);
-WriteResult performUpdates(OperationContext* txn, const UpdateOp& op);
-WriteResult performDeletes(OperationContext* txn, const DeleteOp& op);
+WriteResult performInserts(OperationContext* opCtx, const InsertOp& op);
+WriteResult performUpdates(OperationContext* opCtx, const UpdateOp& op);
+WriteResult performDeletes(OperationContext* opCtx, const DeleteOp& op);
} // namespace mongo