author     Maria van Keulen <maria@mongodb.com>    2017-03-07 12:00:08 -0500
committer  Maria van Keulen <maria@mongodb.com>    2017-03-07 12:00:08 -0500
commit     589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79
tree       c7a090ffdd56a91ae677e2492c61b820af44f964 /src/mongo/dbtests
parent     3cba97198638df3750e3b455e2ad57af7ee536ae
download   mongo-589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79.tar.gz
SERVER-27938 Rename all OperationContext variables to opCtx
This commit is an automated rename of all whole-word instances of txn,
_txn, and txnPtr to opCtx, _opCtx, and opCtxPtr, respectively, in all
.cpp and .h files in src/mongo.
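The rename is purely mechanical, which is why it could be automated. As a
minimal sketch, assuming a plain whole-word regex pass over the tree (this is
illustrative, not the actual tool used for this commit), the transformation
looks like the following. Whole-word matching is also why member names such as
_txnPtr survive unchanged in the hunks below.

// Hypothetical sketch of the whole-word rename described above. This is
// NOT the script used for SERVER-27938; it only illustrates the rule:
// txn/_txn/txnPtr -> opCtx/_opCtx/opCtxPtr in every .cpp and .h file
// under src/mongo. Requires C++17 (<filesystem>).
#include <filesystem>
#include <fstream>
#include <regex>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

namespace fs = std::filesystem;

int main() {
    // \b restricts each match to whole words, so "txnPtr" is renamed to
    // "opCtxPtr" while "_txnPtr" matches none of the patterns and is kept.
    const std::vector<std::pair<std::regex, std::string>> renames = {
        {std::regex(R"(\btxnPtr\b)"), "opCtxPtr"},
        {std::regex(R"(\b_txn\b)"), "_opCtx"},
        {std::regex(R"(\btxn\b)"), "opCtx"},
    };

    for (const auto& entry : fs::recursive_directory_iterator("src/mongo")) {
        const fs::path& path = entry.path();
        if (path.extension() != ".cpp" && path.extension() != ".h")
            continue;

        // Read the whole file, apply every rename, and write it back.
        std::ifstream in(path);
        std::stringstream buffer;
        buffer << in.rdbuf();
        std::string text = buffer.str();

        for (const auto& [pattern, replacement] : renames)
            text = std::regex_replace(text, pattern, replacement);

        std::ofstream out(path, std::ios::trunc);
        out << text;
    }
    return 0;
}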
Diffstat (limited to 'src/mongo/dbtests')
46 files changed, 1599 insertions, 1555 deletions
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index 9cb2d0b70c1..722618b9320 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -47,17 +47,17 @@ using std::vector;
 class Base {
 public:
     Base(string coll) : _ns("test." + coll) {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.dropDatabase("test");
     }
 
     virtual ~Base() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.dropCollection(_ns);
     }
@@ -74,20 +74,20 @@ class DropIndex : public Base {
 public:
     DropIndex() : Base("dropindex") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.insert(ns(), BSON("x" << 2));
         ASSERT_EQUALS(1u, db.getIndexSpecs(ns()).size());
 
-        ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("x" << 1)));
+        ASSERT_OK(dbtests::createIndex(&opCtx, ns(), BSON("x" << 1)));
         ASSERT_EQUALS(2u, db.getIndexSpecs(ns()).size());
 
         db.dropIndex(ns(), BSON("x" << 1));
         ASSERT_EQUALS(1u, db.getIndexSpecs(ns()).size());
 
-        ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("x" << 1)));
+        ASSERT_OK(dbtests::createIndex(&opCtx, ns(), BSON("x" << 1)));
         ASSERT_EQUALS(2u, db.getIndexSpecs(ns()).size());
 
         db.dropIndexes(ns());
@@ -104,11 +104,11 @@ class BuildIndex : public Base {
 public:
     BuildIndex() : Base("buildIndex") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
 
-        OldClientWriteContext ctx(&txn, ns());
-        DBDirectClient db(&txn);
+        OldClientWriteContext ctx(&opCtx, ns());
+        DBDirectClient db(&opCtx);
 
         db.insert(ns(), BSON("x" << 1 << "y" << 2));
         db.insert(ns(), BSON("x" << 2 << "y" << 2));
@@ -117,19 +117,19 @@ public:
         ASSERT(collection);
         IndexCatalog* indexCatalog = collection->getIndexCatalog();
 
-        ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&txn));
+        ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&opCtx));
         // _id index
         ASSERT_EQUALS(1U, db.getIndexSpecs(ns()).size());
 
         ASSERT_EQUALS(ErrorCodes::DuplicateKey,
-                      dbtests::createIndex(&txn, ns(), BSON("y" << 1), true));
+                      dbtests::createIndex(&opCtx, ns(), BSON("y" << 1), true));
 
-        ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&txn));
+        ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&opCtx));
         ASSERT_EQUALS(1U, db.getIndexSpecs(ns()).size());
 
-        ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("x" << 1), true));
+        ASSERT_OK(dbtests::createIndex(&opCtx, ns(), BSON("x" << 1), true));
 
-        ASSERT_EQUALS(2, indexCatalog->numIndexesReady(&txn));
+        ASSERT_EQUALS(2, indexCatalog->numIndexesReady(&opCtx));
         ASSERT_EQUALS(2U, db.getIndexSpecs(ns()).size());
     }
 };
@@ -138,16 +138,16 @@ class CS_10 : public Base {
 public:
     CS_10() : Base("CS_10") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         const string longs(770, 'c');
         for (int i = 0; i < 1111; ++i) {
             db.insert(ns(), BSON("a" << i << "b" << longs));
         }
 
-        ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("a" << 1 << "b" << 1)));
+        ASSERT_OK(dbtests::createIndex(&opCtx, ns(), BSON("a" << 1 << "b" << 1)));
 
         unique_ptr<DBClientCursor> c = db.query(ns(), Query().sort(BSON("a" << 1 << "b" << 1)));
         ASSERT_EQUALS(1111, c->itcount());
@@ -158,9 +158,9 @@ class PushBack : public Base {
 public:
     PushBack() : Base("PushBack") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         for (int i = 0; i < 10; ++i) {
             db.insert(ns(), BSON("i" << i));
@@ -204,9 +204,9 @@ class Create : public Base {
 public:
     Create() : Base("Create") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createCollection("unittests.clienttests.create", 4096, true);
         BSONObj info;
@@ -237,9 +237,9 @@ class CreateSimpleV1Index : public Base {
 public:
     CreateSimpleV1Index() : Base("CreateSimpleV1Index") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createIndex(ns(), IndexSpec().addKey("aField").version(1));
     }
@@ -249,9 +249,9 @@ class CreateSimpleNamedV1Index : public Base {
 public:
     CreateSimpleNamedV1Index() : Base("CreateSimpleNamedV1Index") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createIndex(ns(), IndexSpec().addKey("aField").version(1).name("aFieldV1Index"));
     }
@@ -261,9 +261,9 @@ class CreateCompoundNamedV1Index : public Base {
 public:
     CreateCompoundNamedV1Index() : Base("CreateCompoundNamedV1Index") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createIndex(ns(),
                        IndexSpec()
@@ -279,9 +279,9 @@ public:
     CreateUniqueSparseDropDupsIndexInBackground()
         : Base("CreateUniqueSparseDropDupsIndexInBackground") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createIndex(
             ns(), IndexSpec().addKey("aField").background().unique().sparse().dropDuplicates());
@@ -292,9 +292,9 @@ class CreateComplexTextIndex : public Base {
 public:
     CreateComplexTextIndex() : Base("CreateComplexTextIndex") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createIndex(ns(),
                        IndexSpec()
@@ -311,9 +311,9 @@ class Create2DIndex : public Base {
 public:
     Create2DIndex() : Base("Create2DIndex") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createIndex(ns(),
                        IndexSpec()
@@ -328,9 +328,9 @@ class CreateHaystackIndex : public Base {
 public:
     CreateHaystackIndex() : Base("CreateHaystackIndex") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createIndex(ns(),
                        IndexSpec()
@@ -344,9 +344,9 @@ class Create2DSphereIndex : public Base {
 public:
     Create2DSphereIndex() : Base("Create2DSphereIndex") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createIndex(ns(),
                        IndexSpec()
@@ -359,9 +359,9 @@ class CreateHashedIndex : public Base {
 public:
     CreateHashedIndex() : Base("CreateHashedIndex") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createIndex(ns(), IndexSpec().addKey("aField", IndexSpec::kIndexTypeHashed));
     }
@@ -371,9 +371,9 @@ class CreateIndexFailure : public Base {
 public:
     CreateIndexFailure() : Base("CreateIndexFailure") {}
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient db(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient db(&opCtx);
 
         db.createIndex(ns(), IndexSpec().addKey("aField"));
         ASSERT_THROWS(db.createIndex(ns(), IndexSpec().addKey("aField").unique()), UserException);
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
index a310184db78..a2fe4911e02 100644
--- a/src/mongo/dbtests/commandtests.cpp
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -47,7 +47,7 @@ using std::string;
  */
 class Base {
 public:
-    Base() : db(&_txn) {
+    Base() : db(&_opCtx) {
         db.dropCollection(ns());
     }
 
@@ -62,16 +62,16 @@ public:
     }
 
     const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
-    OperationContext& _txn = *_txnPtr;
+    OperationContext& _opCtx = *_txnPtr;
     DBDirectClient db;
 };
 
 // one namespace per command
 namespace FileMD5 {
 struct Base {
-    Base() : db(&_txn) {
+    Base() : db(&_opCtx) {
         db.dropCollection(ns());
-        ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("files_id" << 1 << "n" << 1)));
+        ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("files_id" << 1 << "n" << 1)));
     }
 
     const char* ns() {
@@ -79,7 +79,7 @@ struct Base {
     }
 
     const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
-    OperationContext& _txn = *_txnPtr;
+    OperationContext& _opCtx = *_txnPtr;
     DBDirectClient db;
 };
 struct Type0 : Base {
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 689b87fe599..e20410a3f6c 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -51,19 +51,19 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
 class Base {
 public:
     Base()
-        : _scopedXact(&_txn, MODE_IX),
-          _lk(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
-          _context(&_txn, ns()),
-          _client(&_txn) {
+        : _scopedXact(&_opCtx, MODE_IX),
+          _lk(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
+          _context(&_opCtx, ns()),
+          _client(&_opCtx) {
         _database = _context.db();
 
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             _collection = _database->getCollection(ns());
             if (_collection) {
-                _database->dropCollection(&_txn, ns());
+                _database->dropCollection(&_opCtx, ns());
             }
-            _collection = _database->createCollection(&_txn, ns());
+            _collection = _database->createCollection(&_opCtx, ns());
             wunit.commit();
         }
 
@@ -71,8 +71,8 @@ public:
     }
 
     ~Base() {
         try {
-            WriteUnitOfWork wunit(&_txn);
-            uassertStatusOK(_database->dropCollection(&_txn, ns()));
+            WriteUnitOfWork wunit(&_opCtx);
+            uassertStatusOK(_database->dropCollection(&_opCtx, ns()));
             wunit.commit();
         } catch (...) {
             FAIL("Exception while cleaning up collection");
@@ -85,7 +85,7 @@ protected:
     }
 
     void addIndex(const BSONObj& key) {
-        Helpers::ensureIndex(&_txn,
+        Helpers::ensureIndex(&_opCtx,
                              _collection,
                              key,
                              kIndexVersion,
@@ -94,7 +94,7 @@ protected:
     }
 
     void insert(const char* s) {
-        WriteUnitOfWork wunit(&_txn);
+        WriteUnitOfWork wunit(&_opCtx);
         const BSONObj o = fromjson(s);
         OpDebug* const nullOpDebug = nullptr;
 
@@ -104,16 +104,16 @@ protected:
             oid.init();
             b.appendOID("_id", &oid);
             b.appendElements(o);
-            _collection->insertDocument(&_txn, b.obj(), nullOpDebug, false);
+            _collection->insertDocument(&_opCtx, b.obj(), nullOpDebug, false);
         } else {
-            _collection->insertDocument(&_txn, o, nullOpDebug, false);
+            _collection->insertDocument(&_opCtx, o, nullOpDebug, false);
         }
         wunit.commit();
     }
 
     const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
-    OperationContext& _txn = *_txnPtr;
+    OperationContext& _opCtx = *_txnPtr;
     ScopedTransaction _scopedXact;
     Lock::DBLock _lk;
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index fd0835ea4a2..0d2255ebea9 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -60,9 +60,9 @@ public:
     RemoveRange() : _min(4), _max(8) {}
 
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         for (int i = 0; i < 10; ++i) {
             client.insert(ns, BSON("_id" << i));
@@ -70,18 +70,18 @@ public:
 
         {
             // Remove _id range [_min, _max).
-            ScopedTransaction transaction(&txn, MODE_IX);
-            Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), MODE_X);
-            OldClientContext ctx(&txn, ns);
+            ScopedTransaction transaction(&opCtx, MODE_IX);
+            Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(ns), MODE_X);
+            OldClientContext ctx(&opCtx, ns);
 
             KeyRange range(ns, BSON("_id" << _min), BSON("_id" << _max), BSON("_id" << 1));
             mongo::WriteConcernOptions dummyWriteConcern;
             Helpers::removeRange(
-                &txn, range, BoundInclusion::kIncludeStartKeyOnly, dummyWriteConcern);
+                &opCtx, range, BoundInclusion::kIncludeStartKeyOnly, dummyWriteConcern);
         }
 
         // Check that the expected documents remain.
-        ASSERT_BSONOBJ_EQ(expected(), docs(&txn));
+        ASSERT_BSONOBJ_EQ(expected(), docs(&opCtx));
     }
 
 private:
@@ -96,8 +96,8 @@ private:
         return bab.arr();
     }
 
-    BSONArray docs(OperationContext* txn) const {
-        DBDirectClient client(txn);
+    BSONArray docs(OperationContext* opCtx) const {
+        DBDirectClient client(opCtx);
         unique_ptr<DBClientCursor> cursor = client.query(ns, Query().hint(BSON("_id" << 1)));
         BSONArrayBuilder bab;
         while (cursor->more()) {
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
index 75883c53091..586ecb3ea13 100644
--- a/src/mongo/dbtests/dbtests.cpp
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -80,7 +80,7 @@ void initWireSpec() {
     spec.outgoing.maxWireVersion = COMMANDS_ACCEPT_WRITE_CONCERN;
 }
 
-Status createIndex(OperationContext* txn, StringData ns, const BSONObj& keys, bool unique) {
+Status createIndex(OperationContext* opCtx, StringData ns, const BSONObj& keys, bool unique) {
     BSONObjBuilder specBuilder;
     specBuilder.append("name", DBClientBase::genIndexName(keys));
     specBuilder.append("ns", ns);
@@ -89,19 +89,19 @@ Status createIndex(OperationContext* txn, StringData ns, const BSONObj& keys, bo
     if (unique) {
         specBuilder.appendBool("unique", true);
     }
-    return createIndexFromSpec(txn, ns, specBuilder.done());
+    return createIndexFromSpec(opCtx, ns, specBuilder.done());
 }
 
-Status createIndexFromSpec(OperationContext* txn, StringData ns, const BSONObj& spec) {
-    AutoGetOrCreateDb autoDb(txn, nsToDatabaseSubstring(ns), MODE_X);
+Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj& spec) {
+    AutoGetOrCreateDb autoDb(opCtx, nsToDatabaseSubstring(ns), MODE_X);
     Collection* coll;
     {
-        WriteUnitOfWork wunit(txn);
-        coll = autoDb.getDb()->getOrCreateCollection(txn, ns);
+        WriteUnitOfWork wunit(opCtx);
+        coll = autoDb.getDb()->getOrCreateCollection(opCtx, ns);
         invariant(coll);
         wunit.commit();
     }
-    MultiIndexBlock indexer(txn, coll);
+    MultiIndexBlock indexer(opCtx, coll);
     Status status = indexer.init(spec).getStatus();
     if (status == ErrorCodes::IndexAlreadyExists) {
         return Status::OK();
@@ -113,7 +113,7 @@ Status createIndexFromSpec(OperationContext* txn, StringData ns, const BSONObj&
     if (!status.isOK()) {
         return status;
     }
-    WriteUnitOfWork wunit(txn);
+    WriteUnitOfWork wunit(opCtx);
     indexer.commit();
     wunit.commit();
     return Status::OK();
diff --git a/src/mongo/dbtests/dbtests.h b/src/mongo/dbtests/dbtests.h
index 86de296e53d..68b7b0825fa 100644
--- a/src/mongo/dbtests/dbtests.h
+++ b/src/mongo/dbtests/dbtests.h
@@ -48,11 +48,14 @@ namespace dbtests {
 /**
  * Creates an index if it does not already exist.
  */
-Status createIndex(OperationContext* txn, StringData ns, const BSONObj& keys, bool unique = false);
+Status createIndex(OperationContext* opCtx,
+                   StringData ns,
+                   const BSONObj& keys,
+                   bool unique = false);
 
 /**
  * Creates an index from a BSON spec, if it does not already exist.
  */
-Status createIndexFromSpec(OperationContext* txn, StringData ns, const BSONObj& spec);
+Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj& spec);
 
 }  // namespace dbtests
 }  // namespace mongo
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
index 8412ba0c1e4..b9b06e81892 100644
--- a/src/mongo/dbtests/directclienttests.cpp
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -61,9 +61,9 @@ const char* ns = "a.b";
 class Capped : public ClientBase {
 public:
     virtual void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
         for (int pass = 0; pass < 3; pass++) {
             client.createCollection(ns, 1024 * 1024, true, 999);
             for (int j = 0; j < pass * 3; j++)
@@ -92,9 +92,9 @@ public:
 class InsertMany : ClientBase {
 public:
     virtual void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         vector<BSONObj> objs;
         objs.push_back(BSON("_id" << 1));
@@ -117,9 +117,9 @@ public:
 class BadNSCmd : ClientBase {
 public:
     virtual void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         BSONObj result;
         BSONObj cmdObj = BSON("count"
@@ -131,9 +131,9 @@ public:
 class BadNSQuery : ClientBase {
 public:
     virtual void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         unique_ptr<DBClientCursor> cursor = client.query("", Query(), 1);
         ASSERT(cursor->more());
@@ -146,9 +146,9 @@ public:
 class BadNSGetMore : ClientBase {
 public:
     virtual void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         unique_ptr<DBClientCursor> cursor = client.getMore("", 1, 1);
         ASSERT(cursor->more());
@@ -161,9 +161,9 @@ public:
 class BadNSInsert : ClientBase {
 public:
     virtual void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.insert("", BSONObj(), 0);
         ASSERT(!client.getLastError().empty());
@@ -173,9 +173,9 @@ public:
 class BadNSUpdate : ClientBase {
 public:
     virtual void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.update("", Query(), BSON("$set" << BSON("x" << 1)));
         ASSERT(!client.getLastError().empty());
@@ -185,9 +185,9 @@ public:
 class BadNSRemove : ClientBase {
 public:
     virtual void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.remove("", Query());
         ASSERT(!client.getLastError().empty());
diff --git a/src/mongo/dbtests/extensions_callback_real_test.cpp b/src/mongo/dbtests/extensions_callback_real_test.cpp
index 60b1c662f79..7ea4e6a4af0 100644
--- a/src/mongo/dbtests/extensions_callback_real_test.cpp
+++ b/src/mongo/dbtests/extensions_callback_real_test.cpp
@@ -49,45 +49,45 @@ public:
     ExtensionsCallbackRealTest() : _nss("unittests.extensions_callback_real_test") {}
 
     void setUp() final {
-        AutoGetOrCreateDb autoDb(&_txn, _nss.db(), MODE_X);
+        AutoGetOrCreateDb autoDb(&_opCtx, _nss.db(), MODE_X);
         Database* database = autoDb.getDb();
         {
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT(database->createCollection(&_txn, _nss.ns()));
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT(database->createCollection(&_opCtx, _nss.ns()));
             wunit.commit();
         }
     }
 
     void tearDown() final {
-        AutoGetDb autoDb(&_txn, _nss.db(), MODE_X);
+        AutoGetDb autoDb(&_opCtx, _nss.db(), MODE_X);
         Database* database = autoDb.getDb();
         if (!database) {
             return;
         }
         {
-            WriteUnitOfWork wunit(&_txn);
-            static_cast<void>(database->dropCollection(&_txn, _nss.ns()));
+            WriteUnitOfWork wunit(&_opCtx);
+            static_cast<void>(database->dropCollection(&_opCtx, _nss.ns()));
             wunit.commit();
         }
     }
 
 protected:
     const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
-    OperationContext& _txn = *_txnPtr;
+    OperationContext& _opCtx = *_txnPtr;
     const NamespaceString _nss;
 };
 
 TEST_F(ExtensionsCallbackRealTest, TextNoIndex) {
     BSONObj query = fromjson("{$text: {$search:\"awesome\"}}");
     StatusWithMatchExpression result =
-        ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement());
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement());
 
     ASSERT_NOT_OK(result.getStatus());
     ASSERT_EQ(ErrorCodes::IndexNotFound, result.getStatus());
 }
 
 TEST_F(ExtensionsCallbackRealTest, TextBasic) {
-    ASSERT_OK(dbtests::createIndex(&_txn,
+    ASSERT_OK(dbtests::createIndex(&_opCtx,
                                    _nss.ns(),
                                    BSON("a"
                                         << "text"),
@@ -95,7 +95,7 @@ TEST_F(ExtensionsCallbackRealTest, TextBasic) {
 
     BSONObj query = fromjson("{$text: {$search:\"awesome\", $language:\"english\"}}");
     auto expr =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+        unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
 
     ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
     std::unique_ptr<TextMatchExpression> textExpr(
@@ -109,7 +109,7 @@ TEST_F(ExtensionsCallbackRealTest, TextBasic) {
 }
 
 TEST_F(ExtensionsCallbackRealTest, TextLanguageError) {
-    ASSERT_OK(dbtests::createIndex(&_txn,
+    ASSERT_OK(dbtests::createIndex(&_opCtx,
                                    _nss.ns(),
                                    BSON("a"
                                         << "text"),
@@ -117,13 +117,13 @@ TEST_F(ExtensionsCallbackRealTest, TextLanguageError) {
 
     BSONObj query = fromjson("{$text: {$search:\"awesome\", $language:\"spanglish\"}}");
     StatusWithMatchExpression result =
-        ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement());
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement());
 
     ASSERT_NOT_OK(result.getStatus());
 }
 
 TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveTrue) {
-    ASSERT_OK(dbtests::createIndex(&_txn,
+    ASSERT_OK(dbtests::createIndex(&_opCtx,
                                    _nss.ns(),
                                    BSON("a"
                                         << "text"),
@@ -131,7 +131,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveTrue) {
 
     BSONObj query = fromjson("{$text: {$search:\"awesome\", $caseSensitive: true}}");
     auto expr =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+        unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
 
     ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
     std::unique_ptr<TextMatchExpression> textExpr(
@@ -140,7 +140,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveTrue) {
 }
 
 TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveFalse) {
-    ASSERT_OK(dbtests::createIndex(&_txn,
+    ASSERT_OK(dbtests::createIndex(&_opCtx,
                                    _nss.ns(),
                                    BSON("a"
                                         << "text"),
@@ -148,7 +148,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveFalse) {
 
     BSONObj query = fromjson("{$text: {$search:\"awesome\", $caseSensitive: false}}");
     auto expr =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+        unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
 
     ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
     std::unique_ptr<TextMatchExpression> textExpr(
@@ -157,7 +157,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveFalse) {
 }
 
 TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveError) {
-    ASSERT_OK(dbtests::createIndex(&_txn,
+    ASSERT_OK(dbtests::createIndex(&_opCtx,
                                    _nss.ns(),
                                    BSON("a"
                                         << "text"),
@@ -165,13 +165,13 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveError) {
 
     BSONObj query = fromjson("{$text:{$search:\"awesome\", $caseSensitive: 0}}");
     StatusWithMatchExpression result =
-        ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement());
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement());
 
     ASSERT_NOT_OK(result.getStatus());
 }
 
 TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveTrue) {
-    ASSERT_OK(dbtests::createIndex(&_txn,
+    ASSERT_OK(dbtests::createIndex(&_opCtx,
                                    _nss.ns(),
                                    BSON("a"
                                         << "text"),
@@ -179,7 +179,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveTrue) {
 
     BSONObj query = fromjson("{$text: {$search:\"awesome\", $diacriticSensitive: true}}");
     auto expr =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+        unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
 
     ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
     std::unique_ptr<TextMatchExpression> textExpr(
@@ -188,7 +188,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveTrue) {
 }
 
 TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveFalse) {
-    ASSERT_OK(dbtests::createIndex(&_txn,
+    ASSERT_OK(dbtests::createIndex(&_opCtx,
                                    _nss.ns(),
                                    BSON("a"
                                         << "text"),
@@ -196,7 +196,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveFalse) {
 
     BSONObj query = fromjson("{$text: {$search:\"awesome\", $diacriticSensitive: false}}");
     auto expr =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+        unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
 
     ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
     std::unique_ptr<TextMatchExpression> textExpr(
@@ -205,7 +205,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveFalse) {
 }
 
 TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveError) {
-    ASSERT_OK(dbtests::createIndex(&_txn,
+    ASSERT_OK(dbtests::createIndex(&_opCtx,
                                    _nss.ns(),
                                    BSON("a"
                                         << "text"),
@@ -213,13 +213,13 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveError) {
 
     BSONObj query = fromjson("{$text:{$search:\"awesome\", $diacriticSensitive: 0}}");
     StatusWithMatchExpression result =
-        ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement());
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement());
 
     ASSERT_NOT_OK(result.getStatus());
 }
 
 TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveAndCaseSensitiveTrue) {
-    ASSERT_OK(dbtests::createIndex(&_txn,
+    ASSERT_OK(dbtests::createIndex(&_opCtx,
                                    _nss.ns(),
                                    BSON("a"
                                         << "text"),
@@ -228,7 +228,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveAndCaseSensitiveTrue) {
     BSONObj query =
         fromjson("{$text: {$search:\"awesome\", $diacriticSensitive: true, $caseSensitive: true}}");
     auto expr =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+        unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
 
     ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
     std::unique_ptr<TextMatchExpression> textExpr(
@@ -245,14 +245,14 @@ TEST_F(ExtensionsCallbackRealTest, WhereExpressionsWithSameScopeHaveSameBSONRepr
     const char code[] = "function(){ return a; }";
 
     BSONObj query1 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
-    auto expr1 =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query1.firstElement()));
+    auto expr1 = unittest::assertGet(
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query1.firstElement()));
     BSONObjBuilder builder1;
     expr1->serialize(&builder1);
 
     BSONObj query2 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
-    auto expr2 =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query2.firstElement()));
+    auto expr2 = unittest::assertGet(
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query2.firstElement()));
     BSONObjBuilder builder2;
     expr2->serialize(&builder2);
 
@@ -264,14 +264,14 @@ TEST_F(ExtensionsCallbackRealTest,
     const char code[] = "function(){ return a; }";
 
     BSONObj query1 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
-    auto expr1 =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query1.firstElement()));
+    auto expr1 = unittest::assertGet(
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query1.firstElement()));
     BSONObjBuilder builder1;
     expr1->serialize(&builder1);
 
     BSONObj query2 = BSON("$where" << BSONCodeWScope(code, BSON("a" << false)));
-    auto expr2 =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query2.firstElement()));
+    auto expr2 = unittest::assertGet(
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query2.firstElement()));
     BSONObjBuilder builder2;
     expr2->serialize(&builder2);
 
@@ -282,12 +282,12 @@ TEST_F(ExtensionsCallbackRealTest, WhereExpressionsWithSameScopeAreEquivalent) {
     const char code[] = "function(){ return a; }";
 
     BSONObj query1 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
-    auto expr1 =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query1.firstElement()));
+    auto expr1 = unittest::assertGet(
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query1.firstElement()));
 
     BSONObj query2 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
-    auto expr2 =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query2.firstElement()));
+    auto expr2 = unittest::assertGet(
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query2.firstElement()));
 
     ASSERT(expr1->equivalent(expr2.get()));
     ASSERT(expr2->equivalent(expr1.get()));
@@ -297,12 +297,12 @@ TEST_F(ExtensionsCallbackRealTest, WhereExpressionsWithDifferentScopesAreNotEqui
     const char code[] = "function(){ return a; }";
 
     BSONObj query1 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
-    auto expr1 =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query1.firstElement()));
+    auto expr1 = unittest::assertGet(
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query1.firstElement()));
 
     BSONObj query2 = BSON("$where" << BSONCodeWScope(code, BSON("a" << false)));
-    auto expr2 =
-        unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query2.firstElement()));
+    auto expr2 = unittest::assertGet(
+        ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query2.firstElement()));
 
     ASSERT_FALSE(expr1->equivalent(expr2.get()));
     ASSERT_FALSE(expr2->equivalent(expr1.get()));
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index 68537ee5705..5a6e6099150 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -82,8 +82,8 @@ int runDbTests(int argc, char** argv) {
 
     // DBTests run as if in the database, so allow them to create direct clients.
     DBDirectClientFactory::get(globalServiceContext)
-        .registerImplementation([](OperationContext* txn) {
-            return std::unique_ptr<DBClientBase>(new DBDirectClient(txn));
+        .registerImplementation([](OperationContext* opCtx) {
+            return std::unique_ptr<DBClientBase>(new DBDirectClient(opCtx));
         });
 
     srand((unsigned)frameworkGlobalParams.seed);
diff --git a/src/mongo/dbtests/gle_test.cpp b/src/mongo/dbtests/gle_test.cpp
index 75233fb048d..1a3b1f9d2a5 100644
--- a/src/mongo/dbtests/gle_test.cpp
+++ b/src/mongo/dbtests/gle_test.cpp
@@ -50,9 +50,9 @@ static const char* const _ns = "unittests.gle";
 class GetLastErrorCommandFailure {
 public:
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.insert(_ns,
                       BSON("test"
@@ -70,9 +70,9 @@ public:
 class GetLastErrorClean {
 public:
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.insert(_ns,
                       BSON("test"
@@ -90,9 +90,9 @@ public:
 class GetLastErrorFromDup {
 public:
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.insert(_ns, BSON("_id" << 1));
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index 068ede905a1..fd1f10f8a55 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -38,44 +38,44 @@ static const char* const _ns = "unittests.indexcatalog";
 class IndexIteratorTests {
 public:
     IndexIteratorTests() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        ScopedTransaction transaction(&txn, MODE_IX);
-        Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
-        OldClientContext ctx(&txn, _ns);
-        WriteUnitOfWork wuow(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        ScopedTransaction transaction(&opCtx, MODE_IX);
+        Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+        OldClientContext ctx(&opCtx, _ns);
+        WriteUnitOfWork wuow(&opCtx);
 
         _db = ctx.db();
-        _coll = _db->createCollection(&txn, _ns);
+        _coll = _db->createCollection(&opCtx, _ns);
         _catalog = _coll->getIndexCatalog();
         wuow.commit();
     }
 
     ~IndexIteratorTests() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        ScopedTransaction transaction(&txn, MODE_IX);
-        Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
-        OldClientContext ctx(&txn, _ns);
-        WriteUnitOfWork wuow(&txn);
-
-        _db->dropCollection(&txn, _ns);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        ScopedTransaction transaction(&opCtx, MODE_IX);
+        Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+        OldClientContext ctx(&opCtx, _ns);
+        WriteUnitOfWork wuow(&opCtx);
+
+        _db->dropCollection(&opCtx, _ns);
         wuow.commit();
     }
 
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        OldClientWriteContext ctx(&txn, _ns);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        OldClientWriteContext ctx(&opCtx, _ns);
 
-        int numFinishedIndexesStart = _catalog->numIndexesReady(&txn);
+        int numFinishedIndexesStart = _catalog->numIndexesReady(&opCtx);
 
-        dbtests::createIndex(&txn, _ns, BSON("x" << 1));
-        dbtests::createIndex(&txn, _ns, BSON("y" << 1));
+        dbtests::createIndex(&opCtx, _ns, BSON("x" << 1));
+        dbtests::createIndex(&opCtx, _ns, BSON("y" << 1));
 
-        ASSERT_TRUE(_catalog->numIndexesReady(&txn) == numFinishedIndexesStart + 2);
+        ASSERT_TRUE(_catalog->numIndexesReady(&opCtx) == numFinishedIndexesStart + 2);
 
-        IndexCatalog::IndexIterator ii = _catalog->getIndexIterator(&txn, false);
+        IndexCatalog::IndexIterator ii = _catalog->getIndexIterator(&opCtx, false);
         int indexesIterated = 0;
         bool foundIndex = false;
         while (ii.more()) {
@@ -91,7 +91,7 @@ public:
             }
         }
 
-        ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady(&txn));
+        ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady(&opCtx));
         ASSERT_TRUE(foundIndex);
     }
 
@@ -107,64 +107,64 @@ private:
 class RefreshEntry {
 public:
     RefreshEntry() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        ScopedTransaction transaction(&txn, MODE_IX);
-        Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
-        OldClientContext ctx(&txn, _ns);
-        WriteUnitOfWork wuow(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        ScopedTransaction transaction(&opCtx, MODE_IX);
+        Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+        OldClientContext ctx(&opCtx, _ns);
+        WriteUnitOfWork wuow(&opCtx);
 
         _db = ctx.db();
-        _coll = _db->createCollection(&txn, _ns);
+        _coll = _db->createCollection(&opCtx, _ns);
         _catalog = _coll->getIndexCatalog();
         wuow.commit();
     }
 
     ~RefreshEntry() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        ScopedTransaction transaction(&txn, MODE_IX);
-        Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
-        OldClientContext ctx(&txn, _ns);
-        WriteUnitOfWork wuow(&txn);
-
-        _db->dropCollection(&txn, _ns);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        ScopedTransaction transaction(&opCtx, MODE_IX);
+        Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+        OldClientContext ctx(&opCtx, _ns);
+        WriteUnitOfWork wuow(&opCtx);
+
+        _db->dropCollection(&opCtx, _ns);
         wuow.commit();
     }
 
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        OldClientWriteContext ctx(&txn, _ns);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        OldClientWriteContext ctx(&opCtx, _ns);
         const std::string indexName = "x_1";
 
         ASSERT_OK(dbtests::createIndexFromSpec(
-            &txn,
+            &opCtx,
             _ns,
             BSON("name" << indexName << "ns" << _ns << "key" << BSON("x" << 1) << "v"
                         << static_cast<int>(kIndexVersion) << "expireAfterSeconds" << 5)));
 
-        const IndexDescriptor* desc = _catalog->findIndexByName(&txn, indexName);
+        const IndexDescriptor* desc = _catalog->findIndexByName(&opCtx, indexName);
         ASSERT(desc);
         ASSERT_EQUALS(5, desc->infoObj()["expireAfterSeconds"].numberLong());
 
         // Change value of "expireAfterSeconds" on disk.
         {
-            WriteUnitOfWork wuow(&txn);
-            _coll->getCatalogEntry()->updateTTLSetting(&txn, "x_1", 10);
+            WriteUnitOfWork wuow(&opCtx);
+            _coll->getCatalogEntry()->updateTTLSetting(&opCtx, "x_1", 10);
             wuow.commit();
         }
 
         // Verify that the catalog does not yet know of the change.
-        desc = _catalog->findIndexByName(&txn, indexName);
+        desc = _catalog->findIndexByName(&opCtx, indexName);
         ASSERT_EQUALS(5, desc->infoObj()["expireAfterSeconds"].numberLong());
 
         {
             // Notify the catalog of the change.
-            WriteUnitOfWork wuow(&txn);
-            desc = _catalog->refreshEntry(&txn, desc);
+            WriteUnitOfWork wuow(&opCtx);
+            desc = _catalog->refreshEntry(&opCtx, desc);
             wuow.commit();
         }
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 4f7b0fa1b19..f0ab83fb9f0 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -61,7 +61,7 @@ static const char* const _ns = "unittests.indexupdate";
  */
 class IndexBuildBase {
 public:
-    IndexBuildBase() : _ctx(&_txn, _ns), _client(&_txn) {
+    IndexBuildBase() : _ctx(&_opCtx, _ns), _client(&_opCtx) {
         _client.createCollection(_ns);
     }
     ~IndexBuildBase() {
@@ -77,13 +77,13 @@ protected:
 
     bool buildIndexInterrupted(const BSONObj& key, bool allowInterruption) {
         try {
-            MultiIndexBlock indexer(&_txn, collection());
+            MultiIndexBlock indexer(&_opCtx, collection());
             if (allowInterruption)
                 indexer.allowInterruption();
 
             uassertStatusOK(indexer.init(key));
             uassertStatusOK(indexer.insertAllDocumentsInCollection());
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             indexer.commit();
             wunit.commit();
         } catch (const DBException& e) {
@@ -96,7 +96,7 @@ protected:
     }
 
     const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
-    OperationContext& _txn = *_txnPtr;
+    OperationContext& _opCtx = *_txnPtr;
     OldClientWriteContext _ctx;
     DBDirectClient _client;
 };
@@ -110,17 +110,17 @@ public:
         Database* db = _ctx.db();
         Collection* coll;
         {
-            WriteUnitOfWork wunit(&_txn);
-            db->dropCollection(&_txn, _ns);
-            coll = db->createCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            db->dropCollection(&_opCtx, _ns);
+            coll = db->createCollection(&_opCtx, _ns);
 
             OpDebug* const nullOpDebug = nullptr;
-            coll->insertDocument(&_txn,
+            coll->insertDocument(&_opCtx,
                                  BSON("_id" << 1 << "a"
                                             << "dup"),
                                  nullOpDebug,
                                  true);
-            coll->insertDocument(&_txn,
+            coll->insertDocument(&_opCtx,
                                  BSON("_id" << 2 << "a"
                                             << "dup"),
                                  nullOpDebug,
@@ -128,7 +128,7 @@ public:
             wunit.commit();
         }
 
-        MultiIndexBlock indexer(&_txn, coll);
+        MultiIndexBlock indexer(&_opCtx, coll);
         indexer.allowBackgroundBuilding();
         indexer.allowInterruption();
         indexer.ignoreUniqueConstraint();
@@ -149,7 +149,7 @@ public:
         ASSERT_OK(indexer.init(spec).getStatus());
         ASSERT_OK(indexer.insertAllDocumentsInCollection());
 
-        WriteUnitOfWork wunit(&_txn);
+        WriteUnitOfWork wunit(&_opCtx);
         indexer.commit();
         wunit.commit();
     }
@@ -164,17 +164,17 @@ public:
         Database* db = _ctx.db();
         Collection* coll;
         {
-            WriteUnitOfWork wunit(&_txn);
-            db->dropCollection(&_txn, _ns);
-            coll = db->createCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            db->dropCollection(&_opCtx, _ns);
+            coll = db->createCollection(&_opCtx, _ns);
 
             OpDebug* const nullOpDebug = nullptr;
-            coll->insertDocument(&_txn,
+            coll->insertDocument(&_opCtx,
                                  BSON("_id" << 1 << "a"
                                             << "dup"),
                                  nullOpDebug,
                                  true);
-            coll->insertDocument(&_txn,
+            coll->insertDocument(&_opCtx,
                                  BSON("_id" << 2 << "a"
                                             << "dup"),
                                  nullOpDebug,
@@ -182,7 +182,7 @@ public:
             wunit.commit();
         }
 
-        MultiIndexBlock indexer(&_txn, coll);
+        MultiIndexBlock indexer(&_opCtx, coll);
         indexer.allowBackgroundBuilding();
         indexer.allowInterruption();
         // indexer.ignoreUniqueConstraint(); // not calling this
@@ -217,17 +217,17 @@ public:
         RecordId loc1;
         RecordId loc2;
         {
-            WriteUnitOfWork wunit(&_txn);
-            db->dropCollection(&_txn, _ns);
-            coll = db->createCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            db->dropCollection(&_opCtx, _ns);
+            coll = db->createCollection(&_opCtx, _ns);
 
             OpDebug* const nullOpDebug = nullptr;
-            ASSERT_OK(coll->insertDocument(&_txn,
+            ASSERT_OK(coll->insertDocument(&_opCtx,
                                            BSON("_id" << 1 << "a"
                                                       << "dup"),
                                            nullOpDebug,
                                            true));
-            ASSERT_OK(coll->insertDocument(&_txn,
+            ASSERT_OK(coll->insertDocument(&_opCtx,
                                            BSON("_id" << 2 << "a"
                                                       << "dup"),
                                            nullOpDebug,
@@ -235,7 +235,7 @@ public:
             wunit.commit();
         }
 
-        MultiIndexBlock indexer(&_txn, coll);
+        MultiIndexBlock indexer(&_opCtx, coll);
         indexer.allowBackgroundBuilding();
         indexer.allowInterruption();
         // indexer.ignoreUniqueConstraint(); // not calling this
@@ -262,7 +262,7 @@ public:
         ASSERT_EQUALS(dups.size(), 1U);
         for (auto recordId : dups) {
             ASSERT_NOT_EQUALS(recordId, RecordId());
-            BSONObj obj = coll->docFor(&_txn, recordId).value();
+            BSONObj obj = coll->docFor(&_opCtx, recordId).value();
             int id = obj["_id"].Int();
             ASSERT(id == 1 || id == 2);
         }
@@ -277,16 +277,16 @@ public:
         Database* db = _ctx.db();
         Collection* coll;
         {
-            WriteUnitOfWork wunit(&_txn);
-            db->dropCollection(&_txn, _ns);
-            coll = db->createCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            db->dropCollection(&_opCtx, _ns);
+            coll = db->createCollection(&_opCtx, _ns);
             // Drop all indexes including id index.
-            coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+            coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
             // Insert some documents with enforceQuota=true.
             int32_t nDocs = 1000;
             OpDebug* const nullOpDebug = nullptr;
             for (int32_t i = 0; i < nDocs; ++i) {
-                coll->insertDocument(&_txn, BSON("a" << i), nullOpDebug, true);
+                coll->insertDocument(&_opCtx, BSON("a" << i), nullOpDebug, true);
             }
             wunit.commit();
         }
@@ -301,7 +301,7 @@ public:
         // only want to interrupt the index build
        getGlobalServiceContext()->unsetKillAllOperations();
         // The new index is not listed in the index catalog because the index build failed.
-        ASSERT(!coll->getIndexCatalog()->findIndexByName(&_txn, "a_1"));
+        ASSERT(!coll->getIndexCatalog()->findIndexByName(&_opCtx, "a_1"));
     }
 };
 
@@ -313,15 +313,15 @@ public:
         Database* db = _ctx.db();
         Collection* coll;
         {
-            WriteUnitOfWork wunit(&_txn);
-            db->dropCollection(&_txn, _ns);
-            coll = db->createCollection(&_txn, _ns);
-            coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+            WriteUnitOfWork wunit(&_opCtx);
+            db->dropCollection(&_opCtx, _ns);
+            coll = db->createCollection(&_opCtx, _ns);
+            coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
             // Insert some documents.
             int32_t nDocs = 1000;
             OpDebug* const nullOpDebug = nullptr;
             for (int32_t i = 0; i < nDocs; ++i) {
-                coll->insertDocument(&_txn, BSON("a" << i), nullOpDebug, true);
+                coll->insertDocument(&_opCtx, BSON("a" << i), nullOpDebug, true);
             }
             wunit.commit();
         }
@@ -336,7 +336,7 @@ public:
         // only want to interrupt the index build
         getGlobalServiceContext()->unsetKillAllOperations();
         // The new index is listed in the index catalog because the index build completed.
-        ASSERT(coll->getIndexCatalog()->findIndexByName(&_txn, "a_1"));
+        ASSERT(coll->getIndexCatalog()->findIndexByName(&_opCtx, "a_1"));
     }
 };
 
@@ -348,18 +348,18 @@ public:
         Database* db = _ctx.db();
         Collection* coll;
         {
-            WriteUnitOfWork wunit(&_txn);
-            db->dropCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            db->dropCollection(&_opCtx, _ns);
             CollectionOptions options;
             options.capped = true;
             options.cappedSize = 10 * 1024;
-            coll = db->createCollection(&_txn, _ns, options);
-            coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+            coll = db->createCollection(&_opCtx, _ns, options);
+            coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
             // Insert some documents.
             int32_t nDocs = 1000;
             OpDebug* const nullOpDebug = nullptr;
             for (int32_t i = 0; i < nDocs; ++i) {
-                coll->insertDocument(&_txn, BSON("_id" << i), nullOpDebug, true);
+                coll->insertDocument(&_opCtx, BSON("_id" << i), nullOpDebug, true);
             }
             wunit.commit();
         }
@@ -374,7 +374,7 @@ public:
         // only want to interrupt the index build
         getGlobalServiceContext()->unsetKillAllOperations();
         // The new index is not listed in the index catalog because the index build failed.
-        ASSERT(!coll->getIndexCatalog()->findIndexByName(&_txn, "_id_"));
+        ASSERT(!coll->getIndexCatalog()->findIndexByName(&_opCtx, "_id_"));
     }
 };
 
@@ -386,18 +386,18 @@ public:
         Database* db = _ctx.db();
         Collection* coll;
         {
-            WriteUnitOfWork wunit(&_txn);
-            db->dropCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            db->dropCollection(&_opCtx, _ns);
             CollectionOptions options;
             options.capped = true;
             options.cappedSize = 10 * 1024;
-            coll = db->createCollection(&_txn, _ns, options);
-            coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+            coll = db->createCollection(&_opCtx, _ns, options);
+            coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
             // Insert some documents.
             int32_t nDocs = 1000;
             OpDebug* const nullOpDebug = nullptr;
             for (int32_t i = 0; i < nDocs; ++i) {
-                coll->insertDocument(&_txn, BSON("_id" << i), nullOpDebug, true);
+                coll->insertDocument(&_opCtx, BSON("_id" << i), nullOpDebug, true);
             }
             wunit.commit();
         }
@@ -412,7 +412,7 @@ public:
         // only want to interrupt the index build
         getGlobalServiceContext()->unsetKillAllOperations();
         // The new index is listed in the index catalog because the index build succeeded.
-        ASSERT(coll->getIndexCatalog()->findIndexByName(&_txn, "_id_"));
+        ASSERT(coll->getIndexCatalog()->findIndexByName(&_opCtx, "_id_"));
     }
 };
 
@@ -430,7 +430,7 @@ public:
         // Request an interrupt.
         getGlobalServiceContext()->setKillAllOperations();
         // The call is not interrupted.
-        Helpers::ensureIndex(&_txn, collection(), BSON("a" << 1), kIndexVersion, false, "a_1");
+        Helpers::ensureIndex(&_opCtx, collection(), BSON("a" << 1), kIndexVersion, false, "a_1");
         // only want to interrupt the index build
         getGlobalServiceContext()->unsetKillAllOperations();
         // The new index is listed in getIndexSpecs because the index build completed.
@@ -439,7 +439,7 @@ public:
 };
 
 Status IndexBuildBase::createIndex(const std::string& dbname, const BSONObj& indexSpec) {
-    MultiIndexBlock indexer(&_txn, collection());
+    MultiIndexBlock indexer(&_opCtx, collection());
     Status status = indexer.init(indexSpec).getStatus();
     if (status == ErrorCodes::IndexAlreadyExists) {
         return Status::OK();
@@ -451,7 +451,7 @@ Status IndexBuildBase::createIndex(const std::string& dbname, const BSONObj& ind
     if (!status.isOK()) {
         return status;
     }
-    WriteUnitOfWork wunit(&_txn);
+    WriteUnitOfWork wunit(&_opCtx);
     indexer.commit();
     wunit.commit();
     return Status::OK();
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index fe5c44fff0f..8569720e242 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -1002,9 +1002,9 @@ public:
         string utf8ObjSpec = "{'_id':'\\u0001\\u007f\\u07ff\\uffff'}";
         BSONObj utf8Obj = fromjson(utf8ObjSpec);
 
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.insert(ns(), utf8Obj);
         client.eval("unittest",
@@ -1023,9 +1023,9 @@ private:
     }
 
     void reset() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.dropCollection(ns());
     }
@@ -1047,9 +1047,9 @@ public:
         if (!getGlobalScriptEngine()->utf8Ok())
             return;
 
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.eval("unittest",
                     "db.jstests.longutf8string.save( {_id:'\\uffff\\uffff\\uffff\\uffff'} )");
@@ -1057,9 +1057,9 @@ public:
 
 private:
     void reset() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.dropCollection(ns());
     }
@@ -1142,9 +1142,9 @@ public:
             ServerGlobalParams::FeatureCompatibility::Version::k34);
 
         // Drop the collection
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.dropCollection("unittest.testroundtrip");
 
@@ -2251,9 +2251,9 @@ public:
         update.appendCode("value",
                           "function () { db.test.find().forEach(function(obj) { continue; }); }");
 
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
-        DBDirectClient client(&txn);
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
+        DBDirectClient client(&opCtx);
 
         client.update("test.system.js", query.obj(), update.obj(), true /* upsert */);
 
         unique_ptr<Scope> s(getGlobalScriptEngine()->newScope());
diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp
index 73b5c728511..9a762f017a0 100644
--- a/src/mongo/dbtests/matchertests.cpp
+++ b/src/mongo/dbtests/matchertests.cpp
@@ -231,15 +231,15 @@ template <typename M>
 class WhereSimple1 {
 public:
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
         const NamespaceString nss("unittests.matchertests");
-        AutoGetCollectionForRead ctx(&txn, nss);
+        AutoGetCollectionForRead ctx(&opCtx, nss);
 
         const CollatorInterface* collator = nullptr;
         M m(BSON("$where"
                  << "function(){ return this.a == 1; }"),
-            ExtensionsCallbackReal(&txn, &nss),
+            ExtensionsCallbackReal(&opCtx, &nss),
             collator);
         ASSERT(m.matches(BSON("a" << 1)));
         ASSERT(!m.matches(BSON("a" << 2)));
diff --git a/src/mongo/dbtests/mmaptests.cpp b/src/mongo/dbtests/mmaptests.cpp
index ab6766b72c3..b0afc3e916f 100644
--- a/src/mongo/dbtests/mmaptests.cpp
+++ b/src/mongo/dbtests/mmaptests.cpp
@@ -77,16 +77,16 @@ public:
         MMAPV1LockerImpl lockState;
         Lock::GlobalWrite lk(&lockState);
 
-        auto txn = cc().makeOperationContext();
+        auto opCtx = cc().makeOperationContext();
         {
-            DurableMappedFile f(txn.get());
-            ON_BLOCK_EXIT([&f, &txn] {
-                LockMongoFilesExclusive lock(txn.get());
-                f.close(txn.get());
+            DurableMappedFile f(opCtx.get());
+            ON_BLOCK_EXIT([&f, &opCtx] {
+                LockMongoFilesExclusive lock(opCtx.get());
+                f.close(opCtx.get());
             });
             unsigned long long len = 256 * 1024 * 1024;
-            verify(f.create(txn.get(), fn, len));
+            verify(f.create(opCtx.get(), fn, len));
             {
                 char* p = (char*)f.getView();
                 verify(p);
@@ -99,12 +99,12 @@ public:
                 char* w = (char*)f.view_write();
                 strcpy(w + 6, "world");
             }
-            MongoFileFinder ff(txn.get());
+            MongoFileFinder ff(opCtx.get());
             ASSERT(ff.findByPath(fn));
             ASSERT(ff.findByPath("asdf") == 0);
         }
         {
-            MongoFileFinder ff(txn.get());
+            MongoFileFinder ff(opCtx.get());
             ASSERT(ff.findByPath(fn) == 0);
         }
 
@@ -118,13 +118,14 @@ public:
         Timer t;
         for (int i = 0; i < N; i++) {
             // Every 4 iterations we pass the sequential hint.
-            DurableMappedFile f{
-                txn.get(), i % 4 == 1 ? MongoFile::Options::SEQUENTIAL : MongoFile::Options::NONE};
-            ON_BLOCK_EXIT([&f, &txn] {
-                LockMongoFilesExclusive lock(txn.get());
-                f.close(txn.get());
+            DurableMappedFile f{opCtx.get(),
+                                i % 4 == 1 ? MongoFile::Options::SEQUENTIAL
+                                           : MongoFile::Options::NONE};
+            ON_BLOCK_EXIT([&f, &opCtx] {
+                LockMongoFilesExclusive lock(opCtx.get());
+                f.close(opCtx.get());
             });
-            verify(f.open(txn.get(), fn));
+            verify(f.open(opCtx.get(), fn));
             {
                 char* p = (char*)f.getView();
                 verify(p);
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index c8a5be64b42..81c6b3125b0 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -69,11 +69,11 @@ namespace MissingFieldTests {
 class BtreeIndexMissingField {
 public:
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
         BSONObj spec(BSON("key" << BSON("a" << 1)));
         ASSERT_EQUALS(jstNULL,
-                      IndexLegacy::getMissingField(&txn, NULL, spec).firstElement().type());
+                      IndexLegacy::getMissingField(&opCtx, NULL, spec).firstElement().type());
     }
 };
 
@@ -81,12 +81,12 @@ public:
 class TwoDIndexMissingField {
 public:
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
         BSONObj spec(BSON("key" << BSON("a"
                                         << "2d")));
         ASSERT_EQUALS(jstNULL,
-                      IndexLegacy::getMissingField(&txn, NULL, spec).firstElement().type());
+                      IndexLegacy::getMissingField(&opCtx, NULL, spec).firstElement().type());
     }
 };
 
@@ -94,8 +94,8 @@ public:
 class HashedIndexMissingField {
 public:
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
         BSONObj spec(BSON("key" << BSON("a"
                                         << "hashed")));
         BSONObj nullObj = BSON("a" << BSONNULL);
@@ -109,7 +109,7 @@ public:
         ASSERT_EQUALS(ExpressionKeysPrivate::makeSingleHashKey(nullObj.firstElement(), 0, 0),
                       nullFieldFromKey.Long());
 
-        BSONObj missingField = IndexLegacy::getMissingField(&txn, NULL, spec);
+        BSONObj missingField = IndexLegacy::getMissingField(&opCtx, NULL, spec);
         ASSERT_EQUALS(NumberLong, missingField.firstElement().type());
         ASSERT_BSONELT_EQ(nullFieldFromKey, missingField.firstElement());
     }
@@ -122,8 +122,8 @@ public:
 class HashedIndexMissingFieldAlternateSeed {
 public:
     void run() {
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
         BSONObj spec(BSON("key" << BSON("a"
                                         << "hashed")
                                 << "seed"
@@ -141,7 +141,7 @@ public:
 
         // Ensure that getMissingField recognizes that the seed is different (and returns
        // the right key).
-        BSONObj missingField = IndexLegacy::getMissingField(&txn, NULL, spec);
+        BSONObj missingField = IndexLegacy::getMissingField(&opCtx, NULL, spec);
         ASSERT_EQUALS(NumberLong, missingField.firstElement().type());
         ASSERT_BSONELT_EQ(nullFieldFromKey, missingField.firstElement());
     }
@@ -159,16 +159,16 @@ namespace NamespaceDetailsTests {
     public:
         Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
         virtual ~Base() {
-            const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
            if ( !nsd() )
                 return;
-            _context.db()->dropCollection( &txn, ns() );
+            _context.db()->dropCollection( &opCtx, ns() );
        }
    protected:
        void create() {
            Lock::GlobalWrite lk;
-            const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
-            ASSERT( userCreateNS( &txn, db(), ns(), fromjson( spec() ), false ).isOK() );
+            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
+            ASSERT( userCreateNS( &opCtx, db(), ns(), fromjson( spec() ), false ).isOK() );
        }
        virtual string spec() const = 0;
        int nRecords() const {
@@ -250,10 +250,10 @@ namespace NamespaceDetailsTests {
    class SingleAlloc : public Base {
    public:
        void run() {
-            const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
            create();
            BSONObj b = bigObj();
-            ASSERT( collection()->insertDocument( &txn, b, true ).isOK() );
+            ASSERT( collection()->insertDocument( &opCtx, b, true ).isOK() );
            ASSERT_EQUALS( 1, nRecords() );
        }
        virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
    };
 
@@ -262,7 +262,7 @@ namespace NamespaceDetailsTests {
    class Realloc : public Base {
    public:
        void run() {
-            const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
            create();
            const int N = 20;
@@ -272,7 +272,7 @@ namespace NamespaceDetailsTests {
            for ( int i = 0; i < N; ++i ) {
                BSONObj b = bigObj();
                StatusWith<RecordId> status =
-                ASSERT( collection()->insertDocument( &txn, b, true ).isOK() );
+                ASSERT( collection()->insertDocument( &opCtx, b, true ).isOK() );
                l[ i ] = status.getValue();
                ASSERT( !l[ i ].isNull() );
                ASSERT( nRecords() <= Q );
@@ -287,14 +287,14 @@ namespace NamespaceDetailsTests {
    class TwoExtent : public Base {
    public:
        void run() {
-            const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
            create();
            ASSERT_EQUALS( 2, nExtents() );
            RecordId l[ 8 ];
            for ( int i = 0; i < 8; ++i ) {
                StatusWith<RecordId> status =
-                ASSERT( collection()->insertDocument( &txn, bigObj(), true ).isOK() );
+                ASSERT( collection()->insertDocument( &opCtx, bigObj(), true ).isOK() );
                l[ i ] = status.getValue();
                ASSERT( !l[ i ].isNull() );
                //ASSERT_EQUALS( i < 2 ?
i + 1 : 3 + i % 2, nRecords() ); @@ -308,7 +308,7 @@ namespace NamespaceDetailsTests { bob.appendOID( "_id", NULL, true ); bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096 BSONObj bigger = bob.done(); - ASSERT( !collection()->insertDocument( &txn, bigger, false ).isOK() ); + ASSERT( !collection()->insertDocument( &opCtx, bigger, false ).isOK() ); ASSERT_EQUALS( 0, nRecords() ); } private: @@ -335,13 +335,13 @@ namespace NamespaceDetailsTests { class AllocCappedNotQuantized : public Base { public: void run() { - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; create(); ASSERT( nsd()->isCapped() ); ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) ); StatusWith<RecordId> result = - collection()->insertDocument( &txn, docForRecordSize( 300 ), false ); + collection()->insertDocument( &opCtx, docForRecordSize( 300 ), false ); ASSERT( result.isOK() ); Record* record = collection()->getRecordStore()->recordFor( result.getValue() ); // Check that no quantization is performed. @@ -358,7 +358,7 @@ namespace NamespaceDetailsTests { return "{\"capped\":true,\"size\":512,\"$nExtents\":2}"; } void pass(int p) { - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; create(); ASSERT_EQUALS( 2, nExtents() ); @@ -371,7 +371,7 @@ namespace NamespaceDetailsTests { //RecordId l[ 8 ]; for ( int i = 0; i < N; ++i ) { BSONObj bb = bigObj(); - StatusWith<RecordId> status = collection()->insertDocument( &txn, bb, true ); + StatusWith<RecordId> status = collection()->insertDocument( &opCtx, bb, true ); ASSERT( status.isOK() ); RecordId a = status.getValue(); if( T == i ) @@ -385,7 +385,7 @@ namespace NamespaceDetailsTests { RecordId last, first; { - unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn, + unique_ptr<Runner> runner(InternalPlanner::collectionScan(&opCtx, ns(), collection(), InternalPlanner::BACKWARD)); @@ -393,7 +393,7 @@ namespace NamespaceDetailsTests { ASSERT( !last.isNull() ); } { - unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn, + unique_ptr<Runner> runner(InternalPlanner::collectionScan(&opCtx, ns(), collection(), InternalPlanner::FORWARD)); @@ -402,12 +402,12 @@ namespace NamespaceDetailsTests { ASSERT( first != last ) ; } - collection()->cappedTruncateAfter(&txn, truncAt, false); + collection()->cappedTruncateAfter(&opCtx, truncAt, false); ASSERT_EQUALS( collection()->numRecords() , 28u ); { RecordId loc; - unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn, + unique_ptr<Runner> runner(InternalPlanner::collectionScan(&opCtx, ns(), collection(), InternalPlanner::FORWARD)); @@ -415,7 +415,7 @@ namespace NamespaceDetailsTests { ASSERT( first == loc); } { - unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn, + unique_ptr<Runner> runner(InternalPlanner::collectionScan(&opCtx, ns(), collection(), InternalPlanner::BACKWARD)); @@ -430,7 +430,7 @@ namespace NamespaceDetailsTests { bob.appendOID("_id", 0, true); bob.append( "a", string( MinExtentSize + 300, 'a' ) ); BSONObj bigger = bob.done(); - ASSERT( !collection()->insertDocument( &txn, bigger, true ).isOK() ); + ASSERT( !collection()->insertDocument( &opCtx, bigger, 
true ).isOK() ); ASSERT_EQUALS( 0, nRecords() ); } public: @@ -503,28 +503,28 @@ namespace NamespaceDetailsTests { create(); NamespaceDetails *nsd = collection()->detailsWritable(); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; // Set 2 & 54 as multikey - nsd->setIndexIsMultikey(&txn, 2, true); - nsd->setIndexIsMultikey(&txn, 54, true); + nsd->setIndexIsMultikey(&opCtx, 2, true); + nsd->setIndexIsMultikey(&opCtx, 54, true); ASSERT(nsd->isMultikey(2)); ASSERT(nsd->isMultikey(54)); // Flip 2 & 47 - nsd->setIndexIsMultikey(&txn, 2, false); - nsd->setIndexIsMultikey(&txn, 47, true); + nsd->setIndexIsMultikey(&opCtx, 2, false); + nsd->setIndexIsMultikey(&opCtx, 47, true); ASSERT(!nsd->isMultikey(2)); ASSERT(nsd->isMultikey(47)); // Reset entries that are already true - nsd->setIndexIsMultikey(&txn, 54, true); - nsd->setIndexIsMultikey(&txn, 47, true); + nsd->setIndexIsMultikey(&opCtx, 54, true); + nsd->setIndexIsMultikey(&opCtx, 47, true); ASSERT(nsd->isMultikey(54)); ASSERT(nsd->isMultikey(47)); // Two non-multi-key - nsd->setIndexIsMultikey(&txn, 2, false); - nsd->setIndexIsMultikey(&txn, 43, false); + nsd->setIndexIsMultikey(&opCtx, 2, false); + nsd->setIndexIsMultikey(&opCtx, 43, false); ASSERT(!nsd->isMultikey(2)); ASSERT(nsd->isMultikey(54)); ASSERT(nsd->isMultikey(47)); @@ -544,21 +544,21 @@ public: const string committedName = dbName + ".committed"; const string rolledBackName = dbName + ".rolled_back"; - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; - ScopedTransaction transaction(&txn, MODE_IX); - Lock::DBLock lk(txn.lockState(), dbName, MODE_X); + ScopedTransaction transaction(&opCtx, MODE_IX); + Lock::DBLock lk(opCtx.lockState(), dbName, MODE_X); bool justCreated; - Database* db = dbHolder().openDb(&txn, dbName, &justCreated); + Database* db = dbHolder().openDb(&opCtx, dbName, &justCreated); ASSERT(justCreated); Collection* committedColl; { - WriteUnitOfWork wunit(&txn); + WriteUnitOfWork wunit(&opCtx); ASSERT_FALSE(db->getCollection(committedName)); - committedColl = db->createCollection(&txn, committedName); + committedColl = db->createCollection(&opCtx, committedName); ASSERT_EQUALS(db->getCollection(committedName), committedColl); wunit.commit(); } @@ -566,9 +566,9 @@ public: ASSERT_EQUALS(db->getCollection(committedName), committedColl); { - WriteUnitOfWork wunit(&txn); + WriteUnitOfWork wunit(&opCtx); ASSERT_FALSE(db->getCollection(rolledBackName)); - Collection* rolledBackColl = db->createCollection(&txn, rolledBackName); + Collection* rolledBackColl = db->createCollection(&opCtx, rolledBackName); ASSERT_EQUALS(db->getCollection(rolledBackName), rolledBackColl); // not committing so creation should be rolled back } @@ -589,23 +589,23 @@ public: const string droppedName = dbName + ".dropped"; const string rolledBackName = dbName + ".rolled_back"; - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; - ScopedTransaction transaction(&txn, MODE_IX); - Lock::DBLock lk(txn.lockState(), dbName, MODE_X); + 
ScopedTransaction transaction(&opCtx, MODE_IX); + Lock::DBLock lk(opCtx.lockState(), dbName, MODE_X); bool justCreated; - Database* db = dbHolder().openDb(&txn, dbName, &justCreated); + Database* db = dbHolder().openDb(&opCtx, dbName, &justCreated); ASSERT(justCreated); { - WriteUnitOfWork wunit(&txn); + WriteUnitOfWork wunit(&opCtx); ASSERT_FALSE(db->getCollection(droppedName)); Collection* droppedColl; - droppedColl = db->createCollection(&txn, droppedName); + droppedColl = db->createCollection(&opCtx, droppedName); ASSERT_EQUALS(db->getCollection(droppedName), droppedColl); - db->dropCollection(&txn, droppedName); + db->dropCollection(&opCtx, droppedName); wunit.commit(); } @@ -613,12 +613,12 @@ public: ASSERT_FALSE(db->getCollection(droppedName)); { - WriteUnitOfWork wunit(&txn); + WriteUnitOfWork wunit(&opCtx); ASSERT_FALSE(db->getCollection(rolledBackName)); - Collection* rolledBackColl = db->createCollection(&txn, rolledBackName); + Collection* rolledBackColl = db->createCollection(&opCtx, rolledBackName); wunit.commit(); ASSERT_EQUALS(db->getCollection(rolledBackName), rolledBackColl); - db->dropCollection(&txn, rolledBackName); + db->dropCollection(&opCtx, rolledBackName); // not committing so dropping should be rolled back } diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp index 5dedeac260a..4070d0b5440 100644 --- a/src/mongo/dbtests/oplogstarttests.cpp +++ b/src/mongo/dbtests/oplogstarttests.cpp @@ -45,17 +45,17 @@ static const NamespaceString nss("unittests.oplogstarttests"); class Base { public: Base() - : _scopedXact(&_txn, MODE_X), - _lk(_txn.lockState()), - _context(&_txn, nss.ns()), - _client(&_txn) { + : _scopedXact(&_opCtx, MODE_X), + _lk(_opCtx.lockState()), + _context(&_opCtx, nss.ns()), + _client(&_opCtx) { Collection* c = _context.db()->getCollection(nss.ns()); if (!c) { - WriteUnitOfWork wuow(&_txn); - c = _context.db()->createCollection(&_txn, nss.ns()); + WriteUnitOfWork wuow(&_opCtx); + c = _context.db()->createCollection(&_opCtx, nss.ns()); wuow.commit(); } - ASSERT(c->getIndexCatalog()->haveIdIndex(&_txn)); + ASSERT(c->getIndexCatalog()->haveIdIndex(&_opCtx)); } ~Base() { @@ -78,11 +78,11 @@ protected: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto statusWithCQ = CanonicalQuery::canonicalize( - &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions()); + &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); _cq = std::move(statusWithCQ.getValue()); _oplogws.reset(new WorkingSet()); - _stage.reset(new OplogStart(&_txn, collection(), _cq->root(), _oplogws.get())); + _stage.reset(new OplogStart(&_opCtx, collection(), _cq->root(), _oplogws.get())); } void assertWorkingSetMemberHasId(WorkingSetID id, int expectedId) { @@ -100,7 +100,7 @@ protected: private: // The order of these is important in order to ensure order of destruction const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; ScopedTransaction _scopedXact; Lock::GlobalWrite _lk; OldClientContext _context; diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp index 711e8d6f9b8..59eb024b5c5 100644 --- a/src/mongo/dbtests/pdfiletests.cpp +++ b/src/mongo/dbtests/pdfiletests.cpp @@ -44,13 +44,13 @@ namespace PdfileTests { namespace Insert { class Base { public: - Base() : _scopedXact(&_txn, MODE_X), _lk(_txn.lockState()), _context(&_txn, ns()) {} + Base() : 
_scopedXact(&_opCtx, MODE_X), _lk(_opCtx.lockState()), _context(&_opCtx, ns()) {} virtual ~Base() { if (!collection()) return; - WriteUnitOfWork wunit(&_txn); - _context.db()->dropCollection(&_txn, ns()); + WriteUnitOfWork wunit(&_opCtx); + _context.db()->dropCollection(&_opCtx, ns()); wunit.commit(); } @@ -63,7 +63,7 @@ protected: } const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; ScopedTransaction _scopedXact; Lock::GlobalWrite _lk; OldClientContext _context; @@ -72,18 +72,18 @@ protected: class InsertNoId : public Base { public: void run() { - WriteUnitOfWork wunit(&_txn); + WriteUnitOfWork wunit(&_opCtx); BSONObj x = BSON("x" << 1); ASSERT(x["_id"].type() == 0); - Collection* collection = _context.db()->getOrCreateCollection(&_txn, ns()); + Collection* collection = _context.db()->getOrCreateCollection(&_opCtx, ns()); OpDebug* const nullOpDebug = nullptr; - ASSERT(!collection->insertDocument(&_txn, x, nullOpDebug, true).isOK()); + ASSERT(!collection->insertDocument(&_opCtx, x, nullOpDebug, true).isOK()); - StatusWith<BSONObj> fixed = fixDocumentForInsert(_txn.getServiceContext(), x); + StatusWith<BSONObj> fixed = fixDocumentForInsert(_opCtx.getServiceContext(), x); ASSERT(fixed.isOK()); x = fixed.getValue(); ASSERT(x["_id"].type() == jstOID); - ASSERT(collection->insertDocument(&_txn, x, nullOpDebug, true).isOK()); + ASSERT(collection->insertDocument(&_opCtx, x, nullOpDebug, true).isOK()); wunit.commit(); } }; @@ -96,7 +96,7 @@ public: b.append("_id", 1); BSONObj o = b.done(); - BSONObj fixed = fixDocumentForInsert(_txn.getServiceContext(), o).getValue(); + BSONObj fixed = fixDocumentForInsert(_opCtx.getServiceContext(), o).getValue(); ASSERT_EQUALS(2, fixed.nFields()); ASSERT(fixed.firstElement().fieldNameStringData() == "_id"); ASSERT(fixed.firstElement().number() == 1); @@ -121,7 +121,7 @@ public: o = b.obj(); } - BSONObj fixed = fixDocumentForInsert(_txn.getServiceContext(), o).getValue(); + BSONObj fixed = fixDocumentForInsert(_opCtx.getServiceContext(), o).getValue(); ASSERT_EQUALS(3, fixed.nFields()); ASSERT(fixed.firstElement().fieldNameStringData() == "_id"); ASSERT(fixed.firstElement().number() == 1); @@ -143,12 +143,12 @@ public: class ValidId : public Base { public: void run() { - ASSERT(fixDocumentForInsert(_txn.getServiceContext(), BSON("_id" << 5)).isOK()); + ASSERT(fixDocumentForInsert(_opCtx.getServiceContext(), BSON("_id" << 5)).isOK()); ASSERT( - fixDocumentForInsert(_txn.getServiceContext(), BSON("_id" << BSON("x" << 5))).isOK()); - ASSERT( - !fixDocumentForInsert(_txn.getServiceContext(), BSON("_id" << BSON("$x" << 5))).isOK()); - ASSERT(!fixDocumentForInsert(_txn.getServiceContext(), BSON("_id" << BSON("$oid" << 5))) + fixDocumentForInsert(_opCtx.getServiceContext(), BSON("_id" << BSON("x" << 5))).isOK()); + ASSERT(!fixDocumentForInsert(_opCtx.getServiceContext(), BSON("_id" << BSON("$x" << 5))) + .isOK()); + ASSERT(!fixDocumentForInsert(_opCtx.getServiceContext(), BSON("_id" << BSON("$oid" << 5))) .isOK()); } }; diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp index 8c761037950..042b3629b5d 100644 --- a/src/mongo/dbtests/perftests.cpp +++ b/src/mongo/dbtests/perftests.cpp @@ -82,11 +82,11 @@ const bool profiling = false; class ClientBase { public: - ClientBase() : _client(&_txn) { - mongo::LastError::get(_txn.getClient()).reset(); + ClientBase() : _client(&_opCtx) { + mongo::LastError::get(_opCtx.getClient()).reset(); } 
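// A note on the fixture layout visible here and in oplogstarttests above: the
// operation-context members are declared first so they are destroyed last,
// matching the in-diff comment that the order ensures order of destruction.
// Assuming the member names shown in the diff:
//
//     const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
//     OperationContext& _opCtx = *_txnPtr;  // outlives the members below
//     ScopedTransaction _scopedXact;        // declared after, destroyed before _opCtx
//     Lock::GlobalWrite _lk;
//     OldClientContext _context;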
virtual ~ClientBase() { - mongo::LastError::get(_txn.getClient()).reset(); + mongo::LastError::get(_opCtx.getClient()).reset(); } protected: @@ -103,13 +103,13 @@ protected: DBClientBase* client() { return &_client; } - OperationContext* txn() { - return &_txn; + OperationContext* opCtx() { + return &_opCtx; } private: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; DBDirectClient _client; }; @@ -332,9 +332,9 @@ public: srand(++z ^ (unsigned)time(0)); #endif Client::initThreadIfNotAlready("perftestthr"); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; - DBDirectClient c(&txn); + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; + DBDirectClient c(&opCtx); const unsigned int Batch = batchSize(); prepThreaded(); diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp index 031a95d807a..44e2ddfba31 100644 --- a/src/mongo/dbtests/plan_ranking.cpp +++ b/src/mongo/dbtests/plan_ranking.cpp @@ -74,14 +74,14 @@ public: PlanRankingTestBase() : _internalQueryForceIntersectionPlans(internalQueryForceIntersectionPlans.load()), _enableHashIntersection(internalQueryPlannerEnableHashIntersection.load()), - _client(&_txn) { + _client(&_opCtx) { // Run all tests with hash-based intersection enabled. internalQueryPlannerEnableHashIntersection.store(true); // Ensure N is significantly larger then internalQueryPlanEvaluationWorks. ASSERT_GTE(N, internalQueryPlanEvaluationWorks.load() + 1000); - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.dropCollection(nss.ns()); } @@ -92,12 +92,12 @@ public: } void insert(const BSONObj& obj) { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.insert(nss.ns(), obj); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj)); } /** @@ -107,11 +107,11 @@ public: * Does NOT take ownership of 'cq'. Caller DOES NOT own the returned QuerySolution*. */ QuerySolution* pickBestPlan(CanonicalQuery* cq) { - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* collection = ctx.getCollection(); QueryPlannerParams plannerParams; - fillOutPlannerParams(&_txn, collection, cq, &plannerParams); + fillOutPlannerParams(&_opCtx, collection, cq, &plannerParams); // Turn this off otherwise it pops up in some plans. plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS; @@ -123,18 +123,18 @@ public: ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U); // Fill out the MPR. - _mps.reset(new MultiPlanStage(&_txn, collection, cq)); + _mps.reset(new MultiPlanStage(&_opCtx, collection, cq)); unique_ptr<WorkingSet> ws(new WorkingSet()); // Put each solution from the planner into the MPR. for (size_t i = 0; i < solutions.size(); ++i) { PlanStage* root; - ASSERT(StageBuilder::build(&_txn, collection, *cq, *solutions[i], ws.get(), &root)); + ASSERT(StageBuilder::build(&_opCtx, collection, *cq, *solutions[i], ws.get(), &root)); // Takes ownership of all (actually some) arguments. _mps->addPlan(solutions[i], root, ws.get()); } // This is what sets a backup plan, should we test for it. 
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, - _txn.getServiceContext()->getFastClockSource()); + _opCtx.getServiceContext()->getFastClockSource()); _mps->pickBestPlan(&yieldPolicy); ASSERT(_mps->bestPlanChosen()); @@ -153,8 +153,8 @@ public: return _mps->hasBackupPlan(); } - OperationContext* txn() { - return &_txn; + OperationContext* opCtx() { + return &_opCtx; } protected: @@ -164,7 +164,7 @@ protected: const int N = 12000; const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; private: // Holds the value of global "internalQueryForceIntersectionPlans" setParameter flag. @@ -202,7 +202,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(BSON("a" << 100 << "b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); verify(statusWithCQ.isOK()); cq = std::move(statusWithCQ.getValue()); ASSERT(cq.get()); @@ -222,7 +222,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(BSON("a" << 100 << "b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); verify(statusWithCQ.isOK()); cq = std::move(statusWithCQ.getValue()); } @@ -258,7 +258,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(BSON("a" << 1 << "b" << BSON("$gt" << 1))); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); @@ -300,7 +300,7 @@ public: qr->setFilter(BSON("a" << 27)); qr->setProj(BSON("_id" << 0 << "a" << 1 << "b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); @@ -335,7 +335,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(BSON("a" << 1 << "b" << 1 << "c" << 99)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); @@ -375,7 +375,7 @@ public: qr->setProj(BSON("_id" << 0 << "a" << 1 << "b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); @@ -409,7 +409,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(BSON("a" << N + 1 << "b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = 
std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); @@ -446,7 +446,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(BSON("a" << BSON("$gte" << N + 1) << "b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); @@ -477,7 +477,7 @@ public: qr->setFilter(BSON("_id" << BSON("$gte" << 20 << "$lte" << 200))); qr->setSort(BSON("c" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); @@ -507,7 +507,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(BSON("foo" << 2001)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); @@ -542,7 +542,7 @@ public: qr->setFilter(BSON("a" << 1)); qr->setSort(BSON("d" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); @@ -580,7 +580,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(fromjson("{a: 1, b: 1, c: {$gte: 5000}}")); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); @@ -613,7 +613,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(fromjson("{a: 9, b: {$ne: 10}, c: 9}")); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp index 71492c66683..7265c0e8dbc 100644 --- a/src/mongo/dbtests/query_plan_executor.cpp +++ b/src/mongo/dbtests/query_plan_executor.cpp @@ -64,14 +64,14 @@ static const NamespaceString nss("unittests.QueryPlanExecutor"); class PlanExecutorBase { public: - PlanExecutorBase() : _client(&_txn) {} + PlanExecutorBase() : _client(&_opCtx) {} virtual ~PlanExecutorBase() { _client.dropCollection(nss.ns()); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj)); } void insert(const BSONObj& obj) { @@ -107,17 +107,22 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(filterObj); auto statusWithCQ = CanonicalQuery::canonicalize( - &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions()); + &_opCtx, 
std::move(qr), ExtensionsCallbackDisallowExtensions()); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); verify(NULL != cq.get()); // Make the stage. - unique_ptr<PlanStage> root(new CollectionScan(&_txn, csparams, ws.get(), cq.get()->root())); + unique_ptr<PlanStage> root( + new CollectionScan(&_opCtx, csparams, ws.get(), cq.get()->root())); // Hand the plan off to the executor. - auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(root), std::move(cq), coll, PlanExecutor::YIELD_MANUAL); + auto statusWithPlanExecutor = PlanExecutor::make(&_opCtx, + std::move(ws), + std::move(root), + std::move(cq), + coll, + PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); return statusWithPlanExecutor.getValue().release(); } @@ -148,25 +153,29 @@ public: const Collection* coll = db->getCollection(nss.ns()); unique_ptr<WorkingSet> ws(new WorkingSet()); - IndexScan* ix = new IndexScan(&_txn, ixparams, ws.get(), NULL); - unique_ptr<PlanStage> root(new FetchStage(&_txn, ws.get(), ix, NULL, coll)); + IndexScan* ix = new IndexScan(&_opCtx, ixparams, ws.get(), NULL); + unique_ptr<PlanStage> root(new FetchStage(&_opCtx, ws.get(), ix, NULL, coll)); auto qr = stdx::make_unique<QueryRequest>(nss); auto statusWithCQ = CanonicalQuery::canonicalize( - &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions()); + &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions()); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); verify(NULL != cq.get()); // Hand the plan off to the executor. - auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(root), std::move(cq), coll, PlanExecutor::YIELD_MANUAL); + auto statusWithPlanExecutor = PlanExecutor::make(&_opCtx, + std::move(ws), + std::move(root), + std::move(cq), + coll, + PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); return statusWithPlanExecutor.getValue().release(); } size_t numCursors() { - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* collection = ctx.getCollection(); if (!collection) return 0; @@ -175,31 +184,31 @@ public: void registerExec(PlanExecutor* exec) { // TODO: This is not correct (create collection under S-lock) - AutoGetCollectionForRead ctx(&_txn, nss); - WriteUnitOfWork wunit(&_txn); - Collection* collection = ctx.getDb()->getOrCreateCollection(&_txn, nss.ns()); + AutoGetCollectionForRead ctx(&_opCtx, nss); + WriteUnitOfWork wunit(&_opCtx); + Collection* collection = ctx.getDb()->getOrCreateCollection(&_opCtx, nss.ns()); collection->getCursorManager()->registerExecutor(exec); wunit.commit(); } void deregisterExec(PlanExecutor* exec) { // TODO: This is not correct (create collection under S-lock) - AutoGetCollectionForRead ctx(&_txn, nss); - WriteUnitOfWork wunit(&_txn); - Collection* collection = ctx.getDb()->getOrCreateCollection(&_txn, nss.ns()); + AutoGetCollectionForRead ctx(&_opCtx, nss); + WriteUnitOfWork wunit(&_opCtx); + Collection* collection = ctx.getDb()->getOrCreateCollection(&_opCtx, nss.ns()); collection->getCursorManager()->deregisterExecutor(exec); wunit.commit(); } protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; private: IndexDescriptor* getIndex(Database* db, const BSONObj& obj) { Collection* collection = db->getCollection(nss.ns()); 
std::vector<IndexDescriptor*> indexes; - collection->getIndexCatalog()->findIndexesByKeyPattern(&_txn, obj, false, &indexes); + collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes); ASSERT_LTE(indexes.size(), 1U); return indexes.size() == 0 ? nullptr : indexes[0]; } @@ -214,7 +223,7 @@ private: class DropCollScan : public PlanExecutorBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); insert(BSON("_id" << 1)); insert(BSON("_id" << 2)); @@ -242,7 +251,7 @@ public: class DropIndexScan : public PlanExecutorBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); insert(BSON("_id" << 1 << "a" << 6)); insert(BSON("_id" << 2 << "a" << 7)); insert(BSON("_id" << 3 << "a" << 8)); @@ -270,7 +279,7 @@ public: class DropIndexScanAgg : public PlanExecutorBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); insert(BSON("_id" << 1 << "a" << 6)); insert(BSON("_id" << 2 << "a" << 7)); @@ -283,7 +292,7 @@ public: // Create the aggregation pipeline. std::vector<BSONObj> rawPipeline = {fromjson("{$match: {a: {$gte: 7, $lte: 10}}}")}; boost::intrusive_ptr<ExpressionContextForTest> expCtx = - new ExpressionContextForTest(&_txn, AggregationRequest(nss, rawPipeline)); + new ExpressionContextForTest(&_opCtx, AggregationRequest(nss, rawPipeline)); // Create an "inner" plan executor and register it with the cursor manager so that it can // get notified when the collection is dropped. @@ -299,10 +308,10 @@ public: // Create the output PlanExecutor that pulls results from the pipeline. auto ws = make_unique<WorkingSet>(); - auto proxy = make_unique<PipelineProxyStage>(&_txn, pipeline, ws.get()); + auto proxy = make_unique<PipelineProxyStage>(&_opCtx, pipeline, ws.get()); auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> outerExec = std::move(statusWithPlanExecutor.getValue()); @@ -377,7 +386,7 @@ protected: class SnapshotControl : public SnapshotBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); setupCollection(); BSONObj filterObj = fromjson("{a: {$gte: 2}}"); @@ -404,7 +413,7 @@ public: class SnapshotTest : public SnapshotBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); setupCollection(); BSONObj indexSpec = BSON("_id" << 1); addIndex(indexSpec); @@ -435,7 +444,7 @@ using mongo::ClientCursor; class Invalidate : public PlanExecutorBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); insert(BSON("a" << 1 << "b" << 1)); BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}"); @@ -461,7 +470,7 @@ public: class InvalidatePinned : public PlanExecutorBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); insert(BSON("a" << 1 << "b" << 1)); Collection* collection = ctx.getCollection(); @@ -501,12 +510,12 @@ class Timeout : public PlanExecutorBase { public: void run() { { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); 
insert(BSON("a" << 1 << "b" << 1)); } { - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* collection = ctx.getCollection(); BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}"); @@ -519,7 +528,7 @@ public: // There should be one cursor before timeout, // and zero cursors after timeout. ASSERT_EQUALS(1U, numCursors()); - CursorManager::timeoutCursorsGlobal(&_txn, 600001); + CursorManager::timeoutCursorsGlobal(&_opCtx, 600001); ASSERT_EQUALS(0U, numCursors()); } }; diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp index 340101f66fa..fa4961ce4eb 100644 --- a/src/mongo/dbtests/query_stage_and.cpp +++ b/src/mongo/dbtests/query_stage_and.cpp @@ -61,19 +61,19 @@ using stdx::make_unique; class QueryStageAndBase { public: - QueryStageAndBase() : _client(&_txn) {} + QueryStageAndBase() : _client(&_opCtx) {} virtual ~QueryStageAndBase() { _client.dropCollection(ns()); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj)); } IndexDescriptor* getIndex(const BSONObj& obj, Collection* coll) { std::vector<IndexDescriptor*> indexes; - coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, obj, false, &indexes); + coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes); if (indexes.empty()) { FAIL(mongoutils::str::stream() << "Unable to find index with key pattern " << obj); } @@ -81,7 +81,7 @@ public: } void getRecordIds(set<RecordId>* out, Collection* coll) { - auto cursor = coll->getCursor(&_txn); + auto cursor = coll->getCursor(&_opCtx); while (auto record = cursor->next()) { out->insert(record->id); } @@ -151,7 +151,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; private: DBDirectClient _client; @@ -168,12 +168,12 @@ private: class QueryStageAndHashInvalidation : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -185,7 +185,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll); // Foo <= 20 IndexScanParams params; @@ -195,7 +195,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = -1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar >= 10 params.descriptor = getIndex(BSON("bar" << 1), coll); @@ -203,7 +203,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // ah reads the first child into its hash table. // ah should read foo=20, foo=19, ..., foo=0 in that order. 
@@ -221,9 +221,9 @@ public: getRecordIds(&data, coll); size_t memUsageBefore = ah->getMemUsage(); for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) { - if (coll->docFor(&_txn, *it).value()["foo"].numberInt() == 15) { - ah->invalidate(&_txn, *it, INVALIDATION_DELETION); - remove(coll->docFor(&_txn, *it).value()); + if (coll->docFor(&_opCtx, *it).value()["foo"].numberInt() == 15) { + ah->invalidate(&_opCtx, *it, INVALIDATION_DELETION); + remove(coll->docFor(&_opCtx, *it).value()); break; } } @@ -273,12 +273,12 @@ public: class QueryStageAndHashInvalidateLookahead : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -291,7 +291,7 @@ public: addIndex(BSON("baz" << 1)); WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll); // Foo <= 20 (descending) IndexScanParams params; @@ -301,12 +301,12 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = -1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar <= 19 (descending) params.descriptor = getIndex(BSON("bar" << 1), coll); params.bounds.startKey = BSON("" << 19); - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // First call to work reads the first result from the children. // The first result is for the first scan over foo is {foo: 20, bar: 20, baz: 20}. 
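// The invalidation step shared by the two AND_HASH tests above, condensed with
// names as in the diff: locate the target document's RecordId, notify the
// stage, then actually delete the document.
for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
    if (coll->docFor(&_opCtx, *it).value()["foo"].numberInt() == 15) {
        ah->invalidate(&_opCtx, *it, INVALIDATION_DELETION);  // stage drops/flags its copy
        remove(coll->docFor(&_opCtx, *it).value());           // then the doc really goes away
        break;
    }
}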
@@ -326,8 +326,8 @@ public: size_t memUsageBefore = ah->getMemUsage(); for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) { - if (0 == deletedObj.woCompare(coll->docFor(&_txn, *it).value())) { - ah->invalidate(&_txn, *it, INVALIDATION_DELETION); + if (0 == deletedObj.woCompare(coll->docFor(&_opCtx, *it).value())) { + ah->invalidate(&_opCtx, *it, INVALIDATION_DELETION); break; } } @@ -350,7 +350,8 @@ public: continue; } WorkingSetMember* wsm = ws.get(id); - ASSERT_NOT_EQUALS(0, deletedObj.woCompare(coll->docFor(&_txn, wsm->recordId).value())); + ASSERT_NOT_EQUALS(0, + deletedObj.woCompare(coll->docFor(&_opCtx, wsm->recordId).value())); ++count; } @@ -362,12 +363,12 @@ public: class QueryStageAndHashTwoLeaf : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -379,7 +380,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll); // Foo <= 20 IndexScanParams params; @@ -389,7 +390,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = -1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar >= 10 params.descriptor = getIndex(BSON("bar" << 1), coll); @@ -397,7 +398,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // foo == bar == baz, and foo<=20, bar>=10, so our values are: // foo == 10, 11, 12, 13, 14, 15. 16, 17, 18, 19, 20 @@ -412,12 +413,12 @@ public: class QueryStageAndHashTwoLeafFirstChildLargeKeys : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -434,7 +435,7 @@ public: // before hashed AND is done reading the first child (stage has to // hold 21 keys in buffer for Foo <= 20). WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 20 * big.size()); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll, 20 * big.size()); // Foo <= 20 IndexScanParams params; @@ -444,7 +445,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = -1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar >= 10 params.descriptor = getIndex(BSON("bar" << 1), coll); @@ -452,7 +453,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Stage execution should fail. 
ASSERT_EQUALS(-1, countResults(ah.get())); @@ -465,12 +466,12 @@ public: class QueryStageAndHashTwoLeafLastChildLargeKeys : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -487,7 +488,7 @@ public: // keys in last child's index are not buffered. There are 6 keys // that satisfy the criteria Foo <= 20 and Bar >= 10 and 5 <= baz <= 15. WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 5 * big.size()); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll, 5 * big.size()); // Foo <= 20 IndexScanParams params; @@ -497,7 +498,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = -1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar >= 10 params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll); @@ -505,7 +506,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // foo == bar == baz, and foo<=20, bar>=10, so our values are: // foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20. @@ -517,12 +518,12 @@ public: class QueryStageAndHashThreeLeaf : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -535,7 +536,7 @@ public: addIndex(BSON("baz" << 1)); WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll); // Foo <= 20 IndexScanParams params; @@ -545,7 +546,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = -1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar >= 10 params.descriptor = getIndex(BSON("bar" << 1), coll); @@ -553,7 +554,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // 5 <= baz <= 15 params.descriptor = getIndex(BSON("baz" << 1), coll); @@ -561,7 +562,7 @@ public: params.bounds.endKey = BSON("" << 15); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are: // foo == 10, 11, 12, 13, 14, 15. 
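// Condensed sketch of the two-leaf AND_HASH wiring that the tests above repeat,
// assuming the fixture helpers (getIndex, addIndex) and coll shown in the diff:
WorkingSet ws;
auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);

IndexScanParams params;
params.descriptor = getIndex(BSON("foo" << 1), coll);
params.bounds.isSimpleRange = true;
params.bounds.startKey = BSON("" << 20);
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;  // foo <= 20, scanned descending
ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));

params.descriptor = getIndex(BSON("bar" << 1), coll);
params.bounds.startKey = BSON("" << 10);
params.direction = 1;   // bar >= 10, scanned ascending
ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// The hash join of the two scans yields documents with foo <= 20 AND bar >= 10.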
@@ -579,12 +580,12 @@ public: class QueryStageAndHashThreeLeafMiddleChildLargeKeys : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -602,7 +603,7 @@ public: // before hashed AND is done reading the second child (stage has to // hold 11 keys in buffer for Foo <= 20 and Bar >= 10). WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 10 * big.size()); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll, 10 * big.size()); // Foo <= 20 IndexScanParams params; @@ -612,7 +613,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = -1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar >= 10 params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll); @@ -620,7 +621,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // 5 <= baz <= 15 params.descriptor = getIndex(BSON("baz" << 1), coll); @@ -628,7 +629,7 @@ public: params.bounds.endKey = BSON("" << 15); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Stage execution should fail. ASSERT_EQUALS(-1, countResults(ah.get())); @@ -639,12 +640,12 @@ public: class QueryStageAndHashWithNothing : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -656,7 +657,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll); // Foo <= 20 IndexScanParams params; @@ -666,7 +667,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = -1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar == 5. Index scan should be eof. 
params.descriptor = getIndex(BSON("bar" << 1), coll); @@ -674,7 +675,7 @@ public: params.bounds.endKey = BSON("" << 5); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); int count = 0; int works = 0; @@ -701,12 +702,12 @@ public: class QueryStageAndHashProducesNothing : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -719,7 +720,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll); // Foo >= 100 IndexScanParams params; @@ -729,7 +730,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar <= 100 params.descriptor = getIndex(BSON("bar" << 1), coll); @@ -741,7 +742,7 @@ public: << ""); params.bounds.boundInclusion = BoundInclusion::kIncludeStartKeyOnly; params.direction = -1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -754,12 +755,12 @@ public: class QueryStageAndHashFirstChildFetched : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -771,7 +772,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll); // Foo <= 20 IndexScanParams params; @@ -781,11 +782,11 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = -1; - IndexScan* firstScan = new IndexScan(&_txn, params, &ws, NULL); + IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, NULL); // First child of the AND_HASH stage is a Fetch. The NULL in the // constructor means there is no filter. - FetchStage* fetch = new FetchStage(&_txn, &ws, firstScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll); ah->addChild(fetch); // Bar >= 10 @@ -794,7 +795,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Check that the AndHash stage returns docs {foo: 10, bar: 10} // through {foo: 20, bar: 20}. 
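// Sketch of the fetched-child variant above, with params/ws/coll as in the
// wiring sketch earlier: one index scan is wrapped in a FetchStage (the NULL
// argument is an empty filter) before being handed to the AND_HASH stage, so
// that child hands over fetched documents rather than bare index entries:
IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, NULL);
FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll);
ah->addChild(fetch);  // the remaining children can stay plain index scans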
@@ -813,12 +814,12 @@ public: class QueryStageAndHashSecondChildFetched : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -830,7 +831,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll); // Foo <= 20 IndexScanParams params; @@ -840,7 +841,7 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = -1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar >= 10 params.descriptor = getIndex(BSON("bar" << 1), coll); @@ -848,11 +849,11 @@ public: params.bounds.endKey = BSONObj(); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - IndexScan* secondScan = new IndexScan(&_txn, params, &ws, NULL); + IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, NULL); // Second child of the AND_HASH stage is a Fetch. The NULL in the // constructor means there is no filter. - FetchStage* fetch = new FetchStage(&_txn, &ws, secondScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, NULL, coll); ah->addChild(fetch); // Check that the AndHash stage returns docs {foo: 10, bar: 10} @@ -869,12 +870,12 @@ public: class QueryStageAndHashDeadChild : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -885,9 +886,9 @@ public: // Child2: NEED_TIME, DEAD { WorkingSet ws; - const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll); + const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws, coll); - auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws); + auto childStage1 = make_unique<QueuedDataStage>(&_opCtx, &ws); { WorkingSetID id = ws.allocate(); WorkingSetMember* wsm = ws.get(id); @@ -897,7 +898,7 @@ public: childStage1->pushBack(id); } - auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws); + auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws); childStage2->pushBack(PlanStage::NEED_TIME); childStage2->pushBack(PlanStage::DEAD); @@ -918,9 +919,9 @@ public: // Child2: Data { WorkingSet ws; - const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll); + const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws, coll); - auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws); + auto childStage1 = make_unique<QueuedDataStage>(&_opCtx, &ws); { WorkingSetID id = ws.allocate(); @@ -932,7 +933,7 @@ public: } childStage1->pushBack(PlanStage::DEAD); - auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws); + auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws); { WorkingSetID id = ws.allocate(); WorkingSetMember* wsm = ws.get(id); @@ -959,9 +960,9 @@ public: // Child2: Data, DEAD { WorkingSet ws; - const auto andHashStage = 
make_unique<AndHashStage>(&_txn, &ws, coll); + const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws, coll); - auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws); + auto childStage1 = make_unique<QueuedDataStage>(&_opCtx, &ws); { WorkingSetID id = ws.allocate(); WorkingSetMember* wsm = ws.get(id); @@ -971,7 +972,7 @@ public: childStage1->pushBack(id); } - auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws); + auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws); { WorkingSetID id = ws.allocate(); WorkingSetMember* wsm = ws.get(id); @@ -1007,12 +1008,12 @@ public: class QueryStageAndSortedInvalidation : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -1024,7 +1025,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll); + auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll); // Scan over foo == 1 IndexScanParams params; @@ -1034,11 +1035,11 @@ public: params.bounds.endKey = BSON("" << 1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Scan over bar == 1 params.descriptor = getIndex(BSON("bar" << 1), coll); - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Get the set of RecordIds in our collection to use later. set<RecordId> data; @@ -1056,8 +1057,8 @@ public: // very first insert, which should be the very first thing in data. Let's invalidate it // and make sure it shows up in the flagged results. ah->saveState(); - ah->invalidate(&_txn, *data.begin(), INVALIDATION_DELETION); - remove(coll->docFor(&_txn, *data.begin()).value()); + ah->invalidate(&_opCtx, *data.begin(), INVALIDATION_DELETION); + remove(coll->docFor(&_opCtx, *data.begin()).value()); ah->restoreState(); // Make sure the nuked obj is actually in the flagged data. @@ -1099,8 +1100,8 @@ public: // Remove a result that's coming up. It's not the 'target' result of the AND so it's // not flagged. ah->saveState(); - ah->invalidate(&_txn, *it, INVALIDATION_DELETION); - remove(coll->docFor(&_txn, *it).value()); + ah->invalidate(&_opCtx, *it, INVALIDATION_DELETION); + remove(coll->docFor(&_opCtx, *it).value()); ah->restoreState(); // Get all results aside from the two we killed. 
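The invalidation tests follow a fixed save/invalidate/restore protocol around each delete, and the rename threads the OperationContext through both the invalidate call and the document lookup. A minimal sketch of that protocol, assuming the fixture's remove() helper and the set<RecordId> data gathered above:

    ah->saveState();                                              // detach from storage
    ah->invalidate(&_opCtx, *data.begin(), INVALIDATION_DELETION);
    remove(coll->docFor(&_opCtx, *data.begin()).value());         // fixture helper
    ah->restoreState();                                           // reattach and resume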
@@ -1131,12 +1132,12 @@ public: class QueryStageAndSortedThreeLeaf : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -1157,7 +1158,7 @@ public: addIndex(BSON("baz" << 1)); WorkingSet ws; - auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll); + auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll); // Scan over foo == 1 IndexScanParams params; @@ -1167,15 +1168,15 @@ public: params.bounds.endKey = BSON("" << 1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // bar == 1 params.descriptor = getIndex(BSON("bar" << 1), coll); - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // baz == 1 params.descriptor = getIndex(BSON("baz" << 1), coll); - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); ASSERT_EQUALS(50, countResults(ah.get())); } @@ -1185,12 +1186,12 @@ public: class QueryStageAndSortedWithNothing : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -1202,7 +1203,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll); + auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll); // Foo == 7. Should be EOF. IndexScanParams params; @@ -1212,7 +1213,7 @@ public: params.bounds.endKey = BSON("" << 7); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Bar == 20, not EOF. params.descriptor = getIndex(BSON("bar" << 1), coll); @@ -1220,7 +1221,7 @@ public: params.bounds.endKey = BSON("" << 20); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -1230,12 +1231,12 @@ public: class QueryStageAndSortedProducesNothing : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -1251,7 +1252,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll); + auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll); // foo == 7. 
IndexScanParams params; @@ -1261,7 +1262,7 @@ public: params.bounds.endKey = BSON("" << 7); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // bar == 20. params.descriptor = getIndex(BSON("bar" << 1), coll); @@ -1269,7 +1270,7 @@ public: params.bounds.endKey = BSON("" << 20); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -1279,12 +1280,12 @@ public: class QueryStageAndSortedByLastChild : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -1296,7 +1297,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - auto ah = make_unique<AndHashStage>(&_txn, &ws, coll); + auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll); // Scan over foo == 1 IndexScanParams params; @@ -1306,13 +1307,13 @@ public: params.bounds.endKey = BSON("" << 1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // Intersect with 7 <= bar < 10000 params.descriptor = getIndex(BSON("bar" << 1), coll); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 10000); - ah->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); WorkingSetID lastId = WorkingSet::INVALID_ID; @@ -1323,11 +1324,11 @@ public: if (PlanStage::ADVANCED != status) { continue; } - BSONObj thisObj = coll->docFor(&_txn, ws.get(id)->recordId).value(); + BSONObj thisObj = coll->docFor(&_opCtx, ws.get(id)->recordId).value(); ASSERT_EQUALS(7 + count, thisObj["bar"].numberInt()); ++count; if (WorkingSet::INVALID_ID != lastId) { - BSONObj lastObj = coll->docFor(&_txn, ws.get(lastId)->recordId).value(); + BSONObj lastObj = coll->docFor(&_opCtx, ws.get(lastId)->recordId).value(); ASSERT_LESS_THAN(lastObj["bar"].woCompare(thisObj["bar"]), 0); } lastId = id; @@ -1344,12 +1345,12 @@ public: class QueryStageAndSortedFirstChildFetched : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -1362,7 +1363,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_txn, &ws, coll); + unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_opCtx, &ws, coll); // Scan over foo == 1 IndexScanParams params; @@ -1372,16 +1373,16 @@ public: params.bounds.endKey = BSON("" << 1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - IndexScan* firstScan = new IndexScan(&_txn, params, &ws, NULL); + IndexScan* 
firstScan = new IndexScan(&_opCtx, params, &ws, NULL); // First child of the AND_SORTED stage is a Fetch. The NULL in the // constructor means there is no filter. - FetchStage* fetch = new FetchStage(&_txn, &ws, firstScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll); as->addChild(fetch); // bar == 1 params.descriptor = getIndex(BSON("bar" << 1), coll); - as->addChild(new IndexScan(&_txn, params, &ws, NULL)); + as->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); for (int i = 0; i < 50; i++) { BSONObj obj = getNext(as.get(), &ws); @@ -1398,12 +1399,12 @@ public: class QueryStageAndSortedSecondChildFetched : public QueryStageAndBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = ctx.getCollection(); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -1416,7 +1417,7 @@ public: addIndex(BSON("bar" << 1)); WorkingSet ws; - unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_txn, &ws, coll); + unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_opCtx, &ws, coll); // Scan over foo == 1 IndexScanParams params; @@ -1426,15 +1427,15 @@ public: params.bounds.endKey = BSON("" << 1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - as->addChild(new IndexScan(&_txn, params, &ws, NULL)); + as->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); // bar == 1 params.descriptor = getIndex(BSON("bar" << 1), coll); - IndexScan* secondScan = new IndexScan(&_txn, params, &ws, NULL); + IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, NULL); // Second child of the AND_SORTED stage is a Fetch. The NULL in the // constructor means there is no filter. 
- FetchStage* fetch = new FetchStage(&_txn, &ws, secondScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, NULL, coll); as->addChild(fetch); for (int i = 0; i < 50; i++) { diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp index c940e7c2baa..ed69f451117 100644 --- a/src/mongo/dbtests/query_stage_cached_plan.cpp +++ b/src/mongo/dbtests/query_stage_cached_plan.cpp @@ -63,7 +63,7 @@ public: addIndex(BSON("a" << 1)); addIndex(BSON("b" << 1)); - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); Collection* collection = ctx.getCollection(); ASSERT(collection); @@ -74,38 +74,38 @@ public: } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj)); } void dropCollection() { - ScopedTransaction transaction(&_txn, MODE_X); - Lock::DBLock dbLock(_txn.lockState(), nss.db(), MODE_X); - Database* database = dbHolder().get(&_txn, nss.db()); + ScopedTransaction transaction(&_opCtx, MODE_X); + Lock::DBLock dbLock(_opCtx.lockState(), nss.db(), MODE_X); + Database* database = dbHolder().get(&_opCtx, nss.db()); if (!database) { return; } - WriteUnitOfWork wuow(&_txn); - database->dropCollection(&_txn, nss.ns()); + WriteUnitOfWork wuow(&_opCtx); + database->dropCollection(&_opCtx, nss.ns()); wuow.commit(); } void insertDocument(Collection* collection, BSONObj obj) { - WriteUnitOfWork wuow(&_txn); + WriteUnitOfWork wuow(&_opCtx); const bool enforceQuota = false; OpDebug* const nullOpDebug = nullptr; - ASSERT_OK(collection->insertDocument(&_txn, obj, nullOpDebug, enforceQuota)); + ASSERT_OK(collection->insertDocument(&_opCtx, obj, nullOpDebug, enforceQuota)); wuow.commit(); } - OperationContext* txn() { - return &_txn; + OperationContext* opCtx() { + return &_opCtx; } protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; WorkingSet _ws; }; @@ -116,7 +116,7 @@ protected: class QueryStageCachedPlanFailure : public QueryStageCachedPlanBase { public: void run() { - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* collection = ctx.getCollection(); ASSERT(collection); @@ -124,7 +124,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(fromjson("{a: {$gte: 8}, b: 1}")); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); @@ -136,20 +136,20 @@ public: // Get planner params. QueryPlannerParams plannerParams; - fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); + fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams); // Queued data stage will return a failure during the cached plan trial period. - auto mockChild = stdx::make_unique<QueuedDataStage>(&_txn, &_ws); + auto mockChild = stdx::make_unique<QueuedDataStage>(&_opCtx, &_ws); mockChild->pushBack(PlanStage::FAILURE); // High enough so that we shouldn't trigger a replan based on works. 
const size_t decisionWorks = 50; CachedPlanStage cachedPlanStage( - &_txn, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release()); + &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release()); // This should succeed after triggering a replan. PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, - _txn.getServiceContext()->getFastClockSource()); + _opCtx.getServiceContext()->getFastClockSource()); ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy)); // Make sure that we get 2 legit results back. @@ -184,7 +184,7 @@ public: class QueryStageCachedPlanHitMaxWorks : public QueryStageCachedPlanBase { public: void run() { - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* collection = ctx.getCollection(); ASSERT(collection); @@ -192,7 +192,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(fromjson("{a: {$gte: 8}, b: 1}")); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); @@ -204,24 +204,24 @@ public: // Get planner params. QueryPlannerParams plannerParams; - fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); + fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams); // Set up queued data stage to take a long time before returning EOF. Should be long // enough to trigger a replan. const size_t decisionWorks = 10; const size_t mockWorks = 1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks); - auto mockChild = stdx::make_unique<QueuedDataStage>(&_txn, &_ws); + auto mockChild = stdx::make_unique<QueuedDataStage>(&_opCtx, &_ws); for (size_t i = 0; i < mockWorks; i++) { mockChild->pushBack(PlanStage::NEED_TIME); } CachedPlanStage cachedPlanStage( - &_txn, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release()); + &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release()); // This should succeed after triggering a replan. PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, - _txn.getServiceContext()->getFastClockSource()); + _opCtx.getServiceContext()->getFastClockSource()); ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy)); // Make sure that we get 2 legit results back. diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp index c1dbe116da6..e74ba02f361 100644 --- a/src/mongo/dbtests/query_stage_collscan.cpp +++ b/src/mongo/dbtests/query_stage_collscan.cpp @@ -64,8 +64,8 @@ static const NamespaceString nss{"unittests.QueryStageCollectionScan"}; class QueryStageCollectionScanBase { public: - QueryStageCollectionScanBase() : _client(&_txn) { - OldClientWriteContext ctx(&_txn, nss.ns()); + QueryStageCollectionScanBase() : _client(&_opCtx) { + OldClientWriteContext ctx(&_opCtx, nss.ns()); for (int i = 0; i < numObj(); ++i) { BSONObjBuilder bob; @@ -75,7 +75,7 @@ public: } virtual ~QueryStageCollectionScanBase() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.dropCollection(nss.ns()); } @@ -84,7 +84,7 @@ public: } int countResults(CollectionScanParams::Direction direction, const BSONObj& filterObj) { - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); // Configure the scan. 
CollectionScanParams params; @@ -102,10 +102,10 @@ public: // Make a scan and have the runner own it. unique_ptr<WorkingSet> ws = make_unique<WorkingSet>(); unique_ptr<PlanStage> ps = - make_unique<CollectionScan>(&_txn, params, ws.get(), filterExpr.get()); + make_unique<CollectionScan>(&_opCtx, params, ws.get(), filterExpr.get()); auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -129,7 +129,7 @@ public: params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -147,7 +147,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; private: DBDirectClient _client; @@ -206,7 +206,7 @@ public: class QueryStageCollscanObjectsInOrderForward : public QueryStageCollectionScanBase { public: void run() { - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); // Configure the scan. CollectionScanParams params; @@ -216,10 +216,10 @@ public: // Make a scan and have the runner own it. unique_ptr<WorkingSet> ws = make_unique<WorkingSet>(); - unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_txn, params, ws.get(), nullptr); + unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_opCtx, params, ws.get(), nullptr); auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -242,7 +242,7 @@ public: class QueryStageCollscanObjectsInOrderBackward : public QueryStageCollectionScanBase { public: void run() { - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); CollectionScanParams params; params.collection = ctx.getCollection(); @@ -250,10 +250,10 @@ public: params.tailable = false; unique_ptr<WorkingSet> ws = make_unique<WorkingSet>(); - unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_txn, params, ws.get(), nullptr); + unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_opCtx, params, ws.get(), nullptr); auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -276,7 +276,7 @@ public: class QueryStageCollscanInvalidateUpcomingObject : public QueryStageCollectionScanBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); Collection* coll = ctx.getCollection(); @@ -291,7 +291,7 @@ public: params.tailable = false; WorkingSet ws; - unique_ptr<CollectionScan> scan(new 
CollectionScan(&_txn, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); int count = 0; while (count < 10) { @@ -299,7 +299,7 @@ public: PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); - ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(), + ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(), member->obj.value()["foo"].numberInt()); ++count; } @@ -308,11 +308,11 @@ public: // Remove recordIds[count]. scan->saveState(); { - WriteUnitOfWork wunit(&_txn); - scan->invalidate(&_txn, recordIds[count], INVALIDATION_DELETION); + WriteUnitOfWork wunit(&_opCtx); + scan->invalidate(&_opCtx, recordIds[count], INVALIDATION_DELETION); wunit.commit(); // to avoid rollback of the invalidate } - remove(coll->docFor(&_txn, recordIds[count]).value()); + remove(coll->docFor(&_opCtx, recordIds[count]).value()); scan->restoreState(); // Skip over recordIds[count]. @@ -324,7 +324,7 @@ public: PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); - ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(), + ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(), member->obj.value()["foo"].numberInt()); ++count; } @@ -342,7 +342,7 @@ public: class QueryStageCollscanInvalidateUpcomingObjectBackward : public QueryStageCollectionScanBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); Collection* coll = ctx.getCollection(); // Get the RecordIds that would be returned by an in-order scan. @@ -356,7 +356,7 @@ public: params.tailable = false; WorkingSet ws; - unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); int count = 0; while (count < 10) { @@ -364,7 +364,7 @@ public: PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); - ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(), + ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(), member->obj.value()["foo"].numberInt()); ++count; } @@ -373,11 +373,11 @@ public: // Remove recordIds[count]. scan->saveState(); { - WriteUnitOfWork wunit(&_txn); - scan->invalidate(&_txn, recordIds[count], INVALIDATION_DELETION); + WriteUnitOfWork wunit(&_opCtx); + scan->invalidate(&_opCtx, recordIds[count], INVALIDATION_DELETION); wunit.commit(); // to avoid rollback of the invalidate } - remove(coll->docFor(&_txn, recordIds[count]).value()); + remove(coll->docFor(&_opCtx, recordIds[count]).value()); scan->restoreState(); // Skip over recordIds[count]. 
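Unlike the AND-stage tests, the collection-scan tests wrap the invalidate in a WriteUnitOfWork so that it commits and cannot be rolled back before the scan resumes. A sketch of that variant, assuming the fixture's scan, recordIds, count, and remove():

    scan->saveState();
    {
        WriteUnitOfWork wunit(&_opCtx);
        scan->invalidate(&_opCtx, recordIds[count], INVALIDATION_DELETION);
        wunit.commit();  // commit so the invalidate survives
    }
    remove(coll->docFor(&_opCtx, recordIds[count]).value());
    scan->restoreState();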
@@ -389,7 +389,7 @@ public: PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); - ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(), + ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(), member->obj.value()["foo"].numberInt()); ++count; } diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp index ccc08cab5a5..6a851dc9258 100644 --- a/src/mongo/dbtests/query_stage_count.cpp +++ b/src/mongo/dbtests/query_stage_count.cpp @@ -54,9 +54,9 @@ const int kInterjections = kDocuments; class CountStageTest { public: CountStageTest() - : _scopedXact(&_txn, MODE_IX), - _dbLock(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X), - _ctx(&_txn, ns()), + : _scopedXact(&_opCtx, MODE_IX), + _dbLock(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X), + _ctx(&_opCtx, ns()), _coll(NULL) {} virtual ~CountStageTest() {} @@ -64,12 +64,12 @@ public: virtual void interject(CountStage&, int) {} virtual void setup() { - WriteUnitOfWork wunit(&_txn); + WriteUnitOfWork wunit(&_opCtx); - _ctx.db()->dropCollection(&_txn, ns()); - _coll = _ctx.db()->createCollection(&_txn, ns()); + _ctx.db()->dropCollection(&_opCtx, ns()); + _coll = _ctx.db()->createCollection(&_opCtx, ns()); - _coll->getIndexCatalog()->createIndexOnEmptyCollection(&_txn, + _coll->getIndexCatalog()->createIndexOnEmptyCollection(&_opCtx, BSON("key" << BSON("x" << 1) << "name" << "x_1" @@ -94,7 +94,7 @@ public: params.direction = CollectionScanParams::FORWARD; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -107,27 +107,27 @@ public: } void insert(const BSONObj& doc) { - WriteUnitOfWork wunit(&_txn); + WriteUnitOfWork wunit(&_opCtx); OpDebug* const nullOpDebug = nullptr; - _coll->insertDocument(&_txn, doc, nullOpDebug, false); + _coll->insertDocument(&_opCtx, doc, nullOpDebug, false); wunit.commit(); } void remove(const RecordId& recordId) { - WriteUnitOfWork wunit(&_txn); + WriteUnitOfWork wunit(&_opCtx); OpDebug* const nullOpDebug = nullptr; - _coll->deleteDocument(&_txn, recordId, nullOpDebug); + _coll->deleteDocument(&_opCtx, recordId, nullOpDebug); wunit.commit(); } void update(const RecordId& oldrecordId, const BSONObj& newDoc) { - WriteUnitOfWork wunit(&_txn); - BSONObj oldDoc = _coll->getRecordStore()->dataFor(&_txn, oldrecordId).releaseToBson(); + WriteUnitOfWork wunit(&_opCtx); + BSONObj oldDoc = _coll->getRecordStore()->dataFor(&_opCtx, oldrecordId).releaseToBson(); OplogUpdateEntryArgs args; args.ns = _coll->ns().ns(); - _coll->updateDocument(&_txn, + _coll->updateDocument(&_opCtx, oldrecordId, - Snapshotted<BSONObj>(_txn.recoveryUnit()->getSnapshotId(), oldDoc), + Snapshotted<BSONObj>(_opCtx.recoveryUnit()->getSnapshotId(), oldDoc), newDoc, false, true, @@ -163,7 +163,7 @@ public: const bool useRecordStoreCount = false; CountStageParams params(request, useRecordStoreCount); - CountStage countStage(&_txn, _coll, std::move(params), ws.get(), scan); + CountStage countStage(&_opCtx, _coll, std::move(params), ws.get(), scan); const CountStats* stats = runCount(countStage); @@ -202,7 +202,7 @@ public: IndexScan* createIndexScan(MatchExpression* expr, WorkingSet* ws) { IndexCatalog* catalog = _coll->getIndexCatalog(); 
std::vector<IndexDescriptor*> indexes; - catalog->findIndexesByKeyPattern(&_txn, BSON("x" << 1), false, &indexes); + catalog->findIndexesByKeyPattern(&_opCtx, BSON("x" << 1), false, &indexes); ASSERT_EQ(indexes.size(), 1U); IndexDescriptor* descriptor = indexes[0]; @@ -216,7 +216,7 @@ public: params.direction = 1; // This child stage gets owned and freed by its parent CountStage - return new IndexScan(&_txn, params, ws, expr); + return new IndexScan(&_opCtx, params, ws, expr); } CollectionScan* createCollScan(MatchExpression* expr, WorkingSet* ws) { @@ -224,7 +224,7 @@ public: params.collection = _coll; // This child stage gets owned and freed by its parent CountStage - return new CollectionScan(&_txn, params, ws, expr); + return new CollectionScan(&_opCtx, params, ws, expr); } static const char* ns() { @@ -234,7 +234,7 @@ public: protected: vector<RecordId> _recordIds; const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; ScopedTransaction _scopedXact; Lock::DBLock _dbLock; OldClientContext _ctx; @@ -306,11 +306,11 @@ public: if (interjection == 0) { // At this point, our first interjection, we've counted _recordIds[0] // and are about to count _recordIds[1] - WriteUnitOfWork wunit(&_txn); - count_stage.invalidate(&_txn, _recordIds[interjection], INVALIDATION_DELETION); + WriteUnitOfWork wunit(&_opCtx); + count_stage.invalidate(&_opCtx, _recordIds[interjection], INVALIDATION_DELETION); remove(_recordIds[interjection]); - count_stage.invalidate(&_txn, _recordIds[interjection + 1], INVALIDATION_DELETION); + count_stage.invalidate(&_opCtx, _recordIds[interjection + 1], INVALIDATION_DELETION); remove(_recordIds[interjection + 1]); wunit.commit(); } @@ -331,12 +331,12 @@ public: // At the point which this is called we are in between the first and second record void interject(CountStage& count_stage, int interjection) { if (interjection == 0) { - count_stage.invalidate(&_txn, _recordIds[0], INVALIDATION_MUTATION); - OID id1 = _coll->docFor(&_txn, _recordIds[0]).value().getField("_id").OID(); + count_stage.invalidate(&_opCtx, _recordIds[0], INVALIDATION_MUTATION); + OID id1 = _coll->docFor(&_opCtx, _recordIds[0]).value().getField("_id").OID(); update(_recordIds[0], BSON("_id" << id1 << "x" << 100)); - count_stage.invalidate(&_txn, _recordIds[1], INVALIDATION_MUTATION); - OID id2 = _coll->docFor(&_txn, _recordIds[1]).value().getField("_id").OID(); + count_stage.invalidate(&_opCtx, _recordIds[1], INVALIDATION_MUTATION); + OID id2 = _coll->docFor(&_opCtx, _recordIds[1]).value().getField("_id").OID(); update(_recordIds[1], BSON("_id" << id2 << "x" << 100)); } } diff --git a/src/mongo/dbtests/query_stage_count_scan.cpp b/src/mongo/dbtests/query_stage_count_scan.cpp index 9d2e82fa118..87d2b87b283 100644 --- a/src/mongo/dbtests/query_stage_count_scan.cpp +++ b/src/mongo/dbtests/query_stage_count_scan.cpp @@ -52,15 +52,15 @@ using std::shared_ptr; class CountBase { public: - CountBase() : _client(&_txn) {} + CountBase() : _client(&_opCtx) {} virtual ~CountBase() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); _client.dropCollection(ns()); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj)); } void insert(const BSONObj& obj) { @@ -93,7 +93,7 @@ public: IndexDescriptor* getIndex(Database* db, const BSONObj& obj) { Collection* collection = db->getCollection(ns()); 
std::vector<IndexDescriptor*> indexes; - collection->getIndexCatalog()->findIndexesByKeyPattern(&_txn, obj, false, &indexes); + collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes); return indexes.empty() ? nullptr : indexes[0]; } @@ -103,7 +103,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; private: DBDirectClient _client; @@ -116,7 +116,7 @@ private: class QueryStageCountScanDups : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert some docs insert(BSON("a" << BSON_ARRAY(5 << 7))); @@ -135,7 +135,7 @@ public: params.endKeyInclusive = true; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(2, numCounted); @@ -148,7 +148,7 @@ public: class QueryStageCountScanInclusiveBounds : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert some docs for (int i = 0; i < 10; ++i) { @@ -167,7 +167,7 @@ public: params.endKeyInclusive = true; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(5, numCounted); @@ -180,7 +180,7 @@ public: class QueryStageCountScanExclusiveBounds : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert some docs for (int i = 0; i < 10; ++i) { @@ -199,7 +199,7 @@ public: params.endKeyInclusive = false; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(3, numCounted); @@ -212,7 +212,7 @@ public: class QueryStageCountScanLowerBound : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert doc, add index insert(BSON("a" << 2)); @@ -227,7 +227,7 @@ public: params.endKeyInclusive = false; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(0, numCounted); @@ -240,7 +240,7 @@ public: class QueryStageCountScanNothingInInterval : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert documents, add index insert(BSON("a" << 2)); @@ -256,7 +256,7 @@ public: params.endKeyInclusive = false; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(0, numCounted); @@ -270,7 +270,7 @@ public: class QueryStageCountScanNothingInIntervalFirstMatchTooHigh : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert some documents, add index insert(BSON("a" << 2)); @@ -286,7 +286,7 @@ public: params.endKeyInclusive = true; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(0, numCounted); @@ -300,7 +300,7 @@ public: class QueryStageCountScanNoChangeDuringYield : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert documents, add 
index for (int i = 0; i < 10; ++i) { @@ -317,7 +317,7 @@ public: params.endKeyInclusive = true; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); WorkingSetID wsid; int numCounted = 0; @@ -353,7 +353,7 @@ public: class QueryStageCountScanDeleteDuringYield : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert documents, add index for (int i = 0; i < 10; ++i) { @@ -370,7 +370,7 @@ public: params.endKeyInclusive = true; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); WorkingSetID wsid; int numCounted = 0; @@ -409,7 +409,7 @@ public: class QueryStageCountScanInsertNewDocsDuringYield : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert documents, add index for (int i = 0; i < 10; ++i) { @@ -426,7 +426,7 @@ public: params.endKeyInclusive = true; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); WorkingSetID wsid; int numCounted = 0; @@ -468,7 +468,7 @@ public: class QueryStageCountScanBecomesMultiKeyDuringYield : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert documents, add index for (int i = 0; i < 10; ++i) { @@ -485,7 +485,7 @@ public: params.endKeyInclusive = true; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); WorkingSetID wsid; int numCounted = 0; @@ -523,7 +523,7 @@ public: class QueryStageCountScanUnusedKeys : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert docs, add index for (int i = 0; i < 10; ++i) { @@ -545,7 +545,7 @@ public: params.endKeyInclusive = true; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(7, numCounted); @@ -558,7 +558,7 @@ public: class QueryStageCountScanUnusedEndKey : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert docs, add index for (int i = 0; i < 10; ++i) { @@ -578,7 +578,7 @@ public: params.endKeyInclusive = true; // yes? 
WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(9, numCounted); @@ -591,7 +591,7 @@ public: class QueryStageCountScanKeyBecomesUnusedDuringYield : public CountBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // Insert documents, add index for (int i = 0; i < 10; ++i) { @@ -608,7 +608,7 @@ public: params.endKeyInclusive = true; WorkingSet ws; - CountScan count(&_txn, params, &ws); + CountScan count(&_opCtx, params, &ws); WorkingSetID wsid; int numCounted = 0; diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp index b487bc2c655..b3892c41ffa 100644 --- a/src/mongo/dbtests/query_stage_delete.cpp +++ b/src/mongo/dbtests/query_stage_delete.cpp @@ -61,8 +61,8 @@ static const NamespaceString nss("unittests.QueryStageDelete"); class QueryStageDeleteBase { public: - QueryStageDeleteBase() : _client(&_txn) { - OldClientWriteContext ctx(&_txn, nss.ns()); + QueryStageDeleteBase() : _client(&_opCtx) { + OldClientWriteContext ctx(&_opCtx, nss.ns()); for (size_t i = 0; i < numObj(); ++i) { BSONObjBuilder bob; @@ -73,7 +73,7 @@ public: } virtual ~QueryStageDeleteBase() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.dropCollection(nss.ns()); } @@ -91,7 +91,7 @@ public: params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -107,7 +107,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto statusWithCQ = CanonicalQuery::canonicalize( - &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions()); + &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); return std::move(statusWithCQ.getValue()); } @@ -118,7 +118,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; private: DBDirectClient _client; @@ -132,7 +132,7 @@ private: class QueryStageDeleteInvalidateUpcomingObject : public QueryStageDeleteBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); Collection* coll = ctx.getCollection(); @@ -151,11 +151,11 @@ public: deleteStageParams.isMulti = true; WorkingSet ws; - DeleteStage deleteStage(&_txn, + DeleteStage deleteStage(&_opCtx, deleteStageParams, &ws, coll, - new CollectionScan(&_txn, collScanParams, &ws, NULL)); + new CollectionScan(&_opCtx, collScanParams, &ws, NULL)); const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage.getSpecificStats()); @@ -170,11 +170,11 @@ public: // Remove recordIds[targetDocIndex]; deleteStage.saveState(); { - WriteUnitOfWork wunit(&_txn); - deleteStage.invalidate(&_txn, recordIds[targetDocIndex], INVALIDATION_DELETION); + WriteUnitOfWork wunit(&_opCtx); + deleteStage.invalidate(&_opCtx, recordIds[targetDocIndex], INVALIDATION_DELETION); wunit.commit(); } - BSONObj targetDoc = coll->docFor(&_txn, recordIds[targetDocIndex]).value(); + BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[targetDocIndex]).value(); ASSERT(!targetDoc.isEmpty()); 
remove(targetDoc); deleteStage.restoreState(); @@ -198,7 +198,7 @@ class QueryStageDeleteReturnOldDoc : public QueryStageDeleteBase { public: void run() { // Various variables we'll need. - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); Collection* coll = ctx.getCollection(); const int targetDocIndex = 0; const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex)); @@ -211,7 +211,7 @@ public: // Configure a QueuedDataStage to pass the first object in the collection back in a // RID_AND_OBJ state. - auto qds = make_unique<QueuedDataStage>(&_txn, ws.get()); + auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get()); WorkingSetID id = ws->allocate(); WorkingSetMember* member = ws->get(id); member->recordId = recordIds[targetDocIndex]; @@ -226,7 +226,7 @@ public: deleteParams.canonicalQuery = cq.get(); const auto deleteStage = - make_unique<DeleteStage>(&_txn, deleteParams, ws.get(), coll, qds.release()); + make_unique<DeleteStage>(&_opCtx, deleteParams, ws.get(), coll, qds.release()); const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage->getSpecificStats()); @@ -267,14 +267,14 @@ class QueryStageDeleteSkipOwnedObjects : public QueryStageDeleteBase { public: void run() { // Various variables we'll need. - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); Collection* coll = ctx.getCollection(); const BSONObj query = BSONObj(); const auto ws = make_unique<WorkingSet>(); const unique_ptr<CanonicalQuery> cq(canonicalize(query)); // Configure a QueuedDataStage to pass an OWNED_OBJ to the delete stage. - auto qds = make_unique<QueuedDataStage>(&_txn, ws.get()); + auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get()); { WorkingSetID id = ws->allocate(); WorkingSetMember* member = ws->get(id); @@ -289,7 +289,7 @@ public: deleteParams.canonicalQuery = cq.get(); const auto deleteStage = - make_unique<DeleteStage>(&_txn, deleteParams, ws.get(), coll, qds.release()); + make_unique<DeleteStage>(&_opCtx, deleteParams, ws.get(), coll, qds.release()); const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage->getSpecificStats()); // Call work, passing the set up member to the delete stage. diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp index d0bab154208..f5b2ac7bf48 100644 --- a/src/mongo/dbtests/query_stage_distinct.cpp +++ b/src/mongo/dbtests/query_stage_distinct.cpp @@ -52,14 +52,14 @@ static const NamespaceString nss{"unittests.QueryStageDistinct"}; class DistinctBase { public: - DistinctBase() : _client(&_txn) {} + DistinctBase() : _client(&_opCtx) {} virtual ~DistinctBase() { _client.dropCollection(nss.ns()); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj)); } void insert(const BSONObj& obj) { @@ -95,7 +95,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; private: DBDirectClient _client; @@ -121,12 +121,12 @@ public: // Make an index on a:1 addIndex(BSON("a" << 1)); - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* coll = ctx.getCollection(); // Set up the distinct stage. 
std::vector<IndexDescriptor*> indexes; - coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, BSON("a" << 1), false, &indexes); + coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, BSON("a" << 1), false, &indexes); ASSERT_EQ(indexes.size(), 1U); DistinctParams params; @@ -141,7 +141,7 @@ public: params.bounds.fields.push_back(oil); WorkingSet ws; - DistinctScan distinct(&_txn, params, &ws); + DistinctScan distinct(&_opCtx, params, &ws); WorkingSetID wsid; // Get our first result. @@ -188,17 +188,17 @@ public: // Make an index on a:1 addIndex(BSON("a" << 1)); - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* coll = ctx.getCollection(); // Set up the distinct stage. std::vector<IndexDescriptor*> indexes; - coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, BSON("a" << 1), false, &indexes); + coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, BSON("a" << 1), false, &indexes); verify(indexes.size() == 1); DistinctParams params; params.descriptor = indexes[0]; - ASSERT_TRUE(params.descriptor->isMultikey(&_txn)); + ASSERT_TRUE(params.descriptor->isMultikey(&_opCtx)); verify(params.descriptor); params.direction = 1; @@ -211,7 +211,7 @@ public: params.bounds.fields.push_back(oil); WorkingSet ws; - DistinctScan distinct(&_txn, params, &ws); + DistinctScan distinct(&_opCtx, params, &ws); // We should see each number in the range [1, 6] exactly once. std::set<int> seen; @@ -257,12 +257,12 @@ public: addIndex(BSON("a" << 1 << "b" << 1)); - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* coll = ctx.getCollection(); std::vector<IndexDescriptor*> indices; coll->getIndexCatalog()->findIndexesByKeyPattern( - &_txn, BSON("a" << 1 << "b" << 1), false, &indices); + &_opCtx, BSON("a" << 1 << "b" << 1), false, &indices); ASSERT_EQ(1U, indices.size()); DistinctParams params; @@ -282,7 +282,7 @@ public: params.bounds.fields.push_back(bOil); WorkingSet ws; - DistinctScan distinct(&_txn, params, &ws); + DistinctScan distinct(&_opCtx, params, &ws); WorkingSetID wsid; PlanStage::StageState state; diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp index 9e930375317..982da6ad70a 100644 --- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp +++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp @@ -56,10 +56,10 @@ public: const char* inputStr, const char* expectedStr, CollatorInterface* collator = nullptr) { - auto txn = _serviceContext.makeOperationContext(); + auto opCtx = _serviceContext.makeOperationContext(); WorkingSet ws; - auto queuedDataStage = stdx::make_unique<QueuedDataStage>(txn.get(), &ws); + auto queuedDataStage = stdx::make_unique<QueuedDataStage>(opCtx.get(), &ws); BSONObj inputObj = fromjson(inputStr); BSONElement inputElt = inputObj["input"]; ASSERT(inputElt.isABSONObj()); @@ -79,8 +79,8 @@ public: // Initialization. 
BSONObj pattern = fromjson(patternStr); auto sortKeyGen = stdx::make_unique<SortKeyGeneratorStage>( - txn.get(), queuedDataStage.release(), &ws, pattern, BSONObj(), collator); - EnsureSortedStage ess(txn.get(), pattern, &ws, sortKeyGen.release()); + opCtx.get(), queuedDataStage.release(), &ws, pattern, BSONObj(), collator); + EnsureSortedStage ess(opCtx.get(), pattern, &ws, sortKeyGen.release()); WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = PlanStage::NEED_TIME; @@ -112,13 +112,13 @@ protected: }; TEST_F(QueryStageEnsureSortedTest, EnsureSortedEmptyWorkingSet) { - auto txn = _serviceContext.makeOperationContext(); + auto opCtx = _serviceContext.makeOperationContext(); WorkingSet ws; - auto queuedDataStage = stdx::make_unique<QueuedDataStage>(txn.get(), &ws); + auto queuedDataStage = stdx::make_unique<QueuedDataStage>(opCtx.get(), &ws); auto sortKeyGen = stdx::make_unique<SortKeyGeneratorStage>( - txn.get(), queuedDataStage.release(), &ws, BSONObj(), BSONObj(), nullptr); - EnsureSortedStage ess(txn.get(), BSONObj(), &ws, sortKeyGen.release()); + opCtx.get(), queuedDataStage.release(), &ws, BSONObj(), BSONObj(), nullptr); + EnsureSortedStage ess(opCtx.get(), BSONObj(), &ws, sortKeyGen.release()); WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = PlanStage::NEED_TIME; diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp index fef93098606..709e2d1b760 100644 --- a/src/mongo/dbtests/query_stage_fetch.cpp +++ b/src/mongo/dbtests/query_stage_fetch.cpp @@ -56,14 +56,14 @@ using stdx::make_unique; class QueryStageFetchBase { public: - QueryStageFetchBase() : _client(&_txn) {} + QueryStageFetchBase() : _client(&_opCtx) {} virtual ~QueryStageFetchBase() { _client.dropCollection(ns()); } void getRecordIds(set<RecordId>* out, Collection* coll) { - auto cursor = coll->getCursor(&_txn); + auto cursor = coll->getCursor(&_opCtx); while (auto record = cursor->next()) { out->insert(record->id); } @@ -83,7 +83,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; DBDirectClient _client; }; @@ -94,12 +94,12 @@ protected: class FetchStageAlreadyFetched : public QueryStageFetchBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -112,14 +112,14 @@ public: ASSERT_EQUALS(size_t(1), recordIds.size()); // Create a mock stage that returns the WSM. - auto mockStage = make_unique<QueuedDataStage>(&_txn, &ws); + auto mockStage = make_unique<QueuedDataStage>(&_opCtx, &ws); // Mock data. { WorkingSetID id = ws.allocate(); WorkingSetMember* mockMember = ws.get(id); mockMember->recordId = *recordIds.begin(); - mockMember->obj = coll->docFor(&_txn, mockMember->recordId); + mockMember->obj = coll->docFor(&_opCtx, mockMember->recordId); ws.transitionToRecordIdAndObj(id); // Points into our DB. 
mockStage->pushBack(id); @@ -135,7 +135,7 @@ public: } unique_ptr<FetchStage> fetchStage( - new FetchStage(&_txn, &ws, mockStage.release(), NULL, coll)); + new FetchStage(&_opCtx, &ws, mockStage.release(), NULL, coll)); WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state; @@ -158,14 +158,14 @@ public: class FetchStageFilter : public QueryStageFetchBase { public: void run() { - ScopedTransaction transaction(&_txn, MODE_IX); - Lock::DBLock lk(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X); - OldClientContext ctx(&_txn, ns()); + ScopedTransaction transaction(&_opCtx, MODE_IX); + Lock::DBLock lk(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X); + OldClientContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -178,7 +178,7 @@ public: ASSERT_EQUALS(size_t(1), recordIds.size()); // Create a mock stage that returns the WSM. - auto mockStage = make_unique<QueuedDataStage>(&_txn, &ws); + auto mockStage = make_unique<QueuedDataStage>(&_opCtx, &ws); // Mock data. { @@ -203,7 +203,7 @@ public: // Matcher requires that foo==6 but we only have data with foo==5. unique_ptr<FetchStage> fetchStage( - new FetchStage(&_txn, &ws, mockStage.release(), filterExpr.get(), coll)); + new FetchStage(&_opCtx, &ws, mockStage.release(), filterExpr.get(), coll)); // First call should return a fetch request as it's not in memory. WorkingSetID id = WorkingSet::INVALID_ID; diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp index 45ee7723ecf..7ee19d8f352 100644 --- a/src/mongo/dbtests/query_stage_ixscan.cpp +++ b/src/mongo/dbtests/query_stage_ixscan.cpp @@ -46,21 +46,21 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2; class IndexScanTest { public: IndexScanTest() - : _scopedXact(&_txn, MODE_IX), - _dbLock(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X), - _ctx(&_txn, ns()), + : _scopedXact(&_opCtx, MODE_IX), + _dbLock(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X), + _ctx(&_opCtx, ns()), _coll(NULL) {} virtual ~IndexScanTest() {} virtual void setup() { - WriteUnitOfWork wunit(&_txn); + WriteUnitOfWork wunit(&_opCtx); - _ctx.db()->dropCollection(&_txn, ns()); - _coll = _ctx.db()->createCollection(&_txn, ns()); + _ctx.db()->dropCollection(&_opCtx, ns()); + _coll = _ctx.db()->createCollection(&_opCtx, ns()); ASSERT_OK(_coll->getIndexCatalog()->createIndexOnEmptyCollection( - &_txn, + &_opCtx, BSON("ns" << ns() << "key" << BSON("x" << 1) << "name" << DBClientBase::genIndexName(BSON("x" << 1)) << "v" @@ -70,9 +70,9 @@ public: } void insert(const BSONObj& doc) { - WriteUnitOfWork wunit(&_txn); + WriteUnitOfWork wunit(&_opCtx); OpDebug* const nullOpDebug = nullptr; - ASSERT_OK(_coll->insertDocument(&_txn, doc, nullOpDebug, false)); + ASSERT_OK(_coll->insertDocument(&_opCtx, doc, nullOpDebug, false)); wunit.commit(); } @@ -100,7 +100,7 @@ public: IndexScan* createIndexScanSimpleRange(BSONObj startKey, BSONObj endKey) { IndexCatalog* catalog = _coll->getIndexCatalog(); std::vector<IndexDescriptor*> indexes; - catalog->findIndexesByKeyPattern(&_txn, BSON("x" << 1), false, &indexes); + catalog->findIndexesByKeyPattern(&_opCtx, BSON("x" << 1), false, &indexes); ASSERT_EQ(indexes.size(), 1U); // We are not testing indexing here so use maximal bounds @@ -114,7 +114,7 @@ public: // This child stage 
gets owned and freed by the caller. MatchExpression* filter = NULL; - return new IndexScan(&_txn, params, &_ws, filter); + return new IndexScan(&_opCtx, params, &_ws, filter); } IndexScan* createIndexScan(BSONObj startKey, @@ -124,7 +124,7 @@ public: int direction = 1) { IndexCatalog* catalog = _coll->getIndexCatalog(); std::vector<IndexDescriptor*> indexes; - catalog->findIndexesByKeyPattern(&_txn, BSON("x" << 1), false, &indexes); + catalog->findIndexesByKeyPattern(&_opCtx, BSON("x" << 1), false, &indexes); ASSERT_EQ(indexes.size(), 1U); IndexScanParams params; @@ -139,7 +139,7 @@ public: params.bounds.fields.push_back(oil); MatchExpression* filter = NULL; - return new IndexScan(&_txn, params, &_ws, filter); + return new IndexScan(&_opCtx, params, &_ws, filter); } static const char* ns() { @@ -148,7 +148,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; ScopedTransaction _scopedXact; Lock::DBLock _dbLock; diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp index 782cc0eaf91..c5a57396987 100644 --- a/src/mongo/dbtests/query_stage_keep.cpp +++ b/src/mongo/dbtests/query_stage_keep.cpp @@ -60,14 +60,14 @@ using stdx::make_unique; class QueryStageKeepBase { public: - QueryStageKeepBase() : _client(&_txn) {} + QueryStageKeepBase() : _client(&_opCtx) {} virtual ~QueryStageKeepBase() { _client.dropCollection(ns()); } void getLocs(set<RecordId>* out, Collection* coll) { - auto cursor = coll->getCursor(&_txn); + auto cursor = coll->getCursor(&_opCtx); while (auto record = cursor->next()) { out->insert(record->id); } @@ -98,7 +98,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; DBDirectClient _client; }; @@ -111,12 +111,12 @@ protected: class KeepStageBasic : public QueryStageKeepBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -142,12 +142,12 @@ public: params.direction = CollectionScanParams::FORWARD; params.tailable = false; params.start = RecordId(); - CollectionScan* cs = new CollectionScan(&_txn, params, &ws, NULL); + CollectionScan* cs = new CollectionScan(&_opCtx, params, &ws, NULL); // Create a KeepMutations stage to merge in the 10 flagged objects. 
// Takes ownership of 'cs' MatchExpression* nullFilter = NULL; - auto keep = make_unique<KeepMutationsStage>(&_txn, nullFilter, &ws, cs); + auto keep = make_unique<KeepMutationsStage>(&_opCtx, nullFilter, &ws, cs); for (size_t i = 0; i < 10; ++i) { WorkingSetID id = getNextResult(keep.get()); @@ -178,13 +178,13 @@ public: class KeepStageFlagAdditionalAfterStreamingStarts : public QueryStageKeepBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } WorkingSet ws; @@ -195,7 +195,8 @@ public: // Create a KeepMutationsStage with an EOF child, and flag 50 objects. We expect these // objects to be returned by the KeepMutationsStage. MatchExpression* nullFilter = NULL; - auto keep = make_unique<KeepMutationsStage>(&_txn, nullFilter, &ws, new EOFStage(&_txn)); + auto keep = + make_unique<KeepMutationsStage>(&_opCtx, nullFilter, &ws, new EOFStage(&_opCtx)); for (size_t i = 0; i < 50; ++i) { WorkingSetID id = ws.allocate(); WorkingSetMember* member = ws.get(id); diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp index e69c0e9d18b..7eb05f0efcc 100644 --- a/src/mongo/dbtests/query_stage_merge_sort.cpp +++ b/src/mongo/dbtests/query_stage_merge_sort.cpp @@ -57,20 +57,20 @@ using stdx::make_unique; class QueryStageMergeSortTestBase { public: - QueryStageMergeSortTestBase() : _client(&_txn) {} + QueryStageMergeSortTestBase() : _client(&_opCtx) {} virtual ~QueryStageMergeSortTestBase() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); _client.dropCollection(ns()); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj)); } IndexDescriptor* getIndex(const BSONObj& obj, Collection* coll) { std::vector<IndexDescriptor*> indexes; - coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, obj, false, &indexes); + coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes); return indexes.empty() ? 
nullptr : indexes[0]; } @@ -83,7 +83,7 @@ public: } void getRecordIds(set<RecordId>* out, Collection* coll) { - auto cursor = coll->getCursor(&_txn); + auto cursor = coll->getCursor(&_opCtx); while (auto record = cursor->next()) { out->insert(record->id); } @@ -109,7 +109,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; private: DBDirectClient _client; @@ -120,12 +120,12 @@ private: class QueryStageMergeSortPrefixIndex : public QueryStageMergeSortTestBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -146,7 +146,7 @@ public: // Sort by c:1 MergeSortStageParams msparams; msparams.pattern = BSON("c" << 1); - MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll); + MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll); // a:1 IndexScanParams params; @@ -156,17 +156,17 @@ public: params.bounds.endKey = objWithMaxKey(1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); // b:1 params.descriptor = getIndex(secondIndex, coll); - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); unique_ptr<FetchStage> fetchStage = - make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll); + make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); // Must fetch if we want to easily pull out an obj. 
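// Once the executor is made just below, every merge-sort test in this file drains
// it with the same loop. A minimal sketch of that shared tail, assuming the fixture
// members used above (_opCtx, ws, coll); this is illustrative, not part of the patch:
BSONObj obj;
while (PlanExecutor::ADVANCED == exec->getNext(&obj, NULL)) {
    // each 'obj' arrives in merge-key ('c') order
}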
auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -191,12 +191,12 @@ public: class QueryStageMergeSortDups : public QueryStageMergeSortTestBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -217,7 +217,7 @@ public: // Sort by c:1 MergeSortStageParams msparams; msparams.pattern = BSON("c" << 1); - MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll); + MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll); // a:1 IndexScanParams params; @@ -227,16 +227,16 @@ public: params.bounds.endKey = objWithMaxKey(1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); // b:1 params.descriptor = getIndex(secondIndex, coll); - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); unique_ptr<FetchStage> fetchStage = - make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll); + make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -261,12 +261,12 @@ public: class QueryStageMergeSortDupsNoDedup : public QueryStageMergeSortTestBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -287,7 +287,7 @@ public: MergeSortStageParams msparams; msparams.dedup = false; msparams.pattern = BSON("c" << 1); - MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll); + MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll); // a:1 IndexScanParams params; @@ -297,16 +297,16 @@ public: params.bounds.endKey = objWithMaxKey(1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); // b:1 params.descriptor = getIndex(secondIndex, coll); - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); unique_ptr<FetchStage> fetchStage = - make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll); + make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, 
std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -332,12 +332,12 @@ public: class QueryStageMergeSortPrefixIndexReverse : public QueryStageMergeSortTestBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -359,7 +359,7 @@ public: // Sort by c:-1 MergeSortStageParams msparams; msparams.pattern = BSON("c" << -1); - MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll); + MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll); // a:1 IndexScanParams params; @@ -370,16 +370,16 @@ public: params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; // This is the direction along the index. params.direction = 1; - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); // b:1 params.descriptor = getIndex(secondIndex, coll); - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); unique_ptr<FetchStage> fetchStage = - make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll); + make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -404,12 +404,12 @@ public: class QueryStageMergeSortOneStageEOF : public QueryStageMergeSortTestBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -430,7 +430,7 @@ public: // Sort by c:1 MergeSortStageParams msparams; msparams.pattern = BSON("c" << 1); - MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll); + MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll); // a:1 IndexScanParams params; @@ -440,18 +440,18 @@ public: params.bounds.endKey = objWithMaxKey(1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); // b:51 (EOF) params.descriptor = getIndex(secondIndex, coll); params.bounds.startKey = BSON("" << 51 << "" << MinKey); params.bounds.endKey = BSON("" << 51 << "" << MaxKey); - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); unique_ptr<FetchStage> fetchStage = - make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll); + make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); 
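// Aside on the EOF branch built above: the child is starved by pinning the leading
// index field to a key (51) that no document carries, while MinKey and MaxKey span
// the trailing field. The empty field names in the BSON keys stand in positionally
// for the index's fields:
params.bounds.startKey = BSON("" << 51 << "" << MinKey);
params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
// With one child permanently EOF, the merge degenerates to a pass-through of the
// surviving a:1 scan.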
auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -473,12 +473,12 @@ public: class QueryStageMergeSortManyShort : public QueryStageMergeSortTestBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -486,7 +486,7 @@ public: // Sort by foo:1 MergeSortStageParams msparams; msparams.pattern = BSON("foo" << 1); - MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll); + MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll); IndexScanParams params; params.bounds.isSimpleRange = true; @@ -504,13 +504,13 @@ public: BSONObj indexSpec = BSON(index << 1 << "foo" << 1); addIndex(indexSpec); params.descriptor = getIndex(indexSpec, coll); - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); } unique_ptr<FetchStage> fetchStage = - make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll); + make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -532,12 +532,12 @@ public: class QueryStageMergeSortInvalidation : public QueryStageMergeSortTestBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -545,7 +545,7 @@ public: // Sort by foo:1 MergeSortStageParams msparams; msparams.pattern = BSON("foo" << 1); - auto ms = make_unique<MergeSortStage>(&_txn, msparams, &ws, coll); + auto ms = make_unique<MergeSortStage>(&_opCtx, msparams, &ws, coll); IndexScanParams params; params.bounds.isSimpleRange = true; @@ -565,7 +565,7 @@ public: BSONObj indexSpec = BSON(index << 1 << "foo" << 1); addIndex(indexSpec); params.descriptor = getIndex(indexSpec, coll); - ms->addChild(new IndexScan(&_txn, params, &ws, NULL)); + ms->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); } set<RecordId> recordIds; @@ -596,7 +596,7 @@ public: // Invalidate recordIds[11]. Should force a fetch and return the deleted document. ms->saveState(); - ms->invalidate(&_txn, *it, INVALIDATION_DELETION); + ms->invalidate(&_opCtx, *it, INVALIDATION_DELETION); ms->restoreState(); // Make sure recordIds[11] was fetched for us. 
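// The yield-and-invalidate protocol exercised above, isolated for reference; a
// sketch reusing the calls in this hunk, where 'it' points at recordIds[11]:
ms->saveState();
ms->invalidate(&_opCtx, *it, INVALIDATION_DELETION);  // simulate a concurrent delete
ms->restoreState();
// After restore, the stage must hold an owned, fetched copy of the doomed document
// so it can still be emitted in sort order.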
@@ -648,12 +648,12 @@ public: class QueryStageMergeSortInvalidationMutationDedup : public QueryStageMergeSortTestBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -672,7 +672,7 @@ public: WorkingSetMember* member; MergeSortStageParams msparams; msparams.pattern = BSON("a" << 1); - auto ms = stdx::make_unique<MergeSortStage>(&_txn, msparams, &ws, coll); + auto ms = stdx::make_unique<MergeSortStage>(&_opCtx, msparams, &ws, coll); // First child scans [5, 10]. { @@ -684,7 +684,7 @@ public: params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; auto fetchStage = stdx::make_unique<FetchStage>( - &_txn, &ws, new IndexScan(&_txn, params, &ws, nullptr), nullptr, coll); + &_opCtx, &ws, new IndexScan(&_opCtx, params, &ws, nullptr), nullptr, coll); ms->addChild(fetchStage.release()); } @@ -698,7 +698,7 @@ public: params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; auto fetchStage = stdx::make_unique<FetchStage>( - &_txn, &ws, new IndexScan(&_txn, params, &ws, nullptr), nullptr, coll); + &_opCtx, &ws, new IndexScan(&_opCtx, params, &ws, nullptr), nullptr, coll); ms->addChild(fetchStage.release()); } @@ -710,7 +710,7 @@ public: ++it; // Doc {a: 5} gets invalidated by an update. - ms->invalidate(&_txn, *it, INVALIDATION_MUTATION); + ms->invalidate(&_opCtx, *it, INVALIDATION_MUTATION); // Invalidated doc {a: 5} should still get returned. member = getNextResult(&ws, ms.get()); @@ -745,12 +745,12 @@ private: class QueryStageMergeSortStringsWithNullCollation : public QueryStageMergeSortTestBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -774,7 +774,7 @@ public: MergeSortStageParams msparams; msparams.pattern = BSON("c" << 1 << "d" << 1); msparams.collator = nullptr; - MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll); + MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll); // a:1 IndexScanParams params; @@ -784,17 +784,17 @@ public: params.bounds.endKey = objWithMaxKey(1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); // b:1 params.descriptor = getIndex(secondIndex, coll); - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); unique_ptr<FetchStage> fetchStage = - make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll); + make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); // Must fetch if we want to easily pull out an obj. 
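// The mutation flavor of the same protocol, from the dedup test above: after
// INVALIDATION_MUTATION the member for {a: 5} keeps its fetched state, and the
// test expects it back exactly once even though both child scans cover it:
ms->invalidate(&_opCtx, *it, INVALIDATION_MUTATION);
// (sketch; the surrounding save/restore and assertions appear in the hunk above)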
auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -818,12 +818,12 @@ public: class QueryStageMergeSortStringsRespectsCollation : public QueryStageMergeSortTestBase { public: void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -848,7 +848,7 @@ public: msparams.pattern = BSON("c" << 1 << "d" << 1); CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); msparams.collator = &collator; - MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll); + MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll); // a:1 IndexScanParams params; @@ -858,17 +858,17 @@ public: params.bounds.endKey = objWithMaxKey(1); params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; params.direction = 1; - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); // b:1 params.descriptor = getIndex(secondIndex, coll); - ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); unique_ptr<FetchStage> fetchStage = - make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll); + make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); // Must fetch if we want to easily pull out an obj. 
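// The only material difference between this test and the null-collation one above
// is the comparator handed to the merge stage:
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
msparams.collator = &collator;  // sort keys compared under the mock collation
// whereas msparams.collator = nullptr falls back to simple binary ordering.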
auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp index 906e0e97d1c..3df262ce725 100644 --- a/src/mongo/dbtests/query_stage_multiplan.cpp +++ b/src/mongo/dbtests/query_stage_multiplan.cpp @@ -84,38 +84,38 @@ QuerySolution* createQuerySolution() { class QueryStageMultiPlanBase { public: - QueryStageMultiPlanBase() : _client(&_txn) { - OldClientWriteContext ctx(&_txn, nss.ns()); + QueryStageMultiPlanBase() : _client(&_opCtx) { + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.dropCollection(nss.ns()); } virtual ~QueryStageMultiPlanBase() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.dropCollection(nss.ns()); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj)); } void insert(const BSONObj& obj) { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.insert(nss.ns(), obj); } void remove(const BSONObj& obj) { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.remove(nss.ns(), obj); } - OperationContext* txn() { - return &_txn; + OperationContext* opCtx() { + return &_opCtx; } protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; - ClockSource* const _clock = _txn.getServiceContext()->getFastClockSource(); + OperationContext& _opCtx = *_txnPtr; + ClockSource* const _clock = _opCtx.getServiceContext()->getFastClockSource(); DBDirectClient _client; }; @@ -133,14 +133,15 @@ public: addIndex(BSON("foo" << 1)); - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); const Collection* coll = ctx.getCollection(); // Plan 0: IXScan over foo == 7 // Every call to work() returns something so this should clearly win (by current scoring // at least). std::vector<IndexDescriptor*> indexes; - coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, BSON("foo" << 1), false, &indexes); + coll->getIndexCatalog()->findIndexesByKeyPattern( + &_opCtx, BSON("foo" << 1), false, &indexes); ASSERT_EQ(indexes.size(), 1U); IndexScanParams ixparams; @@ -152,8 +153,8 @@ public: ixparams.direction = 1; unique_ptr<WorkingSet> sharedWs(new WorkingSet()); - IndexScan* ix = new IndexScan(&_txn, ixparams, sharedWs.get(), NULL); - unique_ptr<PlanStage> firstRoot(new FetchStage(&_txn, sharedWs.get(), ix, NULL, coll)); + IndexScan* ix = new IndexScan(&_opCtx, ixparams, sharedWs.get(), NULL); + unique_ptr<PlanStage> firstRoot(new FetchStage(&_opCtx, sharedWs.get(), ix, NULL, coll)); // Plan 1: CollScan with matcher. CollectionScanParams csparams; @@ -169,19 +170,19 @@ public: unique_ptr<MatchExpression> filter = std::move(statusWithMatcher.getValue()); // Make the stage. unique_ptr<PlanStage> secondRoot( - new CollectionScan(&_txn, csparams, sharedWs.get(), filter.get())); + new CollectionScan(&_opCtx, csparams, sharedWs.get(), filter.get())); // Hand the plans off to the MPS. 
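// During selection, the MultiPlanStage built below will work() both candidates in
// rounds and score their productivity; the index scan, which returns a result on
// every call, is expected to win under the current scoring. The check appearing
// further down in this hunk:
ASSERT_EQUALS(0, mps->bestPlanIdx());  // plan 0, the IXSCAN + FETCH root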
auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(BSON("foo" << 7)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); verify(NULL != cq.get()); unique_ptr<MultiPlanStage> mps = - make_unique<MultiPlanStage>(&_txn, ctx.getCollection(), cq.get()); + make_unique<MultiPlanStage>(&_opCtx, ctx.getCollection(), cq.get()); mps->addPlan(createQuerySolution(), firstRoot.release(), sharedWs.get()); mps->addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get()); @@ -192,7 +193,7 @@ public: ASSERT_EQUALS(0, mps->bestPlanIdx()); // Takes ownership of arguments other than 'collection'. - auto statusWithPlanExecutor = PlanExecutor::make(&_txn, + auto statusWithPlanExecutor = PlanExecutor::make(&_opCtx, std::move(sharedWs), std::move(mps), std::move(cq), @@ -226,7 +227,7 @@ public: addIndex(BSON("a" << 1)); addIndex(BSON("b" << 1)); - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* collection = ctx.getCollection(); // Query for both 'a' and 'b' and sort on 'b'. @@ -234,7 +235,7 @@ public: qr->setFilter(BSON("a" << 1 << "b" << 1)); qr->setSort(BSON("b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); ASSERT(NULL != cq.get()); @@ -245,7 +246,7 @@ public: // Get planner params. QueryPlannerParams plannerParams; - fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); + fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams); // Turn this off otherwise it pops up in some plans. plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS; @@ -259,12 +260,12 @@ public: ASSERT_EQUALS(solutions.size(), 3U); // Fill out the MultiPlanStage. - unique_ptr<MultiPlanStage> mps(new MultiPlanStage(&_txn, collection, cq.get())); + unique_ptr<MultiPlanStage> mps(new MultiPlanStage(&_opCtx, collection, cq.get())); unique_ptr<WorkingSet> ws(new WorkingSet()); // Put each solution from the planner into the MPR. for (size_t i = 0; i < solutions.size(); ++i) { PlanStage* root; - ASSERT(StageBuilder::build(&_txn, collection, *cq, *solutions[i], ws.get(), &root)); + ASSERT(StageBuilder::build(&_opCtx, collection, *cq, *solutions[i], ws.get(), &root)); // Takes ownership of 'solutions[i]' and 'root'. 
mps->addPlan(solutions[i], root, ws.get()); } @@ -323,8 +324,8 @@ public: const int nDocs = 500; auto ws = stdx::make_unique<WorkingSet>(); - auto firstPlan = stdx::make_unique<QueuedDataStage>(&_txn, ws.get()); - auto secondPlan = stdx::make_unique<QueuedDataStage>(&_txn, ws.get()); + auto firstPlan = stdx::make_unique<QueuedDataStage>(&_opCtx, ws.get()); + auto secondPlan = stdx::make_unique<QueuedDataStage>(&_opCtx, ws.get()); for (int i = 0; i < nDocs; ++i) { addMember(firstPlan.get(), ws.get(), BSON("x" << 1)); @@ -334,14 +335,14 @@ public: secondPlan->pushBack(PlanStage::NEED_TIME); } - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(BSON("x" << 1)); auto cq = uassertStatusOK(CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions())); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions())); unique_ptr<MultiPlanStage> mps = - make_unique<MultiPlanStage>(&_txn, ctx.getCollection(), cq.get()); + make_unique<MultiPlanStage>(&_opCtx, ctx.getCollection(), cq.get()); // Put each plan into the MultiPlanStage. Takes ownership of 'firstPlan' and 'secondPlan'. auto firstSoln = stdx::make_unique<QuerySolution>(); @@ -350,8 +351,11 @@ public: mps->addPlan(secondSoln.release(), secondPlan.release(), ws.get()); // Making a PlanExecutor chooses the best plan. - auto exec = uassertStatusOK(PlanExecutor::make( - &_txn, std::move(ws), std::move(mps), ctx.getCollection(), PlanExecutor::YIELD_MANUAL)); + auto exec = uassertStatusOK(PlanExecutor::make(&_opCtx, + std::move(ws), + std::move(mps), + ctx.getCollection(), + PlanExecutor::YIELD_MANUAL)); auto root = static_cast<MultiPlanStage*>(exec->getRootStage()); ASSERT_TRUE(root->bestPlanChosen()); @@ -409,16 +413,16 @@ public: addIndex(BSON("foo" << 1)); addIndex(BSON("foo" << -1 << "bar" << 1)); - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* coll = ctx.getCollection(); // Create the executor (Matching all documents). 
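// Note on the backup-plan test above: each candidate is a scripted QueuedDataStage
// rather than a real scan. A sketch using the test's own helpers; 'plan' is an
// illustrative name:
auto plan = stdx::make_unique<QueuedDataStage>(&_opCtx, ws.get());
addMember(plan.get(), ws.get(), BSON("x" << 1));  // script a returned document
plan->pushBack(PlanStage::NEED_TIME);             // or script an unproductive work() call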
auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(BSON("foo" << BSON("$gte" << 0))); auto cq = uassertStatusOK(CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions())); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions())); auto exec = - uassertStatusOK(getExecutor(&_txn, coll, std::move(cq), PlanExecutor::YIELD_MANUAL)); + uassertStatusOK(getExecutor(&_opCtx, coll, std::move(cq), PlanExecutor::YIELD_MANUAL)); ASSERT_EQ(exec->getRootStage()->stageType(), STAGE_MULTI_PLAN); diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp index d973779e7ed..8b3c02fb540 100644 --- a/src/mongo/dbtests/query_stage_near.cpp +++ b/src/mongo/dbtests/query_stage_near.cpp @@ -78,7 +78,7 @@ public: _intervals.mutableVector().push_back(new MockInterval(data, min, max)); } - virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn, + virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx, WorkingSet* workingSet, Collection* collection) { if (_pos == static_cast<int>(_intervals.size())) @@ -88,7 +88,7 @@ public: bool lastInterval = _pos == static_cast<int>(_intervals.vector().size()); - auto queuedStage = make_unique<QueuedDataStage>(txn, workingSet); + auto queuedStage = make_unique<QueuedDataStage>(opCtx, workingSet); for (unsigned int i = 0; i < interval.data.size(); i++) { // Add all documents from the lastInterval into the QueuedDataStage. @@ -109,7 +109,7 @@ public: return StatusWith<double>(member->obj.value()["distance"].numberDouble()); } - virtual StageState initialize(OperationContext* txn, + virtual StageState initialize(OperationContext* opCtx, WorkingSet* workingSet, Collection* collection, WorkingSetID* out) { diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp index 0d0184ea47a..f5ffa69d1e7 100644 --- a/src/mongo/dbtests/query_stage_sort.cpp +++ b/src/mongo/dbtests/query_stage_sort.cpp @@ -58,7 +58,7 @@ namespace dps = ::mongo::dotted_path_support; class QueryStageSortTestBase { public: - QueryStageSortTestBase() : _client(&_txn) {} + QueryStageSortTestBase() : _client(&_opCtx) {} void fillData() { for (int i = 0; i < numObj(); ++i) { @@ -75,7 +75,7 @@ public: } void getRecordIds(set<RecordId>* out, Collection* coll) { - auto cursor = coll->getCursor(&_txn); + auto cursor = coll->getCursor(&_opCtx); while (auto record = cursor->next()) { out->insert(record->id); } @@ -97,7 +97,7 @@ public: WorkingSetID id = ws->allocate(); WorkingSetMember* member = ws->get(id); member->recordId = *it; - member->obj = coll->docFor(&_txn, *it); + member->obj = coll->docFor(&_opCtx, *it); ws->transitionToRecordIdAndObj(id); ms->pushBack(id); } @@ -110,7 +110,7 @@ public: PlanExecutor* makePlanExecutorWithSortStage(Collection* coll) { // Build the mock scan stage which feeds the data. 
auto ws = make_unique<WorkingSet>(); - auto queuedDataStage = make_unique<QueuedDataStage>(&_txn, ws.get()); + auto queuedDataStage = make_unique<QueuedDataStage>(&_opCtx, ws.get()); insertVarietyOfObjects(ws.get(), queuedDataStage.get(), coll); SortStageParams params; @@ -119,14 +119,14 @@ public: params.limit = limit(); auto keyGenStage = make_unique<SortKeyGeneratorStage>( - &_txn, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr); + &_opCtx, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr); - auto ss = make_unique<SortStage>(&_txn, params, ws.get(), keyGenStage.release()); + auto ss = make_unique<SortStage>(&_opCtx, params, ws.get(), keyGenStage.release()); // The PlanExecutor will be automatically registered on construction due to the auto // yield policy, so it can receive invalidations when we remove documents later. - auto statusWithPlanExecutor = - PlanExecutor::make(&_txn, std::move(ws), std::move(ss), coll, PlanExecutor::YIELD_AUTO); + auto statusWithPlanExecutor = PlanExecutor::make( + &_opCtx, std::move(ws), std::move(ss), coll, PlanExecutor::YIELD_AUTO); invariant(statusWithPlanExecutor.isOK()); return statusWithPlanExecutor.getValue().release(); } @@ -147,7 +147,7 @@ public: */ void sortAndCheck(int direction, Collection* coll) { auto ws = make_unique<WorkingSet>(); - auto queuedDataStage = make_unique<QueuedDataStage>(&_txn, ws.get()); + auto queuedDataStage = make_unique<QueuedDataStage>(&_opCtx, ws.get()); // Insert a mix of the various types of data. insertVarietyOfObjects(ws.get(), queuedDataStage.get(), coll); @@ -158,16 +158,16 @@ public: params.limit = limit(); auto keyGenStage = make_unique<SortKeyGeneratorStage>( - &_txn, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr); + &_opCtx, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr); - auto sortStage = make_unique<SortStage>(&_txn, params, ws.get(), keyGenStage.release()); + auto sortStage = make_unique<SortStage>(&_opCtx, params, ws.get(), keyGenStage.release()); auto fetchStage = - make_unique<FetchStage>(&_txn, ws.get(), sortStage.release(), nullptr, coll); + make_unique<FetchStage>(&_opCtx, ws.get(), sortStage.release(), nullptr, coll); // Must fetch so we can look at the doc as a BSONObj. 
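// Illustrative tail for sortAndCheck(), once the executor below is made (the
// concrete assertions are elided from this hunk): walk the results and confirm
// each fetched BSONObj follows its predecessor in the requested direction.
BSONObj last;
BSONObj current;
while (PlanExecutor::ADVANCED == exec->getNext(&current, NULL)) {
    if (!last.isEmpty()) {
        // compare 'last' against 'current' on the sort field, per 'direction'
    }
    last = current.getOwned();  // own the bytes before the executor advances
}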
auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -222,7 +222,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; DBDirectClient _client; }; @@ -235,12 +235,12 @@ public: } void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -257,12 +257,12 @@ public: } void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -288,12 +288,12 @@ public: } void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -313,12 +313,12 @@ public: } void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -347,7 +347,7 @@ public: // Since it's in the WorkingSet, the updates should not be reflected in the output. exec->saveState(); set<RecordId>::iterator it = recordIds.begin(); - Snapshotted<BSONObj> oldDoc = coll->docFor(&_txn, *it); + Snapshotted<BSONObj> oldDoc = coll->docFor(&_opCtx, *it); OID updatedId = oldDoc.value().getField("_id").OID(); SnapshotId idBeforeUpdate = oldDoc.snapshotId(); @@ -358,8 +358,8 @@ public: OplogUpdateEntryArgs args; args.ns = coll->ns().ns(); { - WriteUnitOfWork wuow(&_txn); - coll->updateDocument(&_txn, *it, oldDoc, newDoc, false, false, NULL, &args); + WriteUnitOfWork wuow(&_opCtx); + coll->updateDocument(&_opCtx, *it, oldDoc, newDoc, false, false, NULL, &args); wuow.commit(); } exec->restoreState(); @@ -374,10 +374,10 @@ public: // should be fetched. 
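// Why the first updated document must not change the output above: the working set
// already holds a copy taken under an earlier snapshot. A sketch of the bookkeeping
// this test leans on:
Snapshotted<BSONObj> oldDoc = coll->docFor(&_opCtx, *it);
SnapshotId idBeforeUpdate = oldDoc.snapshotId();
// After the WriteUnitOfWork commits, a fresh docFor() read would carry a new
// SnapshotId, but the buffered sort result still reflects the pre-update bytes.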
exec->saveState(); while (it != recordIds.end()) { - oldDoc = coll->docFor(&_txn, *it); + oldDoc = coll->docFor(&_opCtx, *it); { - WriteUnitOfWork wuow(&_txn); - coll->updateDocument(&_txn, *it++, oldDoc, newDoc, false, false, NULL, &args); + WriteUnitOfWork wuow(&_opCtx); + coll->updateDocument(&_opCtx, *it++, oldDoc, newDoc, false, false, NULL, &args); wuow.commit(); } } @@ -422,12 +422,12 @@ public: } void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } @@ -457,8 +457,8 @@ public: OpDebug* const nullOpDebug = nullptr; set<RecordId>::iterator it = recordIds.begin(); { - WriteUnitOfWork wuow(&_txn); - coll->deleteDocument(&_txn, *it++, nullOpDebug); + WriteUnitOfWork wuow(&_opCtx); + coll->deleteDocument(&_opCtx, *it++, nullOpDebug); wuow.commit(); } exec->restoreState(); @@ -473,8 +473,8 @@ public: exec->saveState(); while (it != recordIds.end()) { { - WriteUnitOfWork wuow(&_txn); - coll->deleteDocument(&_txn, *it++, nullOpDebug); + WriteUnitOfWork wuow(&_opCtx); + coll->deleteDocument(&_opCtx, *it++, nullOpDebug); wuow.commit(); } } @@ -521,17 +521,17 @@ public: } void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wuow(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wuow(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } auto ws = make_unique<WorkingSet>(); - auto queuedDataStage = make_unique<QueuedDataStage>(&_txn, ws.get()); + auto queuedDataStage = make_unique<QueuedDataStage>(&_opCtx, ws.get()); for (int i = 0; i < numObj(); ++i) { { @@ -557,16 +557,16 @@ public: params.limit = 0; auto keyGenStage = make_unique<SortKeyGeneratorStage>( - &_txn, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr); + &_opCtx, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr); - auto sortStage = make_unique<SortStage>(&_txn, params, ws.get(), keyGenStage.release()); + auto sortStage = make_unique<SortStage>(&_opCtx, params, ws.get(), keyGenStage.release()); auto fetchStage = - make_unique<FetchStage>(&_txn, ws.get(), sortStage.release(), nullptr, coll); + make_unique<FetchStage>(&_opCtx, ws.get(), sortStage.release(), nullptr, coll); // We don't get results back since we're sorting some parallel arrays. 
auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); PlanExecutor::ExecState runnerState = exec->getNext(NULL, NULL); diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp index 53c839d0fd4..b89df74ac83 100644 --- a/src/mongo/dbtests/query_stage_subplan.cpp +++ b/src/mongo/dbtests/query_stage_subplan.cpp @@ -50,23 +50,23 @@ static const NamespaceString nss("unittests.QueryStageSubplan"); class QueryStageSubplanBase { public: - QueryStageSubplanBase() : _client(&_txn) {} + QueryStageSubplanBase() : _client(&_opCtx) {} virtual ~QueryStageSubplanBase() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.dropCollection(nss.ns()); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj)); } void insert(const BSONObj& doc) { _client.insert(nss.ns(), doc); } - OperationContext* txn() { - return &_txn; + OperationContext* opCtx() { + return &_opCtx; } protected: @@ -80,13 +80,13 @@ protected: auto qr = unittest::assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)); auto cq = unittest::assertGet( - CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop())); + CanonicalQuery::canonicalize(opCtx(), std::move(qr), ExtensionsCallbackNoop())); return cq; } const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; - ClockSource* _clock = _txn.getServiceContext()->getFastClockSource(); + OperationContext& _opCtx = *_txnPtr; + ClockSource* _clock = _opCtx.getServiceContext()->getFastClockSource(); private: DBDirectClient _client; @@ -101,7 +101,7 @@ private: class QueryStageSubplanGeo2dOr : public QueryStageSubplanBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); addIndex(BSON("a" << "2d" << "b" @@ -116,7 +116,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); @@ -124,11 +124,11 @@ public: // Get planner params. QueryPlannerParams plannerParams; - fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); + fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams); WorkingSet ws; std::unique_ptr<SubplanStage> subplan( - new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); + new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get())); // Plan selection should succeed due to falling back on regular planning. 
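// SubplanStage plans each $or branch on its own; a branch it cannot handle (the
// $geoWithin predicate here) makes it fall back to planning the query as a whole,
// so selection below still succeeds. Note also how the txn() to opCtx() accessor
// rename ripples through every canonicalization in this file:
auto statusWithCQ = CanonicalQuery::canonicalize(
    opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());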
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock); @@ -142,7 +142,7 @@ public: class QueryStageSubplanPlanFromCache : public QueryStageSubplanBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); addIndex(BSON("a" << 1)); addIndex(BSON("a" << 1 << "b" << 1)); @@ -162,17 +162,17 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); // Get planner params. QueryPlannerParams plannerParams; - fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); + fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams); WorkingSet ws; std::unique_ptr<SubplanStage> subplan( - new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); + new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get())); PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); @@ -185,7 +185,7 @@ public: // If we repeat the same query, the plan for the first branch should have come from // the cache. ws.clear(); - subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); + subplan.reset(new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get())); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); @@ -200,7 +200,7 @@ public: class QueryStageSubplanDontCacheZeroResults : public QueryStageSubplanBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); addIndex(BSON("a" << 1 << "b" << 1)); addIndex(BSON("a" << 1)); @@ -220,17 +220,17 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); // Get planner params. QueryPlannerParams plannerParams; - fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); + fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams); WorkingSet ws; std::unique_ptr<SubplanStage> subplan( - new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); + new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get())); PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); @@ -244,7 +244,7 @@ public: // from the cache (because the first call to pickBestPlan() refrained from creating any // cache entries). 
ws.clear(); - subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); + subplan.reset(new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get())); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); @@ -259,7 +259,7 @@ public: class QueryStageSubplanDontCacheTies : public QueryStageSubplanBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); addIndex(BSON("a" << 1 << "b" << 1)); addIndex(BSON("a" << 1 << "c" << 1)); @@ -279,17 +279,17 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto statusWithCQ = CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); // Get planner params. QueryPlannerParams plannerParams; - fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); + fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams); WorkingSet ws; std::unique_ptr<SubplanStage> subplan( - new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); + new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get())); PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); @@ -303,7 +303,7 @@ public: // from the cache (because the first call to pickBestPlan() refrained from creating any // cache entries). ws.clear(); - subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); + subplan.reset(new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get())); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); @@ -520,7 +520,7 @@ public: class QueryStageSubplanPlanContainedOr : public QueryStageSubplanBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); addIndex(BSON("b" << 1 << "a" << 1)); addIndex(BSON("c" << 1 << "a" << 1)); @@ -535,17 +535,17 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto cq = unittest::assertGet(CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions())); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions())); Collection* collection = ctx.getCollection(); // Get planner params. QueryPlannerParams plannerParams; - fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); + fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams); WorkingSet ws; std::unique_ptr<SubplanStage> subplan( - new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); + new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get())); // Plan selection should succeed due to falling back on regular planning. 
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock); @@ -583,7 +583,7 @@ public: class QueryStageSubplanPlanRootedOrNE : public QueryStageSubplanBase { public: void run() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); addIndex(BSON("a" << 1 << "b" << 1)); addIndex(BSON("a" << 1 << "c" << 1)); @@ -597,16 +597,16 @@ public: qr->setFilter(fromjson("{$or: [{a: 1}, {a: {$ne:1}}]}")); qr->setSort(BSON("d" << 1)); auto cq = unittest::assertGet(CanonicalQuery::canonicalize( - txn(), std::move(qr), ExtensionsCallbackDisallowExtensions())); + opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions())); Collection* collection = ctx.getCollection(); QueryPlannerParams plannerParams; - fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); + fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams); WorkingSet ws; std::unique_ptr<SubplanStage> subplan( - new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); + new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get())); PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp index 276b9005b3c..55326de7c63 100644 --- a/src/mongo/dbtests/query_stage_tests.cpp +++ b/src/mongo/dbtests/query_stage_tests.cpp @@ -54,8 +54,8 @@ using std::unique_ptr; class IndexScanBase { public: - IndexScanBase() : _client(&_txn) { - OldClientWriteContext ctx(&_txn, ns()); + IndexScanBase() : _client(&_opCtx) { + OldClientWriteContext ctx(&_opCtx, ns()); for (int i = 0; i < numObj(); ++i) { BSONObjBuilder bob; @@ -70,16 +70,16 @@ public: } virtual ~IndexScanBase() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); _client.dropCollection(ns()); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj)); } int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) { - AutoGetCollectionForRead ctx(&_txn, NamespaceString(ns())); + AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns())); const CollatorInterface* collator = nullptr; StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse( @@ -89,10 +89,10 @@ public: unique_ptr<WorkingSet> ws = stdx::make_unique<WorkingSet>(); unique_ptr<IndexScan> ix = - stdx::make_unique<IndexScan>(&_txn, params, ws.get(), filterExpr.get()); + stdx::make_unique<IndexScan>(&_opCtx, params, ws.get(), filterExpr.get()); auto statusWithPlanExecutor = PlanExecutor::make( - &_txn, std::move(ws), std::move(ix), ctx.getCollection(), PlanExecutor::YIELD_MANUAL); + &_opCtx, std::move(ws), std::move(ix), ctx.getCollection(), PlanExecutor::YIELD_MANUAL); ASSERT_OK(statusWithPlanExecutor.getStatus()); unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); @@ -107,7 +107,7 @@ public: } void makeGeoData() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); for (int i = 0; i < numObj(); ++i) { double lat = double(rand()) / RAND_MAX; @@ -117,10 +117,10 @@ public: } IndexDescriptor* getIndex(const BSONObj& obj) { - AutoGetCollectionForRead ctx(&_txn, NamespaceString(ns())); + AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns())); Collection* collection = ctx.getCollection(); std::vector<IndexDescriptor*> indexes; - collection->getIndexCatalog()->findIndexesByKeyPattern(&_txn, 
obj, false, &indexes); + collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes); return indexes.empty() ? nullptr : indexes[0]; } @@ -133,7 +133,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; private: DBDirectClient _client; diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp index 06343ca966b..da49550c73a 100644 --- a/src/mongo/dbtests/query_stage_update.cpp +++ b/src/mongo/dbtests/query_stage_update.cpp @@ -65,14 +65,14 @@ static const NamespaceString nss("unittests.QueryStageUpdate"); class QueryStageUpdateBase { public: - QueryStageUpdateBase() : _client(&_txn) { - OldClientWriteContext ctx(&_txn, nss.ns()); + QueryStageUpdateBase() : _client(&_opCtx) { + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.dropCollection(nss.ns()); _client.createCollection(nss.ns()); } virtual ~QueryStageUpdateBase() { - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); _client.dropCollection(nss.ns()); } @@ -92,7 +92,7 @@ public: auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto statusWithCQ = CanonicalQuery::canonicalize( - &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions()); + &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); return std::move(statusWithCQ.getValue()); } @@ -124,7 +124,7 @@ public: params.direction = CollectionScanParams::FORWARD; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -146,7 +146,7 @@ public: params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -174,7 +174,7 @@ public: protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; private: DBDirectClient _client; @@ -188,8 +188,8 @@ public: void run() { // Run the update. { - OldClientWriteContext ctx(&_txn, nss.ns()); - CurOp& curOp = *CurOp::get(_txn); + OldClientWriteContext ctx(&_opCtx, nss.ns()); + CurOp& curOp = *CurOp::get(_opCtx); OpDebug* opDebug = &curOp.debug(); UpdateDriver driver((UpdateDriver::Options())); Collection* collection = ctx.getCollection(); @@ -217,17 +217,17 @@ public: params.canonicalQuery = cq.get(); auto ws = make_unique<WorkingSet>(); - auto eofStage = make_unique<EOFStage>(&_txn); + auto eofStage = make_unique<EOFStage>(&_opCtx); auto updateStage = - make_unique<UpdateStage>(&_txn, params, ws.get(), collection, eofStage.release()); + make_unique<UpdateStage>(&_opCtx, params, ws.get(), collection, eofStage.release()); runUpdate(updateStage.get()); } // Verify the contents of the resulting collection. { - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* collection = ctx.getCollection(); vector<BSONObj> objs; @@ -249,7 +249,7 @@ public: void run() { // Run the update. 
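// The upsert test above drives UpdateStage with an EOFStage child: the child yields
// no documents, so any write must go through the upsert path (the request setup
// establishing that is elided from this hunk). Distilled, with the same calls:
auto ws = make_unique<WorkingSet>();
auto eofStage = make_unique<EOFStage>(&_opCtx);
auto updateStage =
    make_unique<UpdateStage>(&_opCtx, params, ws.get(), collection, eofStage.release());
runUpdate(updateStage.get());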
{ - OldClientWriteContext ctx(&_txn, nss.ns()); + OldClientWriteContext ctx(&_opCtx, nss.ns()); // Populate the collection. for (int i = 0; i < 10; ++i) { @@ -257,7 +257,7 @@ public: } ASSERT_EQUALS(10U, count(BSONObj())); - CurOp& curOp = *CurOp::get(_txn); + CurOp& curOp = *CurOp::get(_opCtx); OpDebug* opDebug = &curOp.debug(); UpdateDriver driver((UpdateDriver::Options())); Database* db = ctx.db(); @@ -294,10 +294,10 @@ public: updateParams.canonicalQuery = cq.get(); auto ws = make_unique<WorkingSet>(); - auto cs = make_unique<CollectionScan>(&_txn, collScanParams, ws.get(), cq->root()); + auto cs = make_unique<CollectionScan>(&_opCtx, collScanParams, ws.get(), cq->root()); auto updateStage = - make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, cs.release()); + make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, cs.release()); const UpdateStats* stats = static_cast<const UpdateStats*>(updateStage->getSpecificStats()); @@ -313,11 +313,11 @@ public: // Remove recordIds[targetDocIndex]; updateStage->saveState(); { - WriteUnitOfWork wunit(&_txn); - updateStage->invalidate(&_txn, recordIds[targetDocIndex], INVALIDATION_DELETION); + WriteUnitOfWork wunit(&_opCtx); + updateStage->invalidate(&_opCtx, recordIds[targetDocIndex], INVALIDATION_DELETION); wunit.commit(); } - BSONObj targetDoc = coll->docFor(&_txn, recordIds[targetDocIndex]).value(); + BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[targetDocIndex]).value(); ASSERT(!targetDoc.isEmpty()); remove(targetDoc); updateStage->restoreState(); @@ -336,7 +336,7 @@ public: // Check the contents of the collection. { - AutoGetCollectionForRead ctx(&_txn, nss); + AutoGetCollectionForRead ctx(&_opCtx, nss); Collection* collection = ctx.getCollection(); vector<BSONObj> objs; @@ -370,8 +370,8 @@ public: ASSERT_EQUALS(10U, count(BSONObj())); // Various variables we'll need. - OldClientWriteContext ctx(&_txn, nss.ns()); - OpDebug* opDebug = &CurOp::get(_txn)->debug(); + OldClientWriteContext ctx(&_opCtx, nss.ns()); + OpDebug* opDebug = &CurOp::get(_opCtx)->debug(); Collection* coll = ctx.getCollection(); UpdateLifecycleImpl updateLifecycle(nss); UpdateRequest request(nss); @@ -397,7 +397,7 @@ public: // Configure a QueuedDataStage to pass the first object in the collection back in a // RID_AND_OBJ state. - auto qds = make_unique<QueuedDataStage>(&_txn, ws.get()); + auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get()); WorkingSetID id = ws->allocate(); WorkingSetMember* member = ws->get(id); member->recordId = recordIds[targetDocIndex]; @@ -411,7 +411,7 @@ public: updateParams.canonicalQuery = cq.get(); const auto updateStage = - make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release()); + make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release()); // Should return advanced. id = WorkingSet::INVALID_ID; @@ -458,8 +458,8 @@ public: ASSERT_EQUALS(50U, count(BSONObj())); // Various variables we'll need. - OldClientWriteContext ctx(&_txn, nss.ns()); - OpDebug* opDebug = &CurOp::get(_txn)->debug(); + OldClientWriteContext ctx(&_opCtx, nss.ns()); + OpDebug* opDebug = &CurOp::get(_opCtx)->debug(); Collection* coll = ctx.getCollection(); UpdateLifecycleImpl updateLifecycle(nss); UpdateRequest request(nss); @@ -485,7 +485,7 @@ public: // Configure a QueuedDataStage to pass the first object in the collection back in a // RID_AND_OBJ state. 
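// A further invalidation variant from the hunk above: the target RecordId is
// invalidated mid-update inside a WriteUnitOfWork, and the stage is then expected
// to skip the vanished document. Isolated for reference, same calls as above:
updateStage->saveState();
{
    WriteUnitOfWork wunit(&_opCtx);
    updateStage->invalidate(&_opCtx, recordIds[targetDocIndex], INVALIDATION_DELETION);
    wunit.commit();
}
updateStage->restoreState();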
- auto qds = make_unique<QueuedDataStage>(&_txn, ws.get()); + auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get()); WorkingSetID id = ws->allocate(); WorkingSetMember* member = ws->get(id); member->recordId = recordIds[targetDocIndex]; @@ -499,7 +499,7 @@ public: updateParams.canonicalQuery = cq.get(); auto updateStage = - make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release()); + make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release()); // Should return advanced. id = WorkingSet::INVALID_ID; @@ -542,8 +542,8 @@ class QueryStageUpdateSkipOwnedObjects : public QueryStageUpdateBase { public: void run() { // Various variables we'll need. - OldClientWriteContext ctx(&_txn, nss.ns()); - OpDebug* opDebug = &CurOp::get(_txn)->debug(); + OldClientWriteContext ctx(&_opCtx, nss.ns()); + OpDebug* opDebug = &CurOp::get(_opCtx)->debug(); Collection* coll = ctx.getCollection(); UpdateLifecycleImpl updateLifecycle(nss); UpdateRequest request(nss); @@ -562,7 +562,7 @@ public: ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti())); // Configure a QueuedDataStage to pass an OWNED_OBJ to the update stage. - auto qds = make_unique<QueuedDataStage>(&_txn, ws.get()); + auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get()); { WorkingSetID id = ws->allocate(); WorkingSetMember* member = ws->get(id); @@ -576,7 +576,7 @@ public: updateParams.canonicalQuery = cq.get(); const auto updateStage = - make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release()); + make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release()); const UpdateStats* stats = static_cast<const UpdateStats*>(updateStage->getSpecificStats()); // Call work, passing the set up member to the update stage. diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp index 5288038b1f8..b2b5495573e 100644 --- a/src/mongo/dbtests/querytests.cpp +++ b/src/mongo/dbtests/querytests.cpp @@ -65,15 +65,15 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2; class Base { public: - Base() : _scopedXact(&_txn, MODE_X), _lk(_txn.lockState()), _context(&_txn, ns()) { + Base() : _scopedXact(&_opCtx, MODE_X), _lk(_opCtx.lockState()), _context(&_opCtx, ns()) { { - WriteUnitOfWork wunit(&_txn); + WriteUnitOfWork wunit(&_opCtx); _database = _context.db(); _collection = _database->getCollection(ns()); if (_collection) { - _database->dropCollection(&_txn, ns()); + _database->dropCollection(&_opCtx, ns()); } - _collection = _database->createCollection(&_txn, ns()); + _collection = _database->createCollection(&_opCtx, ns()); wunit.commit(); } @@ -82,8 +82,8 @@ public: ~Base() { try { - WriteUnitOfWork wunit(&_txn); - uassertStatusOK(_database->dropCollection(&_txn, ns())); + WriteUnitOfWork wunit(&_opCtx); + uassertStatusOK(_database->dropCollection(&_opCtx, ns())); wunit.commit(); } catch (...) 
{ FAIL("Exception while cleaning up collection"); @@ -97,7 +97,7 @@ protected: void addIndex(const BSONObj& key) { Helpers::ensureIndex( - &_txn, _collection, key, kIndexVersion, false, key.firstElementFieldName()); + &_opCtx, _collection, key, kIndexVersion, false, key.firstElementFieldName()); } void insert(const char* s) { @@ -105,7 +105,7 @@ protected: } void insert(const BSONObj& o) { - WriteUnitOfWork wunit(&_txn); + WriteUnitOfWork wunit(&_opCtx); OpDebug* const nullOpDebug = nullptr; if (o["_id"].eoo()) { BSONObjBuilder b; @@ -113,16 +113,16 @@ protected: oid.init(); b.appendOID("_id", &oid); b.appendElements(o); - _collection->insertDocument(&_txn, b.obj(), nullOpDebug, false); + _collection->insertDocument(&_opCtx, b.obj(), nullOpDebug, false); } else { - _collection->insertDocument(&_txn, o, nullOpDebug, false); + _collection->insertDocument(&_opCtx, o, nullOpDebug, false); } wunit.commit(); } const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; ScopedTransaction _scopedXact; Lock::GlobalWrite _lk; OldClientContext _context; @@ -141,12 +141,13 @@ public: BSONObj query = fromjson("{$or:[{b:2},{c:3}]}"); BSONObj ret; // Check findOne() returning object. - ASSERT(Helpers::findOne(&_txn, _collection, query, ret, true)); + ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, true)); ASSERT_EQUALS(string("b"), ret.firstElement().fieldName()); // Cross check with findOne() returning location. ASSERT_BSONOBJ_EQ( ret, - _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value()); + _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, true)) + .value()); } }; @@ -158,25 +159,27 @@ public: BSONObj ret; // Check findOne() returning object, allowing unindexed scan. - ASSERT(Helpers::findOne(&_txn, _collection, query, ret, false)); + ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, false)); // Check findOne() returning location, allowing unindexed scan. ASSERT_BSONOBJ_EQ( ret, - _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value()); + _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, false)) + .value()); // Check findOne() returning object, requiring indexed scan without index. - ASSERT_THROWS(Helpers::findOne(&_txn, _collection, query, ret, true), + ASSERT_THROWS(Helpers::findOne(&_opCtx, _collection, query, ret, true), MsgAssertionException); // Check findOne() returning location, requiring indexed scan without index. - ASSERT_THROWS(Helpers::findOne(&_txn, _collection, query, true), MsgAssertionException); + ASSERT_THROWS(Helpers::findOne(&_opCtx, _collection, query, true), MsgAssertionException); addIndex(BSON("b" << 1)); // Check findOne() returning object, requiring indexed scan with index. - ASSERT(Helpers::findOne(&_txn, _collection, query, ret, true)); + ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, true)); // Check findOne() returning location, requiring indexed scan with index. ASSERT_BSONOBJ_EQ( ret, - _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value()); + _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, true)) + .value()); } }; @@ -185,23 +188,23 @@ public: void run() { // We don't normally allow empty objects in the database, but test that we can find // an empty object (one might be allowed inside a reserved namespace at some point). 
- ScopedTransaction transaction(&_txn, MODE_X); - Lock::GlobalWrite lk(_txn.lockState()); - OldClientContext ctx(&_txn, "unittests.querytests"); + ScopedTransaction transaction(&_opCtx, MODE_X); + Lock::GlobalWrite lk(_opCtx.lockState()); + OldClientContext ctx(&_opCtx, "unittests.querytests"); { - WriteUnitOfWork wunit(&_txn); + WriteUnitOfWork wunit(&_opCtx); Database* db = ctx.db(); if (db->getCollection(ns())) { _collection = NULL; - db->dropCollection(&_txn, ns()); + db->dropCollection(&_opCtx, ns()); } - _collection = db->createCollection(&_txn, ns(), CollectionOptions(), false); + _collection = db->createCollection(&_opCtx, ns(), CollectionOptions(), false); wunit.commit(); } ASSERT(_collection); - DBDirectClient cl(&_txn); + DBDirectClient cl(&_opCtx); BSONObj info; bool ok = cl.runCommand("unittests", BSON("godinsert" @@ -214,21 +217,22 @@ public: insert(BSONObj()); BSONObj query; BSONObj ret; - ASSERT(Helpers::findOne(&_txn, _collection, query, ret, false)); + ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, false)); ASSERT(ret.isEmpty()); ASSERT_BSONOBJ_EQ( ret, - _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value()); + _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, false)) + .value()); } }; class ClientBase { public: - ClientBase() : _client(&_txn) { - mongo::LastError::get(_txn.getClient()).reset(); + ClientBase() : _client(&_opCtx) { + mongo::LastError::get(_opCtx.getClient()).reset(); } virtual ~ClientBase() { - mongo::LastError::get(_txn.getClient()).reset(); + mongo::LastError::get(_opCtx.getClient()).reset(); } protected: @@ -243,7 +247,7 @@ protected: } const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; DBDirectClient _client; }; @@ -259,7 +263,7 @@ public: a.appendMaxKey("$lt"); BSONObj limit = a.done(); ASSERT(!_client.findOne(ns, QUERY("a" << limit)).isEmpty()); - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); ASSERT(!_client.findOne(ns, QUERY("a" << limit).hint(BSON("a" << 1))).isEmpty()); } }; @@ -281,7 +285,7 @@ public: { // Check internal server handoff to getmore. - OldClientWriteContext ctx(&_txn, ns); + OldClientWriteContext ctx(&_opCtx, ns); auto pinnedCursor = unittest::assertGet(ctx.getCollection()->getCursorManager()->pinCursor(cursorId)); ASSERT_EQUALS(2, pinnedCursor.getCursor()->pos()); @@ -332,11 +336,11 @@ public: // Check that the cursor has been removed. { - AutoGetCollectionForRead ctx(&_txn, NamespaceString(ns)); + AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns)); ASSERT(0 == ctx.getCollection()->getCursorManager()->numCursors()); } - ASSERT_FALSE(CursorManager::eraseCursorGlobal(&_txn, cursorId)); + ASSERT_FALSE(CursorManager::eraseCursorGlobal(&_opCtx, cursorId)); // Check that a subsequent get more fails with the cursor removed. 
ASSERT_THROWS(_client.getMore(ns, cursorId), UserException); @@ -380,7 +384,7 @@ public: // Check that the cursor still exists { - AutoGetCollectionForRead ctx(&_txn, NamespaceString(ns)); + AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns)); ASSERT(1 == ctx.getCollection()->getCursorManager()->numCursors()); ASSERT_OK(ctx.getCollection()->getCursorManager()->pinCursor(cursorId).getStatus()); } @@ -657,9 +661,9 @@ public: _client.dropCollection(ns); _client.createCollection(ns, 10, true); - ScopedTransaction transaction(&_txn, MODE_IX); - Lock::DBLock lk(_txn.lockState(), "unittests", MODE_X); - OldClientContext ctx(&_txn, ns); + ScopedTransaction transaction(&_opCtx, MODE_IX); + Lock::DBLock lk(_opCtx.lockState(), "unittests", MODE_X); + OldClientContext ctx(&_opCtx, ns); BSONObj info; _client.runCommand("unittests", @@ -672,11 +676,11 @@ public: info); Date_t one = Date_t::fromMillisSinceEpoch( - LogicalClock::get(&_txn)->reserveTicks(1).asTimestamp().asLL()); + LogicalClock::get(&_opCtx)->reserveTicks(1).asTimestamp().asLL()); Date_t two = Date_t::fromMillisSinceEpoch( - LogicalClock::get(&_txn)->reserveTicks(1).asTimestamp().asLL()); + LogicalClock::get(&_opCtx)->reserveTicks(1).asTimestamp().asLL()); Date_t three = Date_t::fromMillisSinceEpoch( - LogicalClock::get(&_txn)->reserveTicks(1).asTimestamp().asLL()); + LogicalClock::get(&_opCtx)->reserveTicks(1).asTimestamp().asLL()); insert(ns, BSON("ts" << one)); insert(ns, BSON("ts" << two)); insert(ns, BSON("ts" << three)); @@ -739,7 +743,7 @@ public: } void run() { const char* ns = "unittests.querytests.BasicCount"; - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); count(0); insert(ns, BSON("a" << 3)); count(0); @@ -764,7 +768,7 @@ public: } void run() { const char* ns = "unittests.querytests.ArrayId"; - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("_id" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("_id" << 1))); ASSERT(!error()); _client.insert(ns, fromjson("{'_id':[1,2]}")); ASSERT(error()); @@ -835,7 +839,7 @@ public: const char* ns = "unittests.querytests.NumericEmbedded"; _client.insert(ns, BSON("a" << BSON("b" << 1))); ASSERT(!_client.findOne(ns, BSON("a" << BSON("b" << 1.0))).isEmpty()); - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); ASSERT(!_client.findOne(ns, BSON("a" << BSON("b" << 1.0))).isEmpty()); } }; @@ -855,7 +859,7 @@ public: ASSERT_EQUALS(0u, _client.getIndexSpecs(ns()).size()); } void checkIndex() { - ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("a" << 1))); index(); } void run() { @@ -878,12 +882,12 @@ public: } void run() { const char* ns = "unittests.querytests.UniqueIndex"; - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1), true)); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1), true)); _client.insert(ns, BSON("a" << 4 << "b" << 2)); _client.insert(ns, BSON("a" << 4 << "b" << 3)); ASSERT_EQUALS(1U, _client.count(ns, BSONObj())); _client.dropCollection(ns); - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("b" << 1), true)); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("b" << 1), true)); _client.insert(ns, BSON("a" << 4 << "b" << 2)); _client.insert(ns, BSON("a" << 4 << "b" << 3)); ASSERT_EQUALS(2U, _client.count(ns, BSONObj())); @@ -900,7 +904,7 @@ public: _client.insert(ns, BSON("a" << 4 << "b" << 2)); _client.insert(ns, BSON("a" << 4 << 
"b" << 3)); ASSERT_EQUALS(ErrorCodes::DuplicateKey, - dbtests::createIndex(&_txn, ns, BSON("a" << 1), true)); + dbtests::createIndex(&_opCtx, ns, BSON("a" << 1), true)); ASSERT_EQUALS( 0U, _client.count("unittests.system.indexes", BSON("ns" << ns << "name" << NE << "_id_"))); @@ -928,7 +932,7 @@ public: void run() { const char* ns = "unittests.querytests.Size"; _client.insert(ns, fromjson("{a:[1,2,3]}")); - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); ASSERT(_client.query(ns, QUERY("a" << mongo::BSIZE << 3).hint(BSON("a" << 1)))->more()); } }; @@ -942,7 +946,7 @@ public: const char* ns = "unittests.querytests.IndexedArray"; _client.insert(ns, fromjson("{a:[1,2,3]}")); ASSERT(_client.query(ns, Query("{a:[1,2,3]}"))->more()); - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); ASSERT(_client.query(ns, Query("{a:{$in:[1,[1,2,3]]}}").hint(BSON("a" << 1)))->more()); ASSERT(_client.query(ns, Query("{a:[1,2,3]}").hint(BSON("a" << 1)))->more()); // SERVER-146 } @@ -957,7 +961,7 @@ public: const char* ns = "unittests.querytests.InsideArray"; _client.insert(ns, fromjson("{a:[[1],2]}")); check("$natural"); - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); check("a"); // SERVER-146 } @@ -980,7 +984,7 @@ public: const char* ns = "unittests.querytests.IndexInsideArrayCorrect"; _client.insert(ns, fromjson("{'_id':1,a:[1]}")); _client.insert(ns, fromjson("{'_id':2,a:[[1]]}")); - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); ASSERT_EQUALS( 1, _client.query(ns, Query("{a:[1]}").hint(BSON("a" << 1)))->next().getIntField("_id")); } @@ -995,7 +999,7 @@ public: const char* ns = "unittests.querytests.SubobjArr"; _client.insert(ns, fromjson("{a:[{b:[1]}]}")); check("$natural"); - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); check("a"); } @@ -1014,7 +1018,7 @@ public: _client.dropCollection("unittests.querytests.MinMax"); } void run() { - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1 << "b" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1 << "b" << 1))); _client.insert(ns, BSON("a" << 1 << "b" << 1)); _client.insert(ns, BSON("a" << 1 << "b" << 2)); _client.insert(ns, BSON("a" << 2 << "b" << 1)); @@ -1072,7 +1076,7 @@ public: } void run() { checkMatch(); - ASSERT_OK(dbtests::createIndex(&_txn, _ns, BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _ns, BSON("a" << 1))); checkMatch(); } @@ -1110,7 +1114,7 @@ public: } void run() { checkMatch(); - ASSERT_OK(dbtests::createIndex(&_txn, _ns, BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _ns, BSON("a" << 1))); checkMatch(); } @@ -1133,9 +1137,9 @@ private: class DirectLocking : public ClientBase { public: void run() { - ScopedTransaction transaction(&_txn, MODE_X); - Lock::GlobalWrite lk(_txn.lockState()); - OldClientContext ctx(&_txn, "unittests.DirectLocking"); + ScopedTransaction transaction(&_opCtx, MODE_X); + Lock::GlobalWrite lk(_opCtx.lockState()); + OldClientContext ctx(&_opCtx, "unittests.DirectLocking"); _client.remove("a.b", BSONObj()); ASSERT_EQUALS("unittests", ctx.db()->name()); } @@ -1152,7 +1156,7 @@ public: _client.insert(ns, BSON("i" << "a")); - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("i" << 1))); + 
ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("i" << 1))); ASSERT_EQUALS(1U, _client.count(ns, fromjson("{i:{$in:['a']}}"))); } }; @@ -1226,7 +1230,7 @@ public: } t(ns); - ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("7" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("7" << 1))); t(ns); } }; @@ -1248,7 +1252,7 @@ public: } size_t numCursorsOpen() { - AutoGetCollectionForRead ctx(&_txn, NamespaceString(_ns)); + AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(_ns)); Collection* collection = ctx.getCollection(); if (!collection) return 0; @@ -1286,7 +1290,7 @@ public: BSON("x" << "eliot"))["z"] .number()); - ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("x" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("x" << 1))); ASSERT_EQUALS(17, _client .findOne(ns(), @@ -1304,13 +1308,13 @@ public: } void run() { string err; - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); // note that extents are always at least 4KB now - so this will get rounded up // a bit. { - WriteUnitOfWork wunit(&_txn); - ASSERT(userCreateNS(&_txn, + WriteUnitOfWork wunit(&_opCtx); + ASSERT(userCreateNS(&_opCtx, ctx.db(), ns(), fromjson("{ capped : true, size : 2000, max: 10000 }"), @@ -1368,7 +1372,7 @@ public: HelperTest() : CollectionBase("helpertest") {} void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); for (int i = 0; i < 50; i++) { insert(ns(), BSON("_id" << i << "x" << i * 2)); @@ -1377,13 +1381,13 @@ public: ASSERT_EQUALS(50, count()); BSONObj res; - ASSERT(Helpers::findOne(&_txn, ctx.getCollection(), BSON("_id" << 20), res, true)); + ASSERT(Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res, true)); ASSERT_EQUALS(40, res["x"].numberInt()); - ASSERT(Helpers::findById(&_txn, ctx.db(), ns(), BSON("_id" << 20), res)); + ASSERT(Helpers::findById(&_opCtx, ctx.db(), ns(), BSON("_id" << 20), res)); ASSERT_EQUALS(40, res["x"].numberInt()); - ASSERT(!Helpers::findById(&_txn, ctx.db(), ns(), BSON("_id" << 200), res)); + ASSERT(!Helpers::findById(&_opCtx, ctx.db(), ns(), BSON("_id" << 200), res)); long long slow; long long fast; @@ -1393,14 +1397,15 @@ public: { Timer t; for (int i = 0; i < n; i++) { - ASSERT(Helpers::findOne(&_txn, ctx.getCollection(), BSON("_id" << 20), res, true)); + ASSERT( + Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res, true)); } slow = t.micros(); } { Timer t; for (int i = 0; i < n; i++) { - ASSERT(Helpers::findById(&_txn, ctx.db(), ns(), BSON("_id" << 20), res)); + ASSERT(Helpers::findById(&_opCtx, ctx.db(), ns(), BSON("_id" << 20), res)); } fast = t.micros(); } @@ -1414,7 +1419,7 @@ public: HelperByIdTest() : CollectionBase("helpertestbyid") {} void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); for (int i = 0; i < 1000; i++) { insert(ns(), BSON("_id" << i << "x" << i * 2)); @@ -1425,7 +1430,7 @@ public: BSONObj res; for (int i = 0; i < 1000; i++) { - bool found = Helpers::findById(&_txn, ctx.db(), ns(), BSON("_id" << i), res); + bool found = Helpers::findById(&_opCtx, ctx.db(), ns(), BSON("_id" << i), res); ASSERT_EQUALS(i % 2, int(found)); } } @@ -1435,7 +1440,7 @@ class ClientCursorTest : public CollectionBase { ClientCursorTest() : CollectionBase("clientcursortest") {} void run() { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); for (int i = 0; i < 1000; i++) { insert(ns(), BSON("_id" << i << "x" << i * 2)); @@ -1595,9 +1600,9 @@ class 
CollectionInternalBase : public CollectionBase { public: CollectionInternalBase(const char* nsLeaf) : CollectionBase(nsLeaf), - _scopedXact(&_txn, MODE_IX), - _lk(_txn.lockState(), "unittests", MODE_X), - _ctx(&_txn, ns()) {} + _scopedXact(&_opCtx, MODE_IX), + _lk(_opCtx.lockState(), "unittests", MODE_X), + _ctx(&_opCtx, ns()) {} private: ScopedTransaction _scopedXact; @@ -1631,7 +1636,7 @@ public: DbMessage dbMessage(message); QueryMessage queryMessage(dbMessage); Message result; - string exhaust = runQuery(&_txn, queryMessage, NamespaceString(ns()), result); + string exhaust = runQuery(&_opCtx, queryMessage, NamespaceString(ns()), result); ASSERT(exhaust.size()); ASSERT_EQUALS(string(ns()), exhaust); } @@ -1650,7 +1655,7 @@ public: ClientCursor* clientCursor = 0; { - AutoGetCollectionForRead ctx(&_txn, NamespaceString(ns())); + AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns())); auto clientCursorPin = unittest::assertGet(ctx.getCollection()->getCursorManager()->pinCursor(cursorId)); clientCursor = clientCursorPin.getCursor(); @@ -1702,11 +1707,11 @@ public: long long cursorId = cursor->getCursorId(); { - OldClientWriteContext ctx(&_txn, ns()); + OldClientWriteContext ctx(&_opCtx, ns()); auto pinnedCursor = unittest::assertGet( ctx.db()->getCollection(ns())->getCursorManager()->pinCursor(cursorId)); string expectedAssertion = str::stream() << "Cannot kill pinned cursor: " << cursorId; - ASSERT_THROWS_WHAT(CursorManager::eraseCursorGlobal(&_txn, cursorId), + ASSERT_THROWS_WHAT(CursorManager::eraseCursorGlobal(&_opCtx, cursorId), MsgAssertionException, expectedAssertion); } diff --git a/src/mongo/dbtests/replica_set_tests.cpp b/src/mongo/dbtests/replica_set_tests.cpp index f6cb5b11e21..b11f0111be1 100644 --- a/src/mongo/dbtests/replica_set_tests.cpp +++ b/src/mongo/dbtests/replica_set_tests.cpp @@ -46,15 +46,15 @@ ServiceContext::UniqueOperationContext makeOpCtx() { class ReplicaSetTest : public mongo::unittest::Test { protected: void setUp() { - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); _storageInterface = stdx::make_unique<repl::StorageInterfaceMock>(); _replCoordExternalState.reset( new repl::ReplicationCoordinatorExternalStateImpl(_storageInterface.get())); } void tearDown() { - auto txn = makeOpCtx(); - DBDirectClient client(txn.get()); + auto opCtx = makeOpCtx(); + DBDirectClient client(opCtx.get()); client.dropCollection("local.replset.election"); _replCoordExternalState.reset(); @@ -75,57 +75,57 @@ private: }; TEST_F(ReplicaSetTest, ReplCoordExternalStateStoresLastVoteWithNewTerm) { - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); auto replCoordExternalState = getReplCoordExternalState(); - replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{2, 1}); + replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1}); - auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get()); + auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get()); ASSERT_OK(lastVote.getStatus()); ASSERT_EQ(lastVote.getValue().getTerm(), 2); ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1); - replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{3, 1}); + replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{3, 1}); - lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get()); + lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get()); ASSERT_OK(lastVote.getStatus()); ASSERT_EQ(lastVote.getValue().getTerm(), 3); 
ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1); } TEST_F(ReplicaSetTest, ReplCoordExternalStateDoesNotStoreLastVoteWithOldTerm) { - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); auto replCoordExternalState = getReplCoordExternalState(); - replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{2, 1}); + replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1}); - auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get()); + auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get()); ASSERT_OK(lastVote.getStatus()); ASSERT_EQ(lastVote.getValue().getTerm(), 2); ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1); - replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{1, 1}); + replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{1, 1}); - lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get()); + lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get()); ASSERT_OK(lastVote.getStatus()); ASSERT_EQ(lastVote.getValue().getTerm(), 2); ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1); } TEST_F(ReplicaSetTest, ReplCoordExternalStateDoesNotStoreLastVoteWithEqualTerm) { - auto txn = makeOpCtx(); + auto opCtx = makeOpCtx(); auto replCoordExternalState = getReplCoordExternalState(); - replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{2, 1}); + replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1}); - auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get()); + auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get()); ASSERT_OK(lastVote.getStatus()); ASSERT_EQ(lastVote.getValue().getTerm(), 2); ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1); - replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{2, 2}); + replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 2}); - lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get()); + lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get()); ASSERT_OK(lastVote.getStatus()); ASSERT_EQ(lastVote.getValue().getTerm(), 2); ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1); diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp index 8a2a597645f..59eedcd86b7 100644 --- a/src/mongo/dbtests/repltests.cpp +++ b/src/mongo/dbtests/repltests.cpp @@ -69,36 +69,36 @@ BSONObj f(const char* s) { class Base { protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); - OperationContext& _txn = *_txnPtr; + OperationContext& _opCtx = *_txnPtr; mutable DBDirectClient _client; public: - Base() : _client(&_txn) { + Base() : _client(&_opCtx) { ReplSettings replSettings; replSettings.setOplogSizeBytes(10 * 1024 * 1024); replSettings.setMaster(true); setGlobalReplicationCoordinator( - new repl::ReplicationCoordinatorMock(_txn.getServiceContext(), replSettings)); + new repl::ReplicationCoordinatorMock(_opCtx.getServiceContext(), replSettings)); // Since the Client object persists across tests, even though the global // ReplicationCoordinator does not, we need to clear the last op associated with the client // to avoid the invariant in ReplClientInfo::setLastOp that the optime only goes forward. 
- repl::ReplClientInfo::forClient(_txn.getClient()).clearLastOp_forTest(); + repl::ReplClientInfo::forClient(_opCtx.getClient()).clearLastOp_forTest(); getGlobalServiceContext()->setOpObserver(stdx::make_unique<OpObserverImpl>()); setOplogCollectionName(); - createOplog(&_txn); + createOplog(&_opCtx); - OldClientWriteContext ctx(&_txn, ns()); - WriteUnitOfWork wuow(&_txn); + OldClientWriteContext ctx(&_opCtx, ns()); + WriteUnitOfWork wuow(&_opCtx); Collection* c = ctx.db()->getCollection(ns()); if (!c) { - c = ctx.db()->createCollection(&_txn, ns()); + c = ctx.db()->createCollection(&_opCtx, ns()); } - ASSERT(c->getIndexCatalog()->haveIdIndex(&_txn)); + ASSERT(c->getIndexCatalog()->haveIdIndex(&_opCtx)); wuow.commit(); } ~Base() { @@ -108,7 +108,7 @@ public: ReplSettings replSettings; replSettings.setOplogSizeBytes(10 * 1024 * 1024); setGlobalReplicationCoordinator( - new repl::ReplicationCoordinatorMock(_txn.getServiceContext(), replSettings)); + new repl::ReplicationCoordinatorMock(_opCtx.getServiceContext(), replSettings)); } catch (...) { FAIL("Exception while cleaning up test"); } @@ -145,68 +145,68 @@ protected: return _client.findOne(cllNS(), BSONObj()); } int count() const { - ScopedTransaction transaction(&_txn, MODE_X); - Lock::GlobalWrite lk(_txn.lockState()); - OldClientContext ctx(&_txn, ns()); + ScopedTransaction transaction(&_opCtx, MODE_X); + Lock::GlobalWrite lk(_opCtx.lockState()); + OldClientContext ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - WriteUnitOfWork wunit(&_txn); - coll = db->createCollection(&_txn, ns()); + WriteUnitOfWork wunit(&_opCtx); + coll = db->createCollection(&_opCtx, ns()); wunit.commit(); } int count = 0; - auto cursor = coll->getCursor(&_txn); + auto cursor = coll->getCursor(&_opCtx); while (auto record = cursor->next()) { ++count; } return count; } int opCount() { - return DBDirectClient(&_txn).query(cllNS(), BSONObj())->itcount(); + return DBDirectClient(&_opCtx).query(cllNS(), BSONObj())->itcount(); } void applyAllOperations() { - ScopedTransaction transaction(&_txn, MODE_X); - Lock::GlobalWrite lk(_txn.lockState()); + ScopedTransaction transaction(&_opCtx, MODE_X); + Lock::GlobalWrite lk(_opCtx.lockState()); vector<BSONObj> ops; { - DBDirectClient db(&_txn); + DBDirectClient db(&_opCtx); auto cursor = db.query(cllNS(), BSONObj()); while (cursor->more()) { ops.push_back(cursor->nextSafeOwned()); } } { - OldClientContext ctx(&_txn, ns()); + OldClientContext ctx(&_opCtx, ns()); BSONObjBuilder b; b.append("host", "localhost"); b.appendTimestamp("syncedTo", 0); - ReplSource a(&_txn, b.obj()); + ReplSource a(&_opCtx, b.obj()); for (vector<BSONObj>::iterator i = ops.begin(); i != ops.end(); ++i) { if (0) { mongo::unittest::log() << "op: " << *i << endl; } - _txn.setReplicatedWrites(false); - a.applyOperation(&_txn, ctx.db(), *i); - _txn.setReplicatedWrites(true); + _opCtx.setReplicatedWrites(false); + a.applyOperation(&_opCtx, ctx.db(), *i); + _opCtx.setReplicatedWrites(true); } } } void printAll(const char* ns) { - ScopedTransaction transaction(&_txn, MODE_X); - Lock::GlobalWrite lk(_txn.lockState()); - OldClientContext ctx(&_txn, ns); + ScopedTransaction transaction(&_opCtx, MODE_X); + Lock::GlobalWrite lk(_opCtx.lockState()); + OldClientContext ctx(&_opCtx, ns); Database* db = ctx.db(); Collection* coll = db->getCollection(ns); if (!coll) { - WriteUnitOfWork wunit(&_txn); - coll = db->createCollection(&_txn, ns); + WriteUnitOfWork wunit(&_opCtx); + coll = db->createCollection(&_opCtx, ns); 
wunit.commit(); } - auto cursor = coll->getCursor(&_txn); + auto cursor = coll->getCursor(&_opCtx); ::mongo::log() << "all for " << ns << endl; while (auto record = cursor->next()) { ::mongo::log() << record->data.releaseToBson() << endl; @@ -214,35 +214,35 @@ protected: } // These deletes don't get logged. void deleteAll(const char* ns) const { - ScopedTransaction transaction(&_txn, MODE_X); - Lock::GlobalWrite lk(_txn.lockState()); - OldClientContext ctx(&_txn, ns); - WriteUnitOfWork wunit(&_txn); + ScopedTransaction transaction(&_opCtx, MODE_X); + Lock::GlobalWrite lk(_opCtx.lockState()); + OldClientContext ctx(&_opCtx, ns); + WriteUnitOfWork wunit(&_opCtx); Database* db = ctx.db(); Collection* coll = db->getCollection(ns); if (!coll) { - coll = db->createCollection(&_txn, ns); + coll = db->createCollection(&_opCtx, ns); } - ASSERT_OK(coll->truncate(&_txn)); + ASSERT_OK(coll->truncate(&_opCtx)); wunit.commit(); } void insert(const BSONObj& o) const { - ScopedTransaction transaction(&_txn, MODE_X); - Lock::GlobalWrite lk(_txn.lockState()); - OldClientContext ctx(&_txn, ns()); - WriteUnitOfWork wunit(&_txn); + ScopedTransaction transaction(&_opCtx, MODE_X); + Lock::GlobalWrite lk(_opCtx.lockState()); + OldClientContext ctx(&_opCtx, ns()); + WriteUnitOfWork wunit(&_opCtx); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { - coll = db->createCollection(&_txn, ns()); + coll = db->createCollection(&_opCtx, ns()); } OpDebug* const nullOpDebug = nullptr; if (o.hasField("_id")) { - _txn.setReplicatedWrites(false); - coll->insertDocument(&_txn, o, nullOpDebug, true); - _txn.setReplicatedWrites(true); + _opCtx.setReplicatedWrites(false); + coll->insertDocument(&_opCtx, o, nullOpDebug, true); + _opCtx.setReplicatedWrites(true); wunit.commit(); return; } @@ -252,9 +252,9 @@ protected: id.init(); b.appendOID("_id", &id); b.appendElements(o); - _txn.setReplicatedWrites(false); - coll->insertDocument(&_txn, b.obj(), nullOpDebug, true); - _txn.setReplicatedWrites(true); + _opCtx.setReplicatedWrites(false); + coll->insertDocument(&_opCtx, b.obj(), nullOpDebug, true); + _opCtx.setReplicatedWrites(true); wunit.commit(); } static BSONObj wid(const char* json) { @@ -1240,7 +1240,7 @@ public: void reset() const { deleteAll(ns()); // Add an index on 'a'. This prevents the update from running 'in place'. 
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("a" << 1))); insert(fromjson("{'_id':0,z:1}")); } }; @@ -1383,7 +1383,7 @@ public: bool returnEmpty; SyncTest() : SyncTail(nullptr, SyncTail::MultiSyncApplyFunc()), returnEmpty(false) {} virtual ~SyncTest() {} - virtual BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) { + virtual BSONObj getMissingDoc(OperationContext* opCtx, Database* db, const BSONObj& o) { if (returnEmpty) { BSONObj o; return o; @@ -1407,16 +1407,16 @@ public: << "foo" << "bar")); - ScopedTransaction transaction(&_txn, MODE_X); - Lock::GlobalWrite lk(_txn.lockState()); + ScopedTransaction transaction(&_opCtx, MODE_X); + Lock::GlobalWrite lk(_opCtx.lockState()); // this should fail because we can't connect try { SyncTail badSource(nullptr, SyncTail::MultiSyncApplyFunc()); badSource.setHostname("localhost:123"); - OldClientContext ctx(&_txn, ns()); - badSource.getMissingDoc(&_txn, ctx.db(), o); + OldClientContext ctx(&_opCtx, ns()); + badSource.getMissingDoc(&_opCtx, ctx.db(), o); } catch (DBException&) { threw = true; } @@ -1424,7 +1424,7 @@ public: // now this should succeed SyncTest t; - verify(t.shouldRetry(&_txn, o)); + verify(t.shouldRetry(&_opCtx, o)); verify(!_client .findOne(ns(), BSON("_id" @@ -1433,7 +1433,7 @@ public: // force it not to find an obj t.returnEmpty = true; - verify(!t.shouldRetry(&_txn, o)); + verify(!t.shouldRetry(&_opCtx, o)); } }; diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp index b842eb5a9bc..27982080d36 100644 --- a/src/mongo/dbtests/rollbacktests.cpp +++ b/src/mongo/dbtests/rollbacktests.cpp @@ -50,13 +50,13 @@ namespace RollbackTests { namespace { const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2; -void dropDatabase(OperationContext* txn, const NamespaceString& nss) { - ScopedTransaction transaction(txn, MODE_X); - Lock::GlobalWrite globalWriteLock(txn->lockState()); - Database* db = dbHolder().get(txn, nss.db()); +void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) { + ScopedTransaction transaction(opCtx, MODE_X); + Lock::GlobalWrite globalWriteLock(opCtx->lockState()); + Database* db = dbHolder().get(opCtx, nss.db()); if (db) { - Database::dropDatabase(txn, db); + Database::dropDatabase(opCtx, db); } } bool collectionExists(OldClientContext* ctx, const string& ns) { @@ -65,38 +65,38 @@ bool collectionExists(OldClientContext* ctx, const string& ns) { dbEntry->getCollectionNamespaces(&names); return std::find(names.begin(), names.end(), ns) != names.end(); } -void createCollection(OperationContext* txn, const NamespaceString& nss) { - ScopedTransaction transaction(txn, MODE_IX); - Lock::DBLock dbXLock(txn->lockState(), nss.db(), MODE_X); - OldClientContext ctx(txn, nss.ns()); +void createCollection(OperationContext* opCtx, const NamespaceString& nss) { + ScopedTransaction transaction(opCtx, MODE_IX); + Lock::DBLock dbXLock(opCtx->lockState(), nss.db(), MODE_X); + OldClientContext ctx(opCtx, nss.ns()); { - WriteUnitOfWork uow(txn); + WriteUnitOfWork uow(opCtx); ASSERT(!collectionExists(&ctx, nss.ns())); - ASSERT_OK(userCreateNS(txn, ctx.db(), nss.ns(), BSONObj(), false)); + ASSERT_OK(userCreateNS(opCtx, ctx.db(), nss.ns(), BSONObj(), false)); ASSERT(collectionExists(&ctx, nss.ns())); uow.commit(); } } -Status renameCollection(OperationContext* txn, +Status renameCollection(OperationContext* opCtx, const NamespaceString& source, const NamespaceString& target) { 
ASSERT_EQ(source.db(), target.db()); - Database* db = dbHolder().get(txn, source.db()); - return db->renameCollection(txn, source.ns(), target.ns(), false); + Database* db = dbHolder().get(opCtx, source.db()); + return db->renameCollection(opCtx, source.ns(), target.ns(), false); } -Status truncateCollection(OperationContext* txn, const NamespaceString& nss) { - Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns()); - return coll->truncate(txn); +Status truncateCollection(OperationContext* opCtx, const NamespaceString& nss) { + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns()); + return coll->truncate(opCtx); } -void insertRecord(OperationContext* txn, const NamespaceString& nss, const BSONObj& data) { - Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns()); +void insertRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) { + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns()); OpDebug* const nullOpDebug = nullptr; - ASSERT_OK(coll->insertDocument(txn, data, nullOpDebug, false)); + ASSERT_OK(coll->insertDocument(opCtx, data, nullOpDebug, false)); } -void assertOnlyRecord(OperationContext* txn, const NamespaceString& nss, const BSONObj& data) { - Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns()); - auto cursor = coll->getCursor(txn); +void assertOnlyRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) { + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns()); + auto cursor = coll->getCursor(opCtx); auto record = cursor->next(); ASSERT(record); @@ -104,29 +104,29 @@ void assertOnlyRecord(OperationContext* txn, const NamespaceString& nss, const B ASSERT(!cursor->next()); } -void assertEmpty(OperationContext* txn, const NamespaceString& nss) { - Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns()); - ASSERT(!coll->getCursor(txn)->next()); +void assertEmpty(OperationContext* opCtx, const NamespaceString& nss) { + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns()); + ASSERT(!coll->getCursor(opCtx)->next()); } -bool indexExists(OperationContext* txn, const NamespaceString& nss, const string& idxName) { - Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns()); - return coll->getIndexCatalog()->findIndexByName(txn, idxName, true) != NULL; +bool indexExists(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) { + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns()); + return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, true) != NULL; } -bool indexReady(OperationContext* txn, const NamespaceString& nss, const string& idxName) { - Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns()); - return coll->getIndexCatalog()->findIndexByName(txn, idxName, false) != NULL; +bool indexReady(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) { + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns()); + return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, false) != NULL; } -size_t getNumIndexEntries(OperationContext* txn, +size_t getNumIndexEntries(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) { size_t numEntries = 0; - Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns()); + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns()); IndexCatalog* catalog = 
coll->getIndexCatalog(); - IndexDescriptor* desc = catalog->findIndexByName(txn, idxName, false); + IndexDescriptor* desc = catalog->findIndexByName(opCtx, idxName, false); if (desc) { - auto cursor = catalog->getIndex(desc)->newCursor(txn); + auto cursor = catalog->getIndex(desc)->newCursor(opCtx); for (auto kv = cursor->seek(kMinBSONKey, true); kv; kv = cursor->next()) { numEntries++; @@ -136,11 +136,11 @@ size_t getNumIndexEntries(OperationContext* txn, return numEntries; } -void dropIndex(OperationContext* txn, const NamespaceString& nss, const string& idxName) { - Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns()); - IndexDescriptor* desc = coll->getIndexCatalog()->findIndexByName(txn, idxName); +void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) { + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns()); + IndexDescriptor* desc = coll->getIndexCatalog()->findIndexByName(opCtx, idxName); ASSERT(desc); - ASSERT_OK(coll->getIndexCatalog()->dropIndex(txn, desc)); + ASSERT_OK(coll->getIndexCatalog()->dropIndex(opCtx, desc)); } } // namespace @@ -149,19 +149,19 @@ class CreateCollection { public: void run() { string ns = "unittests.rollback_create_collection"; - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; NamespaceString nss(ns); - dropDatabase(&txn, nss); + dropDatabase(&opCtx, nss); - ScopedTransaction transaction(&txn, MODE_IX); - Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X); - OldClientContext ctx(&txn, ns); + ScopedTransaction transaction(&opCtx, MODE_IX); + Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X); + OldClientContext ctx(&opCtx, ns); { - WriteUnitOfWork uow(&txn); + WriteUnitOfWork uow(&opCtx); ASSERT(!collectionExists(&ctx, ns)); auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj(); - ASSERT_OK(userCreateNS(&txn, ctx.db(), ns, options, defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, ctx.db(), ns, options, defaultIndexes)); ASSERT(collectionExists(&ctx, ns)); if (!rollback) { uow.commit(); @@ -180,19 +180,19 @@ class DropCollection { public: void run() { string ns = "unittests.rollback_drop_collection"; - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; NamespaceString nss(ns); - dropDatabase(&txn, nss); + dropDatabase(&opCtx, nss); - ScopedTransaction transaction(&txn, MODE_IX); - Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X); - OldClientContext ctx(&txn, ns); + ScopedTransaction transaction(&opCtx, MODE_IX); + Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X); + OldClientContext ctx(&opCtx, ns); { - WriteUnitOfWork uow(&txn); + WriteUnitOfWork uow(&opCtx); ASSERT(!collectionExists(&ctx, ns)); auto options = capped ? 
BSON("capped" << true << "size" << 1000) : BSONObj(); - ASSERT_OK(userCreateNS(&txn, ctx.db(), ns, options, defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, ctx.db(), ns, options, defaultIndexes)); uow.commit(); } ASSERT(collectionExists(&ctx, ns)); @@ -200,9 +200,9 @@ public: // END OF SETUP / START OF TEST { - WriteUnitOfWork uow(&txn); + WriteUnitOfWork uow(&opCtx); ASSERT(collectionExists(&ctx, ns)); - ASSERT_OK(ctx.db()->dropCollection(&txn, ns)); + ASSERT_OK(ctx.db()->dropCollection(&opCtx, ns)); ASSERT(!collectionExists(&ctx, ns)); if (!rollback) { uow.commit(); @@ -222,21 +222,21 @@ public: void run() { NamespaceString source("unittests.rollback_rename_collection_src"); NamespaceString target("unittests.rollback_rename_collection_dest"); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; - dropDatabase(&txn, source); - dropDatabase(&txn, target); + dropDatabase(&opCtx, source); + dropDatabase(&opCtx, target); - ScopedTransaction transaction(&txn, MODE_X); - Lock::GlobalWrite globalWriteLock(txn.lockState()); - OldClientContext ctx(&txn, source.ns()); + ScopedTransaction transaction(&opCtx, MODE_X); + Lock::GlobalWrite globalWriteLock(opCtx.lockState()); + OldClientContext ctx(&opCtx, source.ns()); { - WriteUnitOfWork uow(&txn); + WriteUnitOfWork uow(&opCtx); ASSERT(!collectionExists(&ctx, source.ns())); ASSERT(!collectionExists(&ctx, target.ns())); - ASSERT_OK(userCreateNS(&txn, ctx.db(), source.ns(), BSONObj(), defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, ctx.db(), source.ns(), BSONObj(), defaultIndexes)); uow.commit(); } ASSERT(collectionExists(&ctx, source.ns())); @@ -245,8 +245,8 @@ public: // END OF SETUP / START OF TEST { - WriteUnitOfWork uow(&txn); - ASSERT_OK(renameCollection(&txn, source, target)); + WriteUnitOfWork uow(&opCtx); + ASSERT_OK(renameCollection(&opCtx, source, target)); ASSERT(!collectionExists(&ctx, source.ns())); ASSERT(collectionExists(&ctx, target.ns())); if (!rollback) { @@ -269,15 +269,15 @@ public: void run() { NamespaceString source("unittests.rollback_rename_droptarget_collection_src"); NamespaceString target("unittests.rollback_rename_droptarget_collection_dest"); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; - dropDatabase(&txn, source); - dropDatabase(&txn, target); + dropDatabase(&opCtx, source); + dropDatabase(&opCtx, target); - ScopedTransaction transaction(&txn, MODE_X); - Lock::GlobalWrite globalWriteLock(txn.lockState()); - OldClientContext ctx(&txn, source.ns()); + ScopedTransaction transaction(&opCtx, MODE_X); + Lock::GlobalWrite globalWriteLock(opCtx.lockState()); + OldClientContext ctx(&opCtx, source.ns()); BSONObj sourceDoc = BSON("_id" << "source"); @@ -285,31 +285,31 @@ public: << "target"); { - WriteUnitOfWork uow(&txn); + WriteUnitOfWork uow(&opCtx); ASSERT(!collectionExists(&ctx, source.ns())); ASSERT(!collectionExists(&ctx, target.ns())); - ASSERT_OK(userCreateNS(&txn, ctx.db(), source.ns(), BSONObj(), defaultIndexes)); - ASSERT_OK(userCreateNS(&txn, ctx.db(), target.ns(), BSONObj(), defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, ctx.db(), source.ns(), BSONObj(), defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, ctx.db(), 
target.ns(), BSONObj(), defaultIndexes)); - insertRecord(&txn, source, sourceDoc); - insertRecord(&txn, target, targetDoc); + insertRecord(&opCtx, source, sourceDoc); + insertRecord(&opCtx, target, targetDoc); uow.commit(); } ASSERT(collectionExists(&ctx, source.ns())); ASSERT(collectionExists(&ctx, target.ns())); - assertOnlyRecord(&txn, source, sourceDoc); - assertOnlyRecord(&txn, target, targetDoc); + assertOnlyRecord(&opCtx, source, sourceDoc); + assertOnlyRecord(&opCtx, target, targetDoc); // END OF SETUP / START OF TEST { - WriteUnitOfWork uow(&txn); - ASSERT_OK(ctx.db()->dropCollection(&txn, target.ns())); - ASSERT_OK(renameCollection(&txn, source, target)); + WriteUnitOfWork uow(&opCtx); + ASSERT_OK(ctx.db()->dropCollection(&opCtx, target.ns())); + ASSERT_OK(renameCollection(&opCtx, source, target)); ASSERT(!collectionExists(&ctx, source.ns())); ASSERT(collectionExists(&ctx, target.ns())); - assertOnlyRecord(&txn, target, sourceDoc); + assertOnlyRecord(&opCtx, target, sourceDoc); if (!rollback) { uow.commit(); } @@ -317,12 +317,12 @@ public: if (rollback) { ASSERT(collectionExists(&ctx, source.ns())); ASSERT(collectionExists(&ctx, target.ns())); - assertOnlyRecord(&txn, source, sourceDoc); - assertOnlyRecord(&txn, target, targetDoc); + assertOnlyRecord(&opCtx, source, sourceDoc); + assertOnlyRecord(&opCtx, target, targetDoc); } else { ASSERT(!collectionExists(&ctx, source.ns())); ASSERT(collectionExists(&ctx, target.ns())); - assertOnlyRecord(&txn, target, sourceDoc); + assertOnlyRecord(&opCtx, target, sourceDoc); } } }; @@ -332,13 +332,13 @@ class ReplaceCollection { public: void run() { NamespaceString nss("unittests.rollback_replace_collection"); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; - dropDatabase(&txn, nss); + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; + dropDatabase(&opCtx, nss); - ScopedTransaction transaction(&txn, MODE_IX); - Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X); - OldClientContext ctx(&txn, nss.ns()); + ScopedTransaction transaction(&opCtx, MODE_IX); + Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X); + OldClientContext ctx(&opCtx, nss.ns()); BSONObj oldDoc = BSON("_id" << "old"); @@ -346,34 +346,34 @@ public: << "new"); { - WriteUnitOfWork uow(&txn); + WriteUnitOfWork uow(&opCtx); ASSERT(!collectionExists(&ctx, nss.ns())); - ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes)); - insertRecord(&txn, nss, oldDoc); + ASSERT_OK(userCreateNS(&opCtx, ctx.db(), nss.ns(), BSONObj(), defaultIndexes)); + insertRecord(&opCtx, nss, oldDoc); uow.commit(); } ASSERT(collectionExists(&ctx, nss.ns())); - assertOnlyRecord(&txn, nss, oldDoc); + assertOnlyRecord(&opCtx, nss, oldDoc); // END OF SETUP / START OF TEST { - WriteUnitOfWork uow(&txn); - ASSERT_OK(ctx.db()->dropCollection(&txn, nss.ns())); + WriteUnitOfWork uow(&opCtx); + ASSERT_OK(ctx.db()->dropCollection(&opCtx, nss.ns())); ASSERT(!collectionExists(&ctx, nss.ns())); - ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, ctx.db(), nss.ns(), BSONObj(), defaultIndexes)); ASSERT(collectionExists(&ctx, nss.ns())); - insertRecord(&txn, nss, newDoc); - assertOnlyRecord(&txn, nss, newDoc); + insertRecord(&opCtx, nss, newDoc); + assertOnlyRecord(&opCtx, nss, newDoc); if (!rollback) { uow.commit(); } } ASSERT(collectionExists(&ctx, nss.ns())); if (rollback) { - 
assertOnlyRecord(&txn, nss, oldDoc); + assertOnlyRecord(&opCtx, nss, oldDoc); } else { - assertOnlyRecord(&txn, nss, newDoc); + assertOnlyRecord(&opCtx, nss, newDoc); } } }; @@ -383,27 +383,27 @@ class CreateDropCollection { public: void run() { NamespaceString nss("unittests.rollback_create_drop_collection"); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; - dropDatabase(&txn, nss); + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; + dropDatabase(&opCtx, nss); - ScopedTransaction transaction(&txn, MODE_IX); - Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X); - OldClientContext ctx(&txn, nss.ns()); + ScopedTransaction transaction(&opCtx, MODE_IX); + Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X); + OldClientContext ctx(&opCtx, nss.ns()); BSONObj doc = BSON("_id" << "example string"); ASSERT(!collectionExists(&ctx, nss.ns())); { - WriteUnitOfWork uow(&txn); + WriteUnitOfWork uow(&opCtx); - ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, ctx.db(), nss.ns(), BSONObj(), defaultIndexes)); ASSERT(collectionExists(&ctx, nss.ns())); - insertRecord(&txn, nss, doc); - assertOnlyRecord(&txn, nss, doc); + insertRecord(&opCtx, nss, doc); + assertOnlyRecord(&opCtx, nss, doc); - ASSERT_OK(ctx.db()->dropCollection(&txn, nss.ns())); + ASSERT_OK(ctx.db()->dropCollection(&opCtx, nss.ns())); ASSERT(!collectionExists(&ctx, nss.ns())); if (!rollback) { @@ -419,37 +419,37 @@ class TruncateCollection { public: void run() { NamespaceString nss("unittests.rollback_truncate_collection"); - const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); - OperationContext& txn = *txnPtr; - dropDatabase(&txn, nss); + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; + dropDatabase(&opCtx, nss); - ScopedTransaction transaction(&txn, MODE_IX); - Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X); - OldClientContext ctx(&txn, nss.ns()); + ScopedTransaction transaction(&opCtx, MODE_IX); + Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X); + OldClientContext ctx(&opCtx, nss.ns()); BSONObj doc = BSON("_id" << "foo"); ASSERT(!collectionExists(&ctx, nss.ns())); { - WriteUnitOfWork uow(&txn); + WriteUnitOfWork uow(&opCtx); - ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, ctx.db(), nss.ns(), BSONObj(), defaultIndexes)); ASSERT(collectionExists(&ctx, nss.ns())); - insertRecord(&txn, nss, doc); - assertOnlyRecord(&txn, nss, doc); + insertRecord(&opCtx, nss, doc); + assertOnlyRecord(&opCtx, nss, doc); uow.commit(); } - assertOnlyRecord(&txn, nss, doc); + assertOnlyRecord(&opCtx, nss, doc); // END OF SETUP / START OF TEST { - WriteUnitOfWork uow(&txn); + WriteUnitOfWork uow(&opCtx); - ASSERT_OK(truncateCollection(&txn, nss)); + ASSERT_OK(truncateCollection(&opCtx, nss)); ASSERT(collectionExists(&ctx, nss.ns())); - assertEmpty(&txn, nss); + assertEmpty(&opCtx, nss); if (!rollback) { uow.commit(); @@ -457,9 +457,9 @@ public: } ASSERT(collectionExists(&ctx, nss.ns())); if (rollback) { - assertOnlyRecord(&txn, nss, doc); + assertOnlyRecord(&opCtx, nss, doc); } else { - assertEmpty(&txn, nss); + assertEmpty(&opCtx, nss); } } }; @@ -469,14 +469,14 @@ class CreateIndex { public: void run() { string ns = "unittests.rollback_create_index"; - const 
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
         NamespaceString nss(ns);
-        dropDatabase(&txn, nss);
-        createCollection(&txn, nss);
+        dropDatabase(&opCtx, nss);
+        createCollection(&opCtx, nss);
 
-        ScopedTransaction transaction(&txn, MODE_IX);
-        AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+        ScopedTransaction transaction(&opCtx, MODE_IX);
+        AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
 
         Collection* coll = autoDb.getDb()->getCollection(ns);
         IndexCatalog* catalog = coll->getIndexCatalog();
@@ -488,20 +488,20 @@ public:
         // END SETUP / START TEST
 
         {
-            WriteUnitOfWork uow(&txn);
-            ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
-            insertRecord(&txn, nss, BSON("a" << 1));
-            insertRecord(&txn, nss, BSON("a" << 2));
-            insertRecord(&txn, nss, BSON("a" << 3));
+            WriteUnitOfWork uow(&opCtx);
+            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
+            insertRecord(&opCtx, nss, BSON("a" << 1));
+            insertRecord(&opCtx, nss, BSON("a" << 2));
+            insertRecord(&opCtx, nss, BSON("a" << 3));
             if (!rollback) {
                 uow.commit();
             }
         }
         if (rollback) {
-            ASSERT(!indexExists(&txn, nss, idxName));
+            ASSERT(!indexExists(&opCtx, nss, idxName));
         } else {
-            ASSERT(indexReady(&txn, nss, idxName));
+            ASSERT(indexReady(&opCtx, nss, idxName));
         }
     }
 };
@@ -511,14 +511,14 @@ class DropIndex {
 public:
     void run() {
         string ns = "unittests.rollback_drop_index";
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
         NamespaceString nss(ns);
-        dropDatabase(&txn, nss);
-        createCollection(&txn, nss);
+        dropDatabase(&opCtx, nss);
+        createCollection(&opCtx, nss);
 
-        ScopedTransaction transaction(&txn, MODE_IX);
-        AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+        ScopedTransaction transaction(&opCtx, MODE_IX);
+        AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
 
         Collection* coll = autoDb.getDb()->getCollection(ns);
         IndexCatalog* catalog = coll->getIndexCatalog();
@@ -528,34 +528,34 @@ public:
                             << static_cast<int>(kIndexVersion));
 
         {
-            WriteUnitOfWork uow(&txn);
-            ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
-            insertRecord(&txn, nss, BSON("a" << 1));
-            insertRecord(&txn, nss, BSON("a" << 2));
-            insertRecord(&txn, nss, BSON("a" << 3));
+            WriteUnitOfWork uow(&opCtx);
+            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
+            insertRecord(&opCtx, nss, BSON("a" << 1));
+            insertRecord(&opCtx, nss, BSON("a" << 2));
+            insertRecord(&opCtx, nss, BSON("a" << 3));
             uow.commit();
         }
-        ASSERT(indexReady(&txn, nss, idxName));
-        ASSERT_EQ(3u, getNumIndexEntries(&txn, nss, idxName));
+        ASSERT(indexReady(&opCtx, nss, idxName));
+        ASSERT_EQ(3u, getNumIndexEntries(&opCtx, nss, idxName));
 
         // END SETUP / START TEST
 
         {
-            WriteUnitOfWork uow(&txn);
+            WriteUnitOfWork uow(&opCtx);
 
-            dropIndex(&txn, nss, idxName);
-            ASSERT(!indexExists(&txn, nss, idxName));
+            dropIndex(&opCtx, nss, idxName);
+            ASSERT(!indexExists(&opCtx, nss, idxName));
 
             if (!rollback) {
                 uow.commit();
             }
         }
         if (rollback) {
-            ASSERT(indexExists(&txn, nss, idxName));
-            ASSERT(indexReady(&txn, nss, idxName));
-            ASSERT_EQ(3u, getNumIndexEntries(&txn, nss, idxName));
+            ASSERT(indexExists(&opCtx, nss, idxName));
+            ASSERT(indexReady(&opCtx, nss, idxName));
+            ASSERT_EQ(3u, getNumIndexEntries(&opCtx, nss, idxName));
         } else {
-            ASSERT(!indexExists(&txn, nss, idxName));
+            ASSERT(!indexExists(&opCtx, nss, idxName));
         }
     }
 };
@@ -565,14 +565,14 @@ class CreateDropIndex {
 public:
     void run() {
         string ns = "unittests.rollback_create_drop_index";
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
         NamespaceString nss(ns);
-        dropDatabase(&txn, nss);
-        createCollection(&txn, nss);
+        dropDatabase(&opCtx, nss);
+        createCollection(&opCtx, nss);
 
-        ScopedTransaction transaction(&txn, MODE_IX);
-        AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+        ScopedTransaction transaction(&opCtx, MODE_IX);
+        AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
 
         Collection* coll = autoDb.getDb()->getCollection(ns);
         IndexCatalog* catalog = coll->getIndexCatalog();
@@ -584,24 +584,24 @@ public:
         // END SETUP / START TEST
 
         {
-            WriteUnitOfWork uow(&txn);
+            WriteUnitOfWork uow(&opCtx);
 
-            ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
-            insertRecord(&txn, nss, BSON("a" << 1));
-            insertRecord(&txn, nss, BSON("a" << 2));
-            insertRecord(&txn, nss, BSON("a" << 3));
-            ASSERT(indexExists(&txn, nss, idxName));
-            ASSERT_EQ(3u, getNumIndexEntries(&txn, nss, idxName));
+            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
+            insertRecord(&opCtx, nss, BSON("a" << 1));
+            insertRecord(&opCtx, nss, BSON("a" << 2));
+            insertRecord(&opCtx, nss, BSON("a" << 3));
+            ASSERT(indexExists(&opCtx, nss, idxName));
+            ASSERT_EQ(3u, getNumIndexEntries(&opCtx, nss, idxName));
 
-            dropIndex(&txn, nss, idxName);
-            ASSERT(!indexExists(&txn, nss, idxName));
+            dropIndex(&opCtx, nss, idxName);
+            ASSERT(!indexExists(&opCtx, nss, idxName));
 
             if (!rollback) {
                 uow.commit();
             }
         }
 
-        ASSERT(!indexExists(&txn, nss, idxName));
+        ASSERT(!indexExists(&opCtx, nss, idxName));
     }
 };
@@ -610,14 +610,14 @@ class SetIndexHead {
 public:
     void run() {
         string ns = "unittests.rollback_set_index_head";
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
         NamespaceString nss(ns);
-        dropDatabase(&txn, nss);
-        createCollection(&txn, nss);
+        dropDatabase(&opCtx, nss);
+        createCollection(&opCtx, nss);
 
-        ScopedTransaction transaction(&txn, MODE_IX);
-        AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+        ScopedTransaction transaction(&opCtx, MODE_IX);
+        AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
 
         Collection* coll = autoDb.getDb()->getCollection(ns);
         IndexCatalog* catalog = coll->getIndexCatalog();
@@ -627,19 +627,19 @@ public:
                             << static_cast<int>(kIndexVersion));
 
         {
-            WriteUnitOfWork uow(&txn);
-            ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
+            WriteUnitOfWork uow(&opCtx);
+            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
             uow.commit();
         }
 
-        IndexDescriptor* indexDesc = catalog->findIndexByName(&txn, idxName);
+        IndexDescriptor* indexDesc = catalog->findIndexByName(&opCtx, idxName);
         invariant(indexDesc);
         const IndexCatalogEntry* ice = catalog->getEntry(indexDesc);
         invariant(ice);
         HeadManager* headManager = ice->headManager();
 
-        const RecordId oldHead = headManager->getHead(&txn);
-        ASSERT_EQ(oldHead, ice->head(&txn));
+        const RecordId oldHead = headManager->getHead(&opCtx);
+        ASSERT_EQ(oldHead, ice->head(&opCtx));
 
         const RecordId dummyHead(123, 456);
         ASSERT_NE(oldHead, dummyHead);
@@ -647,12 +647,12 @@ public:
         // END SETUP / START TEST
 
         {
-            WriteUnitOfWork uow(&txn);
+            WriteUnitOfWork uow(&opCtx);
 
-            headManager->setHead(&txn, dummyHead);
+            headManager->setHead(&opCtx, dummyHead);
 
-            ASSERT_EQ(ice->head(&txn), dummyHead);
-            ASSERT_EQ(headManager->getHead(&txn), dummyHead);
+            ASSERT_EQ(ice->head(&opCtx), dummyHead);
+            ASSERT_EQ(headManager->getHead(&opCtx), dummyHead);
 
             if (!rollback) {
                 uow.commit();
@@ -660,11 +660,11 @@ public:
         }
 
         if (rollback) {
-            ASSERT_EQ(ice->head(&txn), oldHead);
-            ASSERT_EQ(headManager->getHead(&txn), oldHead);
+            ASSERT_EQ(ice->head(&opCtx), oldHead);
+            ASSERT_EQ(headManager->getHead(&opCtx), oldHead);
         } else {
-            ASSERT_EQ(ice->head(&txn), dummyHead);
-            ASSERT_EQ(headManager->getHead(&txn), dummyHead);
+            ASSERT_EQ(ice->head(&opCtx), dummyHead);
+            ASSERT_EQ(headManager->getHead(&opCtx), dummyHead);
         }
     }
 };
@@ -674,14 +674,14 @@ class CreateCollectionAndIndexes {
 public:
     void run() {
         string ns = "unittests.rollback_create_collection_and_indexes";
-        const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
-        OperationContext& txn = *txnPtr;
+        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+        OperationContext& opCtx = *opCtxPtr;
         NamespaceString nss(ns);
-        dropDatabase(&txn, nss);
+        dropDatabase(&opCtx, nss);
 
-        ScopedTransaction transaction(&txn, MODE_IX);
-        Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
-        OldClientContext ctx(&txn, nss.ns());
+        ScopedTransaction transaction(&opCtx, MODE_IX);
+        Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+        OldClientContext ctx(&opCtx, nss.ns());
         string idxNameA = "indexA";
         string idxNameB = "indexB";
@@ -696,16 +696,16 @@ public:
         // END SETUP / START TEST
 
         {
-            WriteUnitOfWork uow(&txn);
+            WriteUnitOfWork uow(&opCtx);
             ASSERT(!collectionExists(&ctx, nss.ns()));
-            ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), false));
+            ASSERT_OK(userCreateNS(&opCtx, ctx.db(), nss.ns(), BSONObj(), false));
             ASSERT(collectionExists(&ctx, nss.ns()));
             Collection* coll = ctx.db()->getCollection(ns);
             IndexCatalog* catalog = coll->getIndexCatalog();
 
-            ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, specA));
-            ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, specB));
-            ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, specC));
+            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, specA));
+            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, specB));
+            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, specC));
 
             if (!rollback) {
                 uow.commit();
@@ -715,9 +715,9 @@ public:
             ASSERT(!collectionExists(&ctx, ns));
         } else {
            ASSERT(collectionExists(&ctx, ns));
-            ASSERT(indexReady(&txn, nss, idxNameA));
-            ASSERT(indexReady(&txn, nss, idxNameB));
-            ASSERT(indexReady(&txn, nss, idxNameC));
+            ASSERT(indexReady(&opCtx, nss, idxNameA));
+            ASSERT(indexReady(&opCtx, nss, idxNameB));
+            ASSERT(indexReady(&opCtx, nss, idxNameC));
         }
     }
 };
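
The rollback tests above all share one scaffold: create an OperationContext for the current client, take the database locks, then wrap every catalog mutation in a WriteUnitOfWork that is committed only when the test is not exercising rollback. A minimal sketch of that scaffold after the rename, reusing the dbtests helpers (insertRecord, nss, the rollback flag) that appear in the hunks above:

    // Sketch only, condensed from the test bodies above; not part of the commit.
    const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
    OperationContext& opCtx = *opCtxPtr;

    ScopedTransaction transaction(&opCtx, MODE_IX);  // intent lock for the unit of work
    AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);      // exclusive lock on the test database

    {
        WriteUnitOfWork uow(&opCtx);
        insertRecord(&opCtx, nss, BSON("a" << 1));
        if (!rollback) {
            uow.commit();
        }
    }  // destroying uow without commit() rolls the insert back
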
diff --git a/src/mongo/dbtests/sort_key_generator_test.cpp b/src/mongo/dbtests/sort_key_generator_test.cpp
index c808a52623a..0f25f214c07 100644
--- a/src/mongo/dbtests/sort_key_generator_test.cpp
+++ b/src/mongo/dbtests/sort_key_generator_test.cpp
@@ -55,7 +55,7 @@ BSONObj extractSortKey(const char* sortSpec,
                        const char* query,
                        const CollatorInterface* collator) {
     QueryTestServiceContext serviceContext;
-    auto txn = serviceContext.makeOperationContext();
+    auto opCtx = serviceContext.makeOperationContext();
 
     WorkingSetMember wsm;
     wsm.obj = Snapshotted<BSONObj>(SnapshotId(), fromjson(doc));
@@ -63,7 +63,7 @@ BSONObj extractSortKey(const char* sortSpec,
 
     BSONObj sortKey;
     auto sortKeyGen = stdx::make_unique<SortKeyGenerator>(
-        txn.get(), fromjson(sortSpec), fromjson(query), collator);
+        opCtx.get(), fromjson(sortSpec), fromjson(query), collator);
     ASSERT_OK(sortKeyGen->getSortKey(wsm, &sortKey));
 
     return sortKey;
@@ -83,7 +83,7 @@ BSONObj extractSortKeyCovered(const char* sortSpec,
                               const IndexKeyDatum& ikd,
                               const CollatorInterface* collator) {
     QueryTestServiceContext serviceContext;
-    auto txn = serviceContext.makeOperationContext();
+    auto opCtx = serviceContext.makeOperationContext();
 
     WorkingSet ws;
     WorkingSetID wsid = ws.allocate();
@@ -93,7 +93,7 @@ BSONObj extractSortKeyCovered(const char* sortSpec,
 
     BSONObj sortKey;
     auto sortKeyGen =
-        stdx::make_unique<SortKeyGenerator>(txn.get(), fromjson(sortSpec), BSONObj(), collator);
+        stdx::make_unique<SortKeyGenerator>(opCtx.get(), fromjson(sortSpec), BSONObj(), collator);
     ASSERT_OK(sortKeyGen->getSortKey(*wsm, &sortKey));
 
     return sortKey;
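
In sort_key_generator_test.cpp the operation context never escapes a single helper: it is created from a QueryTestServiceContext and passed straight into the SortKeyGenerator constructor. A sketch of that flow with illustrative document and sort-spec literals (the real helpers above take these as parameters):

    // Sketch only; the literal values here are made up for illustration.
    QueryTestServiceContext serviceContext;
    auto opCtx = serviceContext.makeOperationContext();

    WorkingSetMember wsm;
    wsm.obj = Snapshotted<BSONObj>(SnapshotId(), fromjson("{a: 2}"));

    BSONObj sortKey;
    auto sortKeyGen = stdx::make_unique<SortKeyGenerator>(
        opCtx.get(), fromjson("{a: 1}"), BSONObj() /* query */, nullptr /* collator */);
    ASSERT_OK(sortKeyGen->getSortKey(wsm, &sortKey));  // expect sortKey == {"": 2}
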
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
index 92bd635d0af..7b62f5ad4a0 100644
--- a/src/mongo/dbtests/updatetests.cpp
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -56,11 +56,11 @@ namespace dps = ::mongo::dotted_path_support;
 
 class ClientBase {
 public:
-    ClientBase() : _client(&_txn) {
-        mongo::LastError::get(_txn.getClient()).reset();
+    ClientBase() : _client(&_opCtx) {
+        mongo::LastError::get(_opCtx.getClient()).reset();
     }
     virtual ~ClientBase() {
-        mongo::LastError::get(_txn.getClient()).reset();
+        mongo::LastError::get(_opCtx.getClient()).reset();
     }
 
 protected:
@@ -75,7 +75,7 @@ protected:
     }
 
     const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
-    OperationContext& _txn = *_txnPtr;
+    OperationContext& _opCtx = *_txnPtr;
     DBDirectClient _client;
 };
 
@@ -1717,7 +1717,7 @@ public:
 class IndexParentOfMod : public SetBase {
 public:
     void run() {
-        ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("a" << 1)));
+        ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("a" << 1)));
         _client.insert(ns(), fromjson("{'_id':0}"));
         _client.update(ns(), Query(), fromjson("{$set:{'a.b':4}}"));
         ASSERT_BSONOBJ_EQ(fromjson("{'_id':0,a:{b:4}}"), _client.findOne(ns(), Query()));
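
updatetests.cpp shows the fixture-member flavor of the same pattern: the operation context lives as long as the test fixture, and the DBDirectClient is bound to it in the constructor. Note that only the reference _txn becomes _opCtx here; the owning pointer _txnPtr keeps its old name. Condensed:

    // Sketch only: the fixture shape from the ClientBase hunk above.
    class ClientBase {
    public:
        ClientBase() : _client(&_opCtx) {}  // direct client runs against the fixture's context

    protected:
        const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
        OperationContext& _opCtx = *_txnPtr;  // renamed reference; the holder keeps its name
        DBDirectClient _client;
    };
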
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 3eb1706fbce..9b949ec3b93 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -59,7 +59,7 @@ static const char* const _ns = "unittests.validate_tests";
  */
 class ValidateBase {
 public:
-    explicit ValidateBase(bool full) : _ctx(&_txn, _ns), _client(&_txn), _full(full) {
+    explicit ValidateBase(bool full) : _ctx(&_opCtx, _ns), _client(&_opCtx), _full(full) {
         _client.createCollection(_ns);
     }
     ~ValidateBase() {
@@ -75,7 +75,7 @@ protected:
         ValidateResults results;
         BSONObjBuilder output;
         ASSERT_OK(collection()->validate(
-            &_txn, _full ? kValidateFull : kValidateIndex, &results, &output));
+            &_opCtx, _full ? kValidateFull : kValidateIndex, &results, &output));
 
         // Check if errors are reported if and only if valid is set to false.
         ASSERT_EQ(results.valid, results.errors.empty());
@@ -94,7 +94,7 @@ protected:
     }
 
     const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
-    OperationContext& _txn = *_txnPtr;
+    OperationContext& _opCtx = *_txnPtr;
     OldClientWriteContext _ctx;
     DBDirectClient _client;
     bool _full;
@@ -112,13 +112,13 @@ public:
         RecordId id1;
         {
             OpDebug* const nullOpDebug = nullptr;
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
 
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1), nullOpDebug, true));
-            id1 = coll->getCursor(&_txn)->next()->id;
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2), nullOpDebug, true));
+            ASSERT_OK(coll->insertDocument(&_opCtx, BSON("_id" << 1), nullOpDebug, true));
+            id1 = coll->getCursor(&_opCtx)->next()->id;
+            ASSERT_OK(coll->insertDocument(&_opCtx, BSON("_id" << 2), nullOpDebug, true));
             wunit.commit();
         }
 
@@ -128,8 +128,8 @@ public:
 
         // Remove {_id: 1} from the record store, so we get more _id entries than records.
         {
-            WriteUnitOfWork wunit(&_txn);
-            rs->deleteRecord(&_txn, id1);
+            WriteUnitOfWork wunit(&_opCtx);
+            rs->deleteRecord(&_opCtx, id1);
             wunit.commit();
         }
 
@@ -138,11 +138,11 @@ public:
         // Insert records {_id: 0} and {_id: 1} , so we get too few _id entries, and verify
         // validate fails.
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             for (int j = 0; j < 2; j++) {
                 auto doc = BSON("_id" << j);
-                ASSERT_OK(
-                    rs->insertRecord(&_txn, doc.objdata(), doc.objsize(), /*enforceQuota*/ false));
+                ASSERT_OK(rs->insertRecord(
+                    &_opCtx, doc.objdata(), doc.objsize(), /*enforceQuota*/ false));
             }
             wunit.commit();
         }
 
@@ -162,16 +162,18 @@ public:
         RecordId id1;
         {
             OpDebug* const nullOpDebug = nullptr;
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
-            id1 = coll->getCursor(&_txn)->next()->id;
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+            id1 = coll->getCursor(&_opCtx)->next()->id;
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
             wunit.commit();
         }
 
-        auto status = dbtests::createIndexFromSpec(&_txn,
+        auto status = dbtests::createIndexFromSpec(&_opCtx,
                                                    coll->ns().ns(),
                                                    BSON("name"
                                                         << "a"
@@ -191,8 +193,8 @@ public:
 
         // Remove a record, so we get more _id entries than records, and verify validate fails.
         {
-            WriteUnitOfWork wunit(&_txn);
-            rs->deleteRecord(&_txn, id1);
+            WriteUnitOfWork wunit(&_opCtx);
+            rs->deleteRecord(&_opCtx, id1);
             wunit.commit();
         }
 
@@ -201,11 +203,11 @@ public:
         // Insert two more records, so we get too few entries for a non-sparse index, and
         // verify validate fails.
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             for (int j = 0; j < 2; j++) {
                 auto doc = BSON("_id" << j);
-                ASSERT_OK(
-                    rs->insertRecord(&_txn, doc.objdata(), doc.objsize(), /*enforceQuota*/ false));
+                ASSERT_OK(rs->insertRecord(
+                    &_opCtx, doc.objdata(), doc.objsize(), /*enforceQuota*/ false));
             }
             wunit.commit();
         }
 
@@ -224,17 +226,20 @@ public:
         Collection* coll;
         RecordId id1;
         {
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
-            id1 = coll->getCursor(&_txn)->next()->id;
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 3), nullOpDebug, true));
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+            id1 = coll->getCursor(&_opCtx)->next()->id;
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 3 << "b" << 3), nullOpDebug, true));
             wunit.commit();
         }
 
-        auto status = dbtests::createIndexFromSpec(&_txn,
+        auto status = dbtests::createIndexFromSpec(&_opCtx,
                                                    coll->ns().ns(),
                                                    BSON("name"
                                                         << "a"
@@ -255,10 +260,10 @@ public:
         // Update {a: 1} to {a: 9} without updating the index, so we get inconsistent values
         // between the index and the document. Verify validate fails.
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             auto doc = BSON("_id" << 1 << "a" << 9);
             auto updateStatus = rs->updateRecord(
-                &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+                &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
             ASSERT_OK(updateStatus);
             wunit.commit();
 
@@ -279,13 +284,13 @@ public:
         Collection* coll;
         RecordId id1;
         {
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
 
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1), nullOpDebug, true));
-            id1 = coll->getCursor(&_txn)->next()->id;
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2), nullOpDebug, true));
+            ASSERT_OK(coll->insertDocument(&_opCtx, BSON("_id" << 1), nullOpDebug, true));
+            id1 = coll->getCursor(&_opCtx)->next()->id;
+            ASSERT_OK(coll->insertDocument(&_opCtx, BSON("_id" << 2), nullOpDebug, true));
             wunit.commit();
         }
 
@@ -296,10 +301,10 @@ public:
         // Update {_id: 1} to {_id: 9} without updating the index, so we get inconsistent values
         // between the index and the document. Verify validate fails.
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             auto doc = BSON("_id" << 9);
             auto updateStatus = rs->updateRecord(
-                &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+                &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
             ASSERT_OK(updateStatus);
             wunit.commit();
         }
 
@@ -308,10 +313,10 @@ public:
         // Revert {_id: 9} to {_id: 1} and verify that validate succeeds.
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             auto doc = BSON("_id" << 1);
             auto updateStatus = rs->updateRecord(
-                &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+                &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
             ASSERT_OK(updateStatus);
             wunit.commit();
         }
 
@@ -322,11 +327,12 @@ public:
         // will still be the same number of index entries and documents, but one document will not
        // have an index entry.
         {
-            WriteUnitOfWork wunit(&_txn);
-            rs->deleteRecord(&_txn, id1);
+            WriteUnitOfWork wunit(&_opCtx);
+            rs->deleteRecord(&_opCtx, id1);
             auto doc = BSON("_id" << 3);
-            ASSERT_OK(rs->insertRecord(&_txn, doc.objdata(), doc.objsize(), /*enforceQuota*/ false)
-                          .getStatus());
+            ASSERT_OK(
+                rs->insertRecord(&_opCtx, doc.objdata(), doc.objsize(), /*enforceQuota*/ false)
+                    .getStatus());
             wunit.commit();
         }
 
@@ -354,22 +360,22 @@ public:
         // {a: [c: 1]}
         auto doc3 = BSON("_id" << 3 << "a" << BSON_ARRAY(BSON("c" << 1)));
         {
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
 
-            ASSERT_OK(coll->insertDocument(&_txn, doc1, nullOpDebug, true));
-            id1 = coll->getCursor(&_txn)->next()->id;
-            ASSERT_OK(coll->insertDocument(&_txn, doc2, nullOpDebug, true));
-            ASSERT_OK(coll->insertDocument(&_txn, doc3, nullOpDebug, true));
+            ASSERT_OK(coll->insertDocument(&_opCtx, doc1, nullOpDebug, true));
+            id1 = coll->getCursor(&_opCtx)->next()->id;
+            ASSERT_OK(coll->insertDocument(&_opCtx, doc2, nullOpDebug, true));
+            ASSERT_OK(coll->insertDocument(&_opCtx, doc3, nullOpDebug, true));
 
             wunit.commit();
         }
 
         ASSERT_TRUE(checkValid());
 
         // Create multi-key index.
-        auto status = dbtests::createIndexFromSpec(&_txn,
+        auto status = dbtests::createIndexFromSpec(&_opCtx,
                                                    coll->ns().ns(),
                                                    BSON("name"
                                                         << "multikey_index"
@@ -389,9 +395,9 @@ public:
 
         // Update a document's indexed field without updating the index.
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             auto updateStatus = rs->updateRecord(
-                &_txn, id1, doc1_b.objdata(), doc1_b.objsize(), /*enforceQuota*/ false, NULL);
+                &_opCtx, id1, doc1_b.objdata(), doc1_b.objsize(), /*enforceQuota*/ false, NULL);
             ASSERT_OK(updateStatus);
             wunit.commit();
         }
 
@@ -401,9 +407,9 @@ public:
         // Update a document's non-indexed field without updating the index.
         // Index validation should still be valid.
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             auto updateStatus = rs->updateRecord(
-                &_txn, id1, doc1_c.objdata(), doc1_c.objsize(), /*enforceQuota*/ false, NULL);
+                &_opCtx, id1, doc1_c.objdata(), doc1_c.objsize(), /*enforceQuota*/ false, NULL);
             ASSERT_OK(updateStatus);
             wunit.commit();
         }
 
@@ -423,19 +429,22 @@ public:
         Collection* coll;
         RecordId id1;
         {
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
-
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
-            id1 = coll->getCursor(&_txn)->next()->id;
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
+
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+            id1 = coll->getCursor(&_opCtx)->next()->id;
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
             wunit.commit();
        }
 
         // Create a sparse index.
-        auto status = dbtests::createIndexFromSpec(&_txn,
+        auto status = dbtests::createIndexFromSpec(&_opCtx,
                                                    coll->ns().ns(),
                                                    BSON("name"
                                                         << "sparse_index"
@@ -457,10 +466,10 @@ public:
 
         // Update a document's indexed field without updating the index.
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             auto doc = BSON("_id" << 2 << "a" << 3);
             auto updateStatus = rs->updateRecord(
-                &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+                &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
             ASSERT_OK(updateStatus);
             wunit.commit();
         }
 
@@ -480,22 +489,24 @@ public:
         Collection* coll;
         RecordId id1;
         {
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
-
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
-            id1 = coll->getCursor(&_txn)->next()->id;
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
+
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+            id1 = coll->getCursor(&_opCtx)->next()->id;
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
 
             // Explicitly test that multi-key partial indexes containing documents that
             // don't match the filter expression are handled correctly.
             ASSERT_OK(coll->insertDocument(
-                &_txn, BSON("_id" << 3 << "a" << BSON_ARRAY(-1 << -2 << -3)), nullOpDebug, true));
+                &_opCtx, BSON("_id" << 3 << "a" << BSON_ARRAY(-1 << -2 << -3)), nullOpDebug, true));
             wunit.commit();
         }
 
         // Create a partial index.
-        auto status = dbtests::createIndexFromSpec(&_txn,
+        auto status = dbtests::createIndexFromSpec(&_opCtx,
                                                    coll->ns().ns(),
                                                    BSON("name"
                                                         << "partial_index"
@@ -517,10 +528,10 @@ public:
 
         // Update an unindexed document without updating the index.
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             auto doc = BSON("_id" << 1);
             auto updateStatus = rs->updateRecord(
-                &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+                &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
             ASSERT_OK(updateStatus);
             wunit.commit();
         }
 
@@ -541,16 +552,16 @@ public:
         Collection* coll;
         RecordId id1;
         {
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
             ASSERT_OK(coll->insertDocument(
-                &_txn, BSON("_id" << 1 << "x" << 1 << "a" << 2), nullOpDebug, true));
+                &_opCtx, BSON("_id" << 1 << "x" << 1 << "a" << 2), nullOpDebug, true));
             wunit.commit();
         }
 
         // Create a partial geo index that indexes the document. This should throw an error.
-        ASSERT_THROWS(dbtests::createIndexFromSpec(&_txn,
+        ASSERT_THROWS(dbtests::createIndexFromSpec(&_opCtx,
                                                    coll->ns().ns(),
                                                    BSON("name"
                                                         << "partial_index"
@@ -568,7 +579,7 @@ public:
                       UserException);
 
         // Create a partial geo index that does not index the document.
-        auto status = dbtests::createIndexFromSpec(&_txn,
+        auto status = dbtests::createIndexFromSpec(&_opCtx,
                                                    coll->ns().ns(),
                                                    BSON("name"
                                                         << "partial_index"
@@ -599,24 +610,27 @@ public:
         Collection* coll;
         RecordId id1;
         {
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
             ASSERT_OK(coll->insertDocument(
-                &_txn, BSON("_id" << 1 << "a" << 1 << "b" << 4), nullOpDebug, true));
-            id1 = coll->getCursor(&_txn)->next()->id;
+                &_opCtx, BSON("_id" << 1 << "a" << 1 << "b" << 4), nullOpDebug, true));
+            id1 = coll->getCursor(&_opCtx)->next()->id;
             ASSERT_OK(coll->insertDocument(
-                &_txn, BSON("_id" << 2 << "a" << 2 << "b" << 5), nullOpDebug, true));
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "a" << 3), nullOpDebug, true));
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 4 << "b" << 6), nullOpDebug, true));
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 5 << "c" << 7), nullOpDebug, true));
+                &_opCtx, BSON("_id" << 2 << "a" << 2 << "b" << 5), nullOpDebug, true));
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 3 << "a" << 3), nullOpDebug, true));
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 4 << "b" << 6), nullOpDebug, true));
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 5 << "c" << 7), nullOpDebug, true));
             wunit.commit();
         }
 
         // Create two compound indexes, one forward and one reverse, to test
         // validate()'s index direction parsing.
-        auto status = dbtests::createIndexFromSpec(&_txn,
+        auto status = dbtests::createIndexFromSpec(&_opCtx,
                                                    coll->ns().ns(),
                                                    BSON("name"
                                                         << "compound_index_1"
@@ -630,7 +644,7 @@ public:
                                                         << false));
         ASSERT_OK(status);
 
-        status = dbtests::createIndexFromSpec(&_txn,
+        status = dbtests::createIndexFromSpec(&_opCtx,
                                               coll->ns().ns(),
                                               BSON("name"
                                                    << "compound_index_2"
@@ -650,10 +664,10 @@ public:
 
         // Update a document's indexed field without updating the index.
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             auto doc = BSON("_id" << 1 << "a" << 1 << "b" << 3);
             auto updateStatus = rs->updateRecord(
-                &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+                &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
             ASSERT_OK(updateStatus);
             wunit.commit();
         }
 
@@ -673,20 +687,23 @@ public:
         Collection* coll;
         RecordId id1;
         {
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
-
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
-            id1 = coll->getCursor(&_txn)->next()->id;
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
+
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+            id1 = coll->getCursor(&_opCtx)->next()->id;
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
             wunit.commit();
         }
 
         const std::string indexName = "bad_index";
         auto status = dbtests::createIndexFromSpec(
-            &_txn,
+            &_opCtx,
             coll->ns().ns(),
             BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
                         << static_cast<int>(kIndexVersion)
@@ -698,11 +715,11 @@ public:
 
         // Replace a correct index entry with a bad one and check it's invalid.
         IndexCatalog* indexCatalog = coll->getIndexCatalog();
-        IndexDescriptor* descriptor = indexCatalog->findIndexByName(&_txn, indexName);
+        IndexDescriptor* descriptor = indexCatalog->findIndexByName(&_opCtx, indexName);
         IndexAccessMethod* iam = indexCatalog->getIndex(descriptor);
 
         {
-            WriteUnitOfWork wunit(&_txn);
+            WriteUnitOfWork wunit(&_opCtx);
             int64_t numDeleted;
             int64_t numInserted;
             const BSONObj actualKey = BSON("a" << 1);
@@ -710,8 +727,8 @@ public:
             InsertDeleteOptions options;
             options.dupsAllowed = true;
             options.logIfError = true;
-            auto removeStatus = iam->remove(&_txn, actualKey, id1, options, &numDeleted);
-            auto insertStatus = iam->insert(&_txn, badKey, id1, options, &numInserted);
+            auto removeStatus = iam->remove(&_opCtx, actualKey, id1, options, &numDeleted);
+            auto insertStatus = iam->insert(&_opCtx, badKey, id1, options, &numInserted);
 
             ASSERT_EQUALS(numDeleted, 1);
             ASSERT_EQUALS(numInserted, 1);
@@ -735,20 +752,23 @@ public:
         Collection* coll;
         RecordId id1;
         {
-            WriteUnitOfWork wunit(&_txn);
-            ASSERT_OK(db->dropCollection(&_txn, _ns));
-            coll = db->createCollection(&_txn, _ns);
-
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
-            id1 = coll->getCursor(&_txn)->next()->id;
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
-            ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+            coll = db->createCollection(&_opCtx, _ns);
+
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+            id1 = coll->getCursor(&_opCtx)->next()->id;
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+            ASSERT_OK(
+                coll->insertDocument(&_opCtx, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
             wunit.commit();
         }
 
         const std::string indexName = "bad_index";
         auto status = dbtests::createIndexFromSpec(
-            &_txn,
+            &_opCtx,
             coll->ns().ns(),
             BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
                         << static_cast<int>(kIndexVersion)
@@ -761,7 +781,7 @@ public:
 
         // Change the IndexDescriptor's keyPattern to descending so the index ordering
         // appears wrong.
         IndexCatalog* indexCatalog = coll->getIndexCatalog();
-        IndexDescriptor* descriptor = indexCatalog->findIndexByName(&_txn, indexName);
+        IndexDescriptor* descriptor = indexCatalog->findIndexByName(&_opCtx, indexName);
         descriptor->setKeyPatternForTest(BSON("a" << -1));
 
         ASSERT_FALSE(checkValid());
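
Every corruption case in validate_tests.cpp follows one recipe: mutate the RecordStore directly inside a WriteUnitOfWork so the index is bypassed, commit, and then expect checkValid() to flip. Condensed from the hunks above (rs, id1, _opCtx, and checkValid() are the fixture pieces shown there):

    // Sketch only: the shared corrupt-then-validate shape of the tests above.
    {
        WriteUnitOfWork wunit(&_opCtx);
        auto doc = BSON("_id" << 1 << "a" << 9);  // the index entry still says {a: 1}
        auto updateStatus = rs->updateRecord(
            &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
        ASSERT_OK(updateStatus);
        wunit.commit();
    }
    ASSERT_FALSE(checkValid());  // document and index now disagree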