Diffstat (limited to 'src/mongo/dbtests/query_stage_and.cpp')
 src/mongo/dbtests/query_stage_and.cpp | 289
 1 file changed, 145 insertions(+), 144 deletions(-)
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 340101f66fa..fa4961ce4eb 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -61,19 +61,19 @@ using stdx::make_unique;
class QueryStageAndBase {
public:
- QueryStageAndBase() : _client(&_txn) {}
+ QueryStageAndBase() : _client(&_opCtx) {}
virtual ~QueryStageAndBase() {
_client.dropCollection(ns());
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj));
}
IndexDescriptor* getIndex(const BSONObj& obj, Collection* coll) {
std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, obj, false, &indexes);
+ coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
if (indexes.empty()) {
FAIL(mongoutils::str::stream() << "Unable to find index with key pattern " << obj);
}
@@ -81,7 +81,7 @@ public:
}
void getRecordIds(set<RecordId>* out, Collection* coll) {
- auto cursor = coll->getCursor(&_txn);
+ auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
out->insert(record->id);
}
@@ -151,7 +151,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
DBDirectClient _client;
@@ -168,12 +168,12 @@ private:
class QueryStageAndHashInvalidation : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -185,7 +185,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -195,7 +195,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -203,7 +203,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// ah reads the first child into its hash table.
// ah should read foo=20, foo=19, ..., foo=0 in that order.
@@ -221,9 +221,9 @@ public:
getRecordIds(&data, coll);
size_t memUsageBefore = ah->getMemUsage();
for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
- if (coll->docFor(&_txn, *it).value()["foo"].numberInt() == 15) {
- ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *it).value());
+ if (coll->docFor(&_opCtx, *it).value()["foo"].numberInt() == 15) {
+ ah->invalidate(&_opCtx, *it, INVALIDATION_DELETION);
+ remove(coll->docFor(&_opCtx, *it).value());
break;
}
}
@@ -273,12 +273,12 @@ public:
class QueryStageAndHashInvalidateLookahead : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -291,7 +291,7 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20 (descending)
IndexScanParams params;
@@ -301,12 +301,12 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar <= 19 (descending)
params.descriptor = getIndex(BSON("bar" << 1), coll);
params.bounds.startKey = BSON("" << 19);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// First call to work reads the first result from the children.
// The first result from the first scan over foo is {foo: 20, bar: 20, baz: 20}.
@@ -326,8 +326,8 @@ public:
size_t memUsageBefore = ah->getMemUsage();
for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
- if (0 == deletedObj.woCompare(coll->docFor(&_txn, *it).value())) {
- ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
+ if (0 == deletedObj.woCompare(coll->docFor(&_opCtx, *it).value())) {
+ ah->invalidate(&_opCtx, *it, INVALIDATION_DELETION);
break;
}
}
@@ -350,7 +350,8 @@ public:
continue;
}
WorkingSetMember* wsm = ws.get(id);
- ASSERT_NOT_EQUALS(0, deletedObj.woCompare(coll->docFor(&_txn, wsm->recordId).value()));
+ ASSERT_NOT_EQUALS(0,
+ deletedObj.woCompare(coll->docFor(&_opCtx, wsm->recordId).value()));
++count;
}
@@ -362,12 +363,12 @@ public:
class QueryStageAndHashTwoLeaf : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -379,7 +380,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -389,7 +390,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -397,7 +398,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
@@ -412,12 +413,12 @@ public:
class QueryStageAndHashTwoLeafFirstChildLargeKeys : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -434,7 +435,7 @@ public:
// before hashed AND is done reading the first child (stage has to
// hold 21 keys in buffer for Foo <= 20).
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 20 * big.size());
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll, 20 * big.size());
// Foo <= 20
IndexScanParams params;
@@ -444,7 +445,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -452,7 +453,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -465,12 +466,12 @@ public:
class QueryStageAndHashTwoLeafLastChildLargeKeys : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -487,7 +488,7 @@ public:
// keys in last child's index are not buffered. There are 6 keys
// that satisfy the criteria Foo <= 20 and Bar >= 10 and 5 <= baz <= 15.
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 5 * big.size());
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll, 5 * big.size());
// Foo <= 20
IndexScanParams params;
@@ -497,7 +498,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll);
@@ -505,7 +506,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20.
@@ -517,12 +518,12 @@ public:
class QueryStageAndHashThreeLeaf : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -535,7 +536,7 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -545,7 +546,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -553,7 +554,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// 5 <= baz <= 15
params.descriptor = getIndex(BSON("baz" << 1), coll);
@@ -561,7 +562,7 @@ public:
params.bounds.endKey = BSON("" << 15);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are:
// foo == 10, 11, 12, 13, 14, 15.
@@ -579,12 +580,12 @@ public:
class QueryStageAndHashThreeLeafMiddleChildLargeKeys : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -602,7 +603,7 @@ public:
// before hashed AND is done reading the second child (stage has to
// hold 11 keys in buffer for Foo <= 20 and Bar >= 10).
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 10 * big.size());
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll, 10 * big.size());
// Foo <= 20
IndexScanParams params;
@@ -612,7 +613,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll);
@@ -620,7 +621,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// 5 <= baz <= 15
params.descriptor = getIndex(BSON("baz" << 1), coll);
@@ -628,7 +629,7 @@ public:
params.bounds.endKey = BSON("" << 15);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -639,12 +640,12 @@ public:
class QueryStageAndHashWithNothing : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -656,7 +657,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -666,7 +667,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar == 5. Index scan should be eof.
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -674,7 +675,7 @@ public:
params.bounds.endKey = BSON("" << 5);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
int count = 0;
int works = 0;
@@ -701,12 +702,12 @@ public:
class QueryStageAndHashProducesNothing : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -719,7 +720,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo >= 100
IndexScanParams params;
@@ -729,7 +730,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar <= 100
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -741,7 +742,7 @@ public:
<< "");
params.bounds.boundInclusion = BoundInclusion::kIncludeStartKeyOnly;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -754,12 +755,12 @@ public:
class QueryStageAndHashFirstChildFetched : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -771,7 +772,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -781,11 +782,11 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- IndexScan* firstScan = new IndexScan(&_txn, params, &ws, NULL);
+ IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, NULL);
// First child of the AND_HASH stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, firstScan, NULL, coll);
+ FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll);
ah->addChild(fetch);
// Bar >= 10
@@ -794,7 +795,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Check that the AndHash stage returns docs {foo: 10, bar: 10}
// through {foo: 20, bar: 20}.
@@ -813,12 +814,12 @@ public:
class QueryStageAndHashSecondChildFetched : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -830,7 +831,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -840,7 +841,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -848,11 +849,11 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- IndexScan* secondScan = new IndexScan(&_txn, params, &ws, NULL);
+ IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, NULL);
// Second child of the AND_HASH stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, secondScan, NULL, coll);
+ FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, NULL, coll);
ah->addChild(fetch);
// Check that the AndHash stage returns docs {foo: 10, bar: 10}
@@ -869,12 +870,12 @@ public:
class QueryStageAndHashDeadChild : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -885,9 +886,9 @@ public:
// Child2: NEED_TIME, DEAD
{
WorkingSet ws;
- const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll);
+ const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws, coll);
- auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage1 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -897,7 +898,7 @@ public:
childStage1->pushBack(id);
}
- auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws);
childStage2->pushBack(PlanStage::NEED_TIME);
childStage2->pushBack(PlanStage::DEAD);
@@ -918,9 +919,9 @@ public:
// Child2: Data
{
WorkingSet ws;
- const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll);
+ const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws, coll);
- auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage1 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
WorkingSetID id = ws.allocate();
@@ -932,7 +933,7 @@ public:
}
childStage1->pushBack(PlanStage::DEAD);
- auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -959,9 +960,9 @@ public:
// Child2: Data, DEAD
{
WorkingSet ws;
- const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll);
+ const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws, coll);
- auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage1 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -971,7 +972,7 @@ public:
childStage1->pushBack(id);
}
- auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -1007,12 +1008,12 @@ public:
class QueryStageAndSortedInvalidation : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1024,7 +1025,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1034,11 +1035,11 @@ public:
params.bounds.endKey = BSON("" << 1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Scan over bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Get the set of RecordIds in our collection to use later.
set<RecordId> data;
@@ -1056,8 +1057,8 @@ public:
// very first insert, which should be the very first thing in data. Let's invalidate it
// and make sure it shows up in the flagged results.
ah->saveState();
- ah->invalidate(&_txn, *data.begin(), INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *data.begin()).value());
+ ah->invalidate(&_opCtx, *data.begin(), INVALIDATION_DELETION);
+ remove(coll->docFor(&_opCtx, *data.begin()).value());
ah->restoreState();
// Make sure the nuked obj is actually in the flagged data.
@@ -1099,8 +1100,8 @@ public:
// Remove a result that's coming up. It's not the 'target' result of the AND so it's
// not flagged.
ah->saveState();
- ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *it).value());
+ ah->invalidate(&_opCtx, *it, INVALIDATION_DELETION);
+ remove(coll->docFor(&_opCtx, *it).value());
ah->restoreState();
// Get all results aside from the two we killed.
@@ -1131,12 +1132,12 @@ public:
class QueryStageAndSortedThreeLeaf : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1157,7 +1158,7 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1167,15 +1168,15 @@ public:
params.bounds.endKey = BSON("" << 1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// baz == 1
params.descriptor = getIndex(BSON("baz" << 1), coll);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
ASSERT_EQUALS(50, countResults(ah.get()));
}
@@ -1185,12 +1186,12 @@ public:
class QueryStageAndSortedWithNothing : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1202,7 +1203,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// Foo == 7. Should be EOF.
IndexScanParams params;
@@ -1212,7 +1213,7 @@ public:
params.bounds.endKey = BSON("" << 7);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar == 20, not EOF.
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -1220,7 +1221,7 @@ public:
params.bounds.endKey = BSON("" << 20);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1230,12 +1231,12 @@ public:
class QueryStageAndSortedProducesNothing : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1251,7 +1252,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// foo == 7.
IndexScanParams params;
@@ -1261,7 +1262,7 @@ public:
params.bounds.endKey = BSON("" << 7);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// bar == 20.
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -1269,7 +1270,7 @@ public:
params.bounds.endKey = BSON("" << 20);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1279,12 +1280,12 @@ public:
class QueryStageAndSortedByLastChild : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1296,7 +1297,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1306,13 +1307,13 @@ public:
params.bounds.endKey = BSON("" << 1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Intersect with 7 <= bar < 10000
params.descriptor = getIndex(BSON("bar" << 1), coll);
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 10000);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
WorkingSetID lastId = WorkingSet::INVALID_ID;
@@ -1323,11 +1324,11 @@ public:
if (PlanStage::ADVANCED != status) {
continue;
}
- BSONObj thisObj = coll->docFor(&_txn, ws.get(id)->recordId).value();
+ BSONObj thisObj = coll->docFor(&_opCtx, ws.get(id)->recordId).value();
ASSERT_EQUALS(7 + count, thisObj["bar"].numberInt());
++count;
if (WorkingSet::INVALID_ID != lastId) {
- BSONObj lastObj = coll->docFor(&_txn, ws.get(lastId)->recordId).value();
+ BSONObj lastObj = coll->docFor(&_opCtx, ws.get(lastId)->recordId).value();
ASSERT_LESS_THAN(lastObj["bar"].woCompare(thisObj["bar"]), 0);
}
lastId = id;
@@ -1344,12 +1345,12 @@ public:
class QueryStageAndSortedFirstChildFetched : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1362,7 +1363,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1372,16 +1373,16 @@ public:
params.bounds.endKey = BSON("" << 1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- IndexScan* firstScan = new IndexScan(&_txn, params, &ws, NULL);
+ IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, NULL);
// First child of the AND_SORTED stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, firstScan, NULL, coll);
+ FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll);
as->addChild(fetch);
// bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- as->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ as->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
for (int i = 0; i < 50; i++) {
BSONObj obj = getNext(as.get(), &ws);
@@ -1398,12 +1399,12 @@ public:
class QueryStageAndSortedSecondChildFetched : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1416,7 +1417,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1426,15 +1427,15 @@ public:
params.bounds.endKey = BSON("" << 1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- as->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ as->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- IndexScan* secondScan = new IndexScan(&_txn, params, &ws, NULL);
+ IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, NULL);
// Second child of the AND_SORTED stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, secondScan, NULL, coll);
+ FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, NULL, coll);
as->addChild(fetch);
for (int i = 0; i < 50; i++) {