author    James Wahlin <james.wahlin@10gen.com>  2016-02-03 16:18:08 -0500
committer James Wahlin <james.wahlin@10gen.com>  2016-02-05 14:43:37 -0500
commit    2c360253ba289c293ccfcacbf010a2c79b12ef0a (patch)
tree      849cc8e1553df6b43ae775611d1fc45e8a846af1 /src/mongo
parent    4c8be4e74705ca78a52d6bdbd4f290e10014d2a1 (diff)
download  mongo-2c360253ba289c293ccfcacbf010a2c79b12ef0a.tar.gz
SERVER-18826 Rename WorkingSet State Names from LOC to RID
Also renamed:

* WorkingSetMember::hasLoc() -> WorkingSetMember::hasRecordId()
* WorkingSetMember::loc -> WorkingSetMember::recordId
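At a glance, the renamed surface as it lands in working_set.h further down. The block below is a standalone, compilable sketch with stand-in types, not MongoDB source; only the names and the hasRecordId() logic are taken from this diff:

#include <cassert>

// Stand-in for mongo::RecordId, just enough for the example.
struct RecordId {
    long long repr = 0;
};

struct WorkingSetMember {
    // State names after this commit (formerly LOC_AND_IDX / LOC_AND_OBJ).
    enum MemberState { INVALID, RID_AND_IDX, RID_AND_OBJ, OWNED_OBJ };
    MemberState _state = INVALID;
    RecordId recordId;  // formerly 'loc'

    // Formerly hasLoc(); the logic is unchanged, only the names moved.
    bool hasRecordId() const {
        return _state == RID_AND_IDX || _state == RID_AND_OBJ;
    }
};

int main() {
    WorkingSetMember member;
    member.recordId.repr = 42;
    member._state = WorkingSetMember::RID_AND_IDX;
    assert(member.hasRecordId());
}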
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 2
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp | 2
-rw-r--r--  src/mongo/db/exec/and_common-inl.h | 10
-rw-r--r--  src/mongo/db/exec/and_hash.cpp | 30
-rw-r--r--  src/mongo/db/exec/and_sorted.cpp | 53
-rw-r--r--  src/mongo/db/exec/and_sorted.h | 8
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp | 4
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp | 4
-rw-r--r--  src/mongo/db/exec/delete.cpp | 12
-rw-r--r--  src/mongo/db/exec/distinct_scan.cpp | 4
-rw-r--r--  src/mongo/db/exec/fetch.cpp | 16
-rw-r--r--  src/mongo/db/exec/fetch.h | 4
-rw-r--r--  src/mongo/db/exec/idhack.cpp | 18
-rw-r--r--  src/mongo/db/exec/index_iterator.cpp | 4
-rw-r--r--  src/mongo/db/exec/index_scan.cpp | 4
-rw-r--r--  src/mongo/db/exec/merge_sort.cpp | 14
-rw-r--r--  src/mongo/db/exec/multi_iterator.cpp | 4
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp | 15
-rw-r--r--  src/mongo/db/exec/near.cpp | 22
-rw-r--r--  src/mongo/db/exec/oplogstart.cpp | 6
-rw-r--r--  src/mongo/db/exec/or.cpp | 8
-rw-r--r--  src/mongo/db/exec/plan_stats.h | 4
-rw-r--r--  src/mongo/db/exec/projection.cpp | 2
-rw-r--r--  src/mongo/db/exec/projection_exec.cpp | 6
-rw-r--r--  src/mongo/db/exec/projection_exec_test.cpp | 4
-rw-r--r--  src/mongo/db/exec/sort.cpp | 28
-rw-r--r--  src/mongo/db/exec/sort.h | 4
-rw-r--r--  src/mongo/db/exec/sort_key_generator.cpp | 2
-rw-r--r--  src/mongo/db/exec/text_or.cpp | 6
-rw-r--r--  src/mongo/db/exec/update.cpp | 38
-rw-r--r--  src/mongo/db/exec/update.h | 8
-rw-r--r--  src/mongo/db/exec/working_set.cpp | 20
-rw-r--r--  src/mongo/db/exec/working_set.h | 28
-rw-r--r--  src/mongo/db/exec/working_set_common.cpp | 20
-rw-r--r--  src/mongo/db/exec/working_set_common.h | 14
-rw-r--r--  src/mongo/db/exec/working_set_test.cpp | 14
-rw-r--r--  src/mongo/db/query/explain.cpp | 2
-rw-r--r--  src/mongo/db/query/plan_executor.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp | 38
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp | 42
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp | 46
-rw-r--r--  src/mongo/dbtests/query_stage_delete.cpp | 32
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp | 28
-rw-r--r--  src/mongo/dbtests/query_stage_ixscan.cpp | 18
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp | 30
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp | 36
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp | 44
-rw-r--r--  src/mongo/dbtests/sort_key_generator_test.cpp | 2
48 files changed, 384 insertions, 382 deletions
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index c33c246b2c1..1c21d3b561d 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -133,7 +133,7 @@ void _addWorkingSetMember(OperationContext* txn,
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->keyData.clear();
- member->loc = RecordId();
+ member->recordId = RecordId();
member->obj = Snapshotted<BSONObj>(SnapshotId(), maybe);
member->transitionToOwnedObj();
root->pushBack(id);
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 98424e25bcd..6f637c17e1e 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -173,7 +173,7 @@ public:
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->keyData.clear();
- member->loc = RecordId();
+ member->recordId = RecordId();
member->obj = Snapshotted<BSONObj>(SnapshotId(), indexSpec.getOwned());
member->transitionToOwnedObj();
root->pushBack(id);
diff --git a/src/mongo/db/exec/and_common-inl.h b/src/mongo/db/exec/and_common-inl.h
index 8151163e6d3..3a6f7cb0e69 100644
--- a/src/mongo/db/exec/and_common-inl.h
+++ b/src/mongo/db/exec/and_common-inl.h
@@ -44,9 +44,9 @@ public:
// Both 'src' and 'dest' must have a RecordId (and they must be the same RecordId), as
// we should have just matched them according to this RecordId while doing an
// intersection.
- verify(dest->hasLoc());
- verify(src.hasLoc());
- verify(dest->loc == src.loc);
+ verify(dest->hasRecordId());
+ verify(src.hasRecordId());
+ verify(dest->recordId == src.recordId);
// Merge computed data.
typedef WorkingSetComputedDataType WSCD;
@@ -63,7 +63,7 @@ public:
}
if (src.hasObj()) {
- invariant(src.getState() == WorkingSetMember::LOC_AND_OBJ);
+ invariant(src.getState() == WorkingSetMember::RID_AND_OBJ);
// 'src' has the full document but 'dest' doesn't so we need to copy it over.
dest->obj = src.obj;
@@ -72,7 +72,7 @@ public:
// We have an object so we don't need key data.
dest->keyData.clear();
- workingSet->transitionToLocAndObj(destId);
+ workingSet->transitionToRecordIdAndObj(destId);
// Now 'dest' has the full object. No more work to do.
return;
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 8153359ce89..a9bcb8466f2 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -219,12 +219,12 @@ PlanStage::StageState AndHashStage::doWork(WorkingSetID* out) {
// Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
- if (!member->hasLoc()) {
+ if (!member->hasRecordId()) {
_ws->flagForReview(*out);
return PlanStage::NEED_TIME;
}
- DataMap::iterator it = _dataMap.find(member->loc);
+ DataMap::iterator it = _dataMap.find(member->recordId);
if (_dataMap.end() == it) {
// Child's output wasn't in every previous child. Throw it out.
_ws->free(*out);
@@ -264,13 +264,13 @@ PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) {
// Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
- if (!member->hasLoc()) {
+ if (!member->hasRecordId()) {
_ws->flagForReview(id);
return PlanStage::NEED_TIME;
}
- if (!_dataMap.insert(std::make_pair(member->loc, id)).second) {
- // Didn't insert because we already had this loc inside the map. This should only
+ if (!_dataMap.insert(std::make_pair(member->recordId, id)).second) {
+ // Didn't insert because we already had this RecordId inside the map. This should only
// happen if we're seeing a newer copy of the same doc in a more recent snapshot.
// Throw out the newer copy of the doc.
_ws->free(id);
@@ -329,18 +329,18 @@ PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) {
// Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
- if (!member->hasLoc()) {
+ if (!member->hasRecordId()) {
_ws->flagForReview(id);
return PlanStage::NEED_TIME;
}
- verify(member->hasLoc());
- if (_dataMap.end() == _dataMap.find(member->loc)) {
+ verify(member->hasRecordId());
+ if (_dataMap.end() == _dataMap.find(member->recordId)) {
// Ignore. It's not in any previous child.
} else {
// We have a hit. Copy data into the WSM we already have.
- _seenMap.insert(member->loc);
- WorkingSetID olderMemberID = _dataMap[member->loc];
+ _seenMap.insert(member->recordId);
+ WorkingSetID olderMemberID = _dataMap[member->recordId];
WorkingSetMember* olderMember = _ws->get(olderMemberID);
size_t memUsageBefore = olderMember->getMemUsage();
@@ -423,8 +423,8 @@ void AndHashStage::doInvalidate(OperationContext* txn, const RecordId& dl, Inval
for (size_t i = 0; i < _lookAheadResults.size(); ++i) {
if (WorkingSet::INVALID_ID != _lookAheadResults[i]) {
WorkingSetMember* member = _ws->get(_lookAheadResults[i]);
- if (member->hasLoc() && member->loc == dl) {
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ if (member->hasRecordId() && member->recordId == dl) {
+ WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
_ws->flagForReview(_lookAheadResults[i]);
_lookAheadResults[i] = WorkingSet::INVALID_ID;
}
@@ -441,7 +441,7 @@ void AndHashStage::doInvalidate(OperationContext* txn, const RecordId& dl, Inval
if (_dataMap.end() != it) {
WorkingSetID id = it->second;
WorkingSetMember* member = _ws->get(id);
- verify(member->loc == dl);
+ verify(member->recordId == dl);
if (_hashingChildren) {
++_specificStats.flaggedInProgress;
@@ -452,8 +452,8 @@ void AndHashStage::doInvalidate(OperationContext* txn, const RecordId& dl, Inval
// Update memory stats.
_memUsage -= member->getMemUsage();
- // The loc is about to be invalidated. Fetch it and clear the loc.
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ // The RecordId is about to be invalidated. Fetch it and clear the RecordId.
+ WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
// Add the WSID to the to-be-reviewed list in the WS.
_ws->flagForReview(id);
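
All of the and_hash edits above sit inside a hash-join style intersection: the first child's RecordIds seed _dataMap, later children probe and prune it, and the last child streams against it. A standalone sketch of that shape (stand-in types, not the stage itself; it ignores yielding, invalidation, and memory limits):

#include <iterator>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

using RecordId = long long;    // stand-in for mongo::RecordId
using WorkingSetID = size_t;   // stand-in for a working-set handle

// Hash-AND over N children's (RecordId, WorkingSetID) outputs, mirroring the
// stage's shape: child 0 seeds _dataMap (readFirstChild), middle children
// intersect into it (hashOtherChildren), the last child streams (doWork).
std::vector<WorkingSetID> hashAnd(
    const std::vector<std::vector<std::pair<RecordId, WorkingSetID>>>& children) {
    if (children.size() < 2)
        return {};
    std::unordered_map<RecordId, WorkingSetID> dataMap;  // _dataMap
    for (const auto& kv : children[0])
        dataMap.emplace(kv.first, kv.second);  // a newer copy of the same doc is ignored
    for (size_t c = 1; c + 1 < children.size(); ++c) {
        std::unordered_set<RecordId> seen;  // _seenMap
        for (const auto& kv : children[c])
            if (dataMap.count(kv.first))
                seen.insert(kv.first);
        // Drop map entries this child never produced.
        for (auto it = dataMap.begin(); it != dataMap.end();)
            it = seen.count(it->first) ? std::next(it) : dataMap.erase(it);
    }
    std::vector<WorkingSetID> out;
    for (const auto& kv : children.back()) {  // last child streams its results
        auto hit = dataMap.find(kv.first);
        if (hit != dataMap.end())
            out.push_back(hit->second);
    }
    return out;
}
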
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 576141e6259..47efc2aba20 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -75,21 +75,21 @@ PlanStage::StageState AndSortedStage::doWork(WorkingSetID* out) {
// If we don't have any nodes that we're work()-ing until they hit a certain RecordId...
if (0 == _workingTowardRep.size()) {
// Get a target RecordId.
- return getTargetLoc(out);
+ return getTargetRecordId(out);
}
// Move nodes toward the target RecordId.
// If all nodes reach the target RecordId, return it. The next call to work() will set a new
// target.
- return moveTowardTargetLoc(out);
+ return moveTowardTargetRecordId(out);
}
-PlanStage::StageState AndSortedStage::getTargetLoc(WorkingSetID* out) {
+PlanStage::StageState AndSortedStage::getTargetRecordId(WorkingSetID* out) {
verify(numeric_limits<size_t>::max() == _targetNode);
verify(WorkingSet::INVALID_ID == _targetId);
- verify(RecordId() == _targetLoc);
+ verify(RecordId() == _targetRecordId);
- // Pick one, and get a loc to work toward.
+ // Pick one, and get a RecordId to work toward.
WorkingSetID id = WorkingSet::INVALID_ID;
StageState state = _children[0]->work(&id);
@@ -98,17 +98,17 @@ PlanStage::StageState AndSortedStage::getTargetLoc(WorkingSetID* out) {
// Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
- if (!member->hasLoc()) {
+ if (!member->hasRecordId()) {
_ws->flagForReview(id);
return PlanStage::NEED_TIME;
}
- verify(member->hasLoc());
+ verify(member->hasRecordId());
// We have a value from one child to AND with.
_targetNode = 0;
_targetId = id;
- _targetLoc = member->loc;
+ _targetRecordId = member->recordId;
// Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
member->makeObjOwnedIfNeeded();
@@ -145,11 +145,11 @@ PlanStage::StageState AndSortedStage::getTargetLoc(WorkingSetID* out) {
}
}
-PlanStage::StageState AndSortedStage::moveTowardTargetLoc(WorkingSetID* out) {
+PlanStage::StageState AndSortedStage::moveTowardTargetRecordId(WorkingSetID* out) {
verify(numeric_limits<size_t>::max() != _targetNode);
verify(WorkingSet::INVALID_ID != _targetId);
- // We have nodes that haven't hit _targetLoc yet.
+ // We have nodes that haven't hit _targetRecordId yet.
size_t workingChildNumber = _workingTowardRep.front();
auto& next = _children[workingChildNumber];
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -160,15 +160,15 @@ PlanStage::StageState AndSortedStage::moveTowardTargetLoc(WorkingSetID* out) {
// Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
- if (!member->hasLoc()) {
+ if (!member->hasRecordId()) {
_ws->flagForReview(id);
return PlanStage::NEED_TIME;
}
- verify(member->hasLoc());
+ verify(member->hasRecordId());
- if (member->loc == _targetLoc) {
- // The front element has hit _targetLoc. Don't move it forward anymore/work on
+ if (member->recordId == _targetRecordId) {
+ // The front element has hit _targetRecordId. Don't move it forward anymore/work on
// another element.
_workingTowardRep.pop();
AndCommon::mergeFrom(_ws, _targetId, *member);
@@ -179,27 +179,27 @@ PlanStage::StageState AndSortedStage::moveTowardTargetLoc(WorkingSetID* out) {
_targetNode = numeric_limits<size_t>::max();
_targetId = WorkingSet::INVALID_ID;
- _targetLoc = RecordId();
+ _targetRecordId = RecordId();
*out = toReturn;
return PlanStage::ADVANCED;
}
- // More children need to be advanced to _targetLoc.
+ // More children need to be advanced to _targetRecordId.
return PlanStage::NEED_TIME;
- } else if (member->loc < _targetLoc) {
+ } else if (member->recordId < _targetRecordId) {
// The front element of _workingTowardRep hasn't hit the thing we're AND-ing with
// yet. Try again later.
_ws->free(id);
return PlanStage::NEED_TIME;
} else {
- // member->loc > _targetLoc.
- // _targetLoc wasn't successfully AND-ed with the other sub-plans. We toss it and
+ // member->recordId > _targetRecordId.
+ // _targetRecordId wasn't successfully AND-ed with the other sub-plans. We toss it and
// try AND-ing with the next value.
_specificStats.failedAnd[_targetNode]++;
_ws->free(_targetId);
_targetNode = workingChildNumber;
- _targetLoc = member->loc;
+ _targetRecordId = member->recordId;
_targetId = id;
// Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
@@ -211,7 +211,7 @@ PlanStage::StageState AndSortedStage::moveTowardTargetLoc(WorkingSetID* out) {
_workingTowardRep.push(i);
}
}
- // Need time to chase after the new _targetLoc.
+ // Need time to chase after the new _targetRecordId.
return PlanStage::NEED_TIME;
}
} else if (PlanStage::IS_EOF == state) {
@@ -250,20 +250,21 @@ void AndSortedStage::doInvalidate(OperationContext* txn,
return;
}
- if (dl == _targetLoc) {
- // We're in the middle of moving children forward until they hit _targetLoc, which is no
+ if (dl == _targetRecordId) {
+ // We're in the middle of moving children forward until they hit _targetRecordId, which is
+ // no
// longer a valid target. If it's a deletion we can't AND it with anything, if it's a
// mutation the predicates implied by the AND may no longer be true. So no matter what,
- // fetch it, flag for review, and find another _targetLoc.
+ // fetch it, flag for review, and find another _targetRecordId.
++_specificStats.flagged;
// The RecordId could still be a valid result so flag it and save it for later.
- WorkingSetCommon::fetchAndInvalidateLoc(txn, _ws->get(_targetId), _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(txn, _ws->get(_targetId), _collection);
_ws->flagForReview(_targetId);
_targetId = WorkingSet::INVALID_ID;
_targetNode = numeric_limits<size_t>::max();
- _targetLoc = RecordId();
+ _targetRecordId = RecordId();
_workingTowardRep = std::queue<size_t>();
}
}
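
The two renamed helpers above, getTargetRecordId and moveTowardTargetRecordId, implement a classic sorted-stream intersection. A standalone sketch of that loop (stand-in types, not MongoDB source; this simplified variant always re-targets from child 0, whereas the real stage re-targets from whichever child overshot _targetRecordId):

#include <vector>

using RecordId = long long;  // stand-in for mongo::RecordId

// Sorted-AND over children that each emit RecordIds in ascending order.
std::vector<RecordId> sortedAnd(const std::vector<std::vector<RecordId>>& children) {
    std::vector<RecordId> out;
    if (children.empty())
        return out;
    std::vector<size_t> pos(children.size(), 0);
    for (; pos[0] < children[0].size(); ++pos[0]) {
        RecordId target = children[0][pos[0]];  // _targetRecordId
        bool matchedAll = true;
        for (size_t c = 1; c < children.size(); ++c) {
            // Advance this child while member->recordId < _targetRecordId.
            while (pos[c] < children[c].size() && children[c][pos[c]] < target)
                ++pos[c];
            if (pos[c] == children[c].size() || children[c][pos[c]] != target) {
                matchedAll = false;  // failedAnd: toss this target, try the next
                break;
            }
        }
        if (matchedAll)
            out.push_back(target);  // every child hit the target
    }
    return out;
}
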
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index d8edfcd21c6..be245b9583f 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -74,11 +74,11 @@ public:
private:
// Find a node to AND against.
- PlanStage::StageState getTargetLoc(WorkingSetID* out);
+ PlanStage::StageState getTargetRecordId(WorkingSetID* out);
// Move a child which hasn't advanced to the target node forward.
// Returns the target node in 'out' if all children successfully advance to it.
- PlanStage::StageState moveTowardTargetLoc(WorkingSetID* out);
+ PlanStage::StageState moveTowardTargetRecordId(WorkingSetID* out);
// Not owned by us.
const Collection* _collection;
@@ -88,11 +88,11 @@ private:
// The current node we're AND-ing against.
size_t _targetNode;
- RecordId _targetLoc;
+ RecordId _targetRecordId;
WorkingSetID _targetId;
// Nodes we're moving forward until they hit the element we're AND-ing.
- // Everything in here has not advanced to _targetLoc yet.
+ // Everything in here has not advanced to _targetRecordId yet.
// These are indices into _children.
std::queue<size_t> _workingTowardRep;
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 6b3f43e3ff1..8037dad1c5a 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -303,8 +303,8 @@ void CachedPlanStage::doInvalidate(OperationContext* txn,
InvalidationType type) {
for (auto it = _results.begin(); it != _results.end(); ++it) {
WorkingSetMember* member = _ws->get(*it);
- if (member->hasLoc() && member->loc == dl) {
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ if (member->hasRecordId() && member->recordId == dl) {
+ WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
}
}
}
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 634fec6e00d..696a05a71ea 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -156,9 +156,9 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
- member->loc = record->id;
+ member->recordId = record->id;
member->obj = {getOpCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
- _workingSet->transitionToLocAndObj(id);
+ _workingSet->transitionToRecordIdAndObj(id);
return returnIfMatches(member, id, out);
}
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index ed0dcd80761..bf061ccc98a 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -144,12 +144,12 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
// We want to free this member when we return, unless we need to retry it.
ScopeGuard memberFreer = MakeGuard(&WorkingSet::free, _ws, id);
- if (!member->hasLoc()) {
+ if (!member->hasRecordId()) {
// We expect to be here because of an invalidation causing a force-fetch.
++_specificStats.nInvalidateSkips;
return PlanStage::NEED_TIME;
}
- RecordId rloc = member->loc;
+ RecordId recordId = member->recordId;
// Deletes can't have projections. This means that covering analysis will always add
// a fetch. We should always get fetched data, and never just key data.
invariant(member->hasObj());
@@ -177,7 +177,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
// is allowed to free the memory.
if (_params.returnDeleted) {
// Save a copy of the document that is about to get deleted, but keep it in the
- // LOC_AND_OBJ state in case we need to retry deleting it.
+ // RID_AND_OBJ state in case we need to retry deleting it.
BSONObj deletedDoc = member->obj.value();
member->obj.setValue(deletedDoc.getOwned());
}
@@ -199,7 +199,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
// Do the write, unless this is an explain.
if (!_params.isExplain) {
WriteUnitOfWork wunit(getOpCtx());
- _collection->deleteDocument(getOpCtx(), rloc, _params.fromMigrate);
+ _collection->deleteDocument(getOpCtx(), recordId, _params.fromMigrate);
wunit.commit();
}
@@ -221,8 +221,8 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
if (_params.returnDeleted) {
// After deleting the document, the RecordId associated with this member is invalid.
- // Remove the 'loc' from the WorkingSetMember before returning it.
- member->loc = RecordId();
+ // Remove the 'recordId' from the WorkingSetMember before returning it.
+ member->recordId = RecordId();
member->transitionToOwnedObj();
}
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index 109e1e256ed..f80d86064f8 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -112,9 +112,9 @@ PlanStage::StageState DistinctScan::doWork(WorkingSetID* out) {
// Package up the result for the caller.
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
- member->loc = kv->loc;
+ member->recordId = kv->loc;
member->keyData.push_back(IndexKeyDatum(_descriptor->keyPattern(), kv->key, _iam));
- _workingSet->transitionToLocAndIdx(id);
+ _workingSet->transitionToRecordIdAndIdx(id);
*out = id;
return PlanStage::ADVANCED;
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index aaef747d95a..79b70ceba83 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -97,15 +97,15 @@ PlanStage::StageState FetchStage::doWork(WorkingSetID* out) {
if (member->hasObj()) {
++_specificStats.alreadyHasObj;
} else {
- // We need a valid loc to fetch from and this is the only state that has one.
- verify(WorkingSetMember::LOC_AND_IDX == member->getState());
- verify(member->hasLoc());
+ // We need a valid RecordId to fetch from and this is the only state that has one.
+ verify(WorkingSetMember::RID_AND_IDX == member->getState());
+ verify(member->hasRecordId());
try {
if (!_cursor)
_cursor = _collection->getCursor(getOpCtx());
- if (auto fetcher = _cursor->fetcherForId(member->loc)) {
+ if (auto fetcher = _cursor->fetcherForId(member->recordId)) {
// There's something to fetch. Hand the fetcher off to the WSM, and pass up
// a fetch request.
_idRetrying = id;
@@ -171,13 +171,13 @@ void FetchStage::doReattachToOperationContext() {
}
void FetchStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- // It's possible that the loc getting invalidated is the one we're about to
+ // It's possible that the recordId getting invalidated is the one we're about to
// fetch. In this case we do a "forced fetch" and put the WSM in owned object state.
if (WorkingSet::INVALID_ID != _idRetrying) {
WorkingSetMember* member = _ws->get(_idRetrying);
- if (member->hasLoc() && (member->loc == dl)) {
- // Fetch it now and kill the diskloc.
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ if (member->hasRecordId() && (member->recordId == dl)) {
+ // Fetch it now and kill the recordId.
+ WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
}
}
}
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index f5fafd66786..981a2f812c8 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -42,8 +42,8 @@ class SeekableRecordCursor;
/**
* This stage turns a RecordId into a BSONObj.
*
- * In WorkingSetMember terms, it transitions from LOC_AND_IDX to LOC_AND_OBJ by reading
- * the record at the provided loc. Returns verbatim any data that already has an object.
+ * In WorkingSetMember terms, it transitions from RID_AND_IDX to RID_AND_OBJ by reading
+ * the record at the provided RecordId. Returns verbatim any data that already has an object.
*
* Preconditions: Valid RecordId.
*/
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index b6cdcc530fa..affe2e4dc50 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -120,10 +120,10 @@ PlanStage::StageState IDHackStage::doWork(WorkingSetID* out) {
WorkingSetID id = WorkingSet::INVALID_ID;
try {
// Look up the key by going directly to the index.
- RecordId loc = _accessMethod->findSingle(getOpCtx(), _key);
+ RecordId recordId = _accessMethod->findSingle(getOpCtx(), _key);
// Key not found.
- if (loc.isNull()) {
+ if (recordId.isNull()) {
_done = true;
return PlanStage::IS_EOF;
}
@@ -134,14 +134,14 @@ PlanStage::StageState IDHackStage::doWork(WorkingSetID* out) {
// Create a new WSM for the result document.
id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
- member->loc = loc;
- _workingSet->transitionToLocAndIdx(id);
+ member->recordId = recordId;
+ _workingSet->transitionToRecordIdAndIdx(id);
if (!_recordCursor)
_recordCursor = _collection->getCursor(getOpCtx());
// We may need to request a yield while we fetch the document.
- if (auto fetcher = _recordCursor->fetcherForId(loc)) {
+ if (auto fetcher = _recordCursor->fetcherForId(recordId)) {
// There's something to fetch. Hand the fetcher off to the WSM, and pass up a
// fetch request.
_idBeingPagedIn = id;
@@ -215,13 +215,13 @@ void IDHackStage::doInvalidate(OperationContext* txn, const RecordId& dl, Invali
return;
}
- // It's possible that the loc getting invalidated is the one we're about to
+ // It's possible that the RecordId getting invalidated is the one we're about to
// fetch. In this case we do a "forced fetch" and put the WSM in owned object state.
if (WorkingSet::INVALID_ID != _idBeingPagedIn) {
WorkingSetMember* member = _workingSet->get(_idBeingPagedIn);
- if (member->hasLoc() && (member->loc == dl)) {
- // Fetch it now and kill the diskloc.
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ if (member->hasRecordId() && (member->recordId == dl)) {
+ // Fetch it now and kill the RecordId.
+ WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
}
}
}
diff --git a/src/mongo/db/exec/index_iterator.cpp b/src/mongo/db/exec/index_iterator.cpp
index 454731cc545..683455cd519 100644
--- a/src/mongo/db/exec/index_iterator.cpp
+++ b/src/mongo/db/exec/index_iterator.cpp
@@ -64,9 +64,9 @@ PlanStage::StageState IndexIteratorStage::doWork(WorkingSetID* out) {
WorkingSetID id = _ws->allocate();
WorkingSetMember* member = _ws->get(id);
- member->loc = entry->loc;
+ member->recordId = entry->loc;
member->keyData.push_back(IndexKeyDatum(_keyPattern, entry->key, _iam));
- _ws->transitionToLocAndIdx(id);
+ _ws->transitionToRecordIdAndIdx(id);
*out = id;
return PlanStage::ADVANCED;
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index a942cbb2287..4e7410eeb13 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -209,9 +209,9 @@ PlanStage::StageState IndexScan::doWork(WorkingSetID* out) {
// We found something to return, so fill out the WSM.
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
- member->loc = kv->loc;
+ member->recordId = kv->loc;
member->keyData.push_back(IndexKeyDatum(_keyPattern, kv->key, _iam));
- _workingSet->transitionToLocAndIdx(id);
+ _workingSet->transitionToRecordIdAndIdx(id);
if (_params.addKeyMetadata) {
BSONObjBuilder bob;
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 4aa2bb8c62c..0baa5d2767f 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -86,21 +86,21 @@ PlanStage::StageState MergeSortStage::doWork(WorkingSetID* out) {
// If we're deduping...
if (_dedup) {
- if (!member->hasLoc()) {
+ if (!member->hasRecordId()) {
// Can't dedup data unless there's a RecordId. We go ahead and use its
// result.
_noResultToMerge.pop();
} else {
++_specificStats.dupsTested;
- // ...and there's a diskloc and and we've seen the RecordId before
- if (_seen.end() != _seen.find(member->loc)) {
+ // ...and there's a RecordId and and we've seen the RecordId before
+ if (_seen.end() != _seen.find(member->recordId)) {
// ...drop it.
_ws->free(id);
++_specificStats.dupsDropped;
return PlanStage::NEED_TIME;
} else {
// Otherwise, note that we've seen it.
- _seen.insert(member->loc);
+ _seen.insert(member->recordId);
// We're going to use the result from the child, so we remove it from
// the queue of children without a result.
_noResultToMerge.pop();
@@ -175,14 +175,14 @@ PlanStage::StageState MergeSortStage::doWork(WorkingSetID* out) {
void MergeSortStage::doInvalidate(OperationContext* txn,
const RecordId& dl,
InvalidationType type) {
- // Go through our data and see if we're holding on to the invalidated loc.
+ // Go through our data and see if we're holding on to the invalidated RecordId.
for (list<StageWithValue>::iterator valueIt = _mergingData.begin();
valueIt != _mergingData.end();
valueIt++) {
WorkingSetMember* member = _ws->get(valueIt->id);
- if (member->hasLoc() && (dl == member->loc)) {
+ if (member->hasRecordId() && (dl == member->recordId)) {
// Fetch the about-to-be mutated result.
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
++_specificStats.forcedFetches;
}
}
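
The dedup branch renamed above reduces to a membership test against the set of RecordIds already returned. A standalone sketch (stand-in types; the real _seen is an unordered_set keyed with RecordId::Hasher):

#include <unordered_set>
#include <vector>

using RecordId = long long;  // stand-in for mongo::RecordId

// Keep the first occurrence of each RecordId, as MergeSortStage (and OrStage)
// do when _dedup is set: test _seen, drop on a hit, insert on a miss.
std::vector<RecordId> dedupByRecordId(const std::vector<RecordId>& in) {
    std::unordered_set<RecordId> seen;  // _seen
    std::vector<RecordId> out;
    for (RecordId rid : in) {
        if (!seen.insert(rid).second)
            continue;  // seen before: drop it (dupsDropped)
        out.push_back(rid);
    }
    return out;
}
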
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index cd9d0954538..49129451e42 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -90,9 +90,9 @@ PlanStage::StageState MultiIteratorStage::doWork(WorkingSetID* out) {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
- member->loc = record->id;
+ member->recordId = record->id;
member->obj = {getOpCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
- _ws->transitionToLocAndObj(*out);
+ _ws->transitionToRecordIdAndObj(*out);
return PlanStage::ADVANCED;
}
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index bd0cf272188..95c70b47ce0 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -406,13 +406,13 @@ namespace {
void invalidateHelper(OperationContext* txn,
WorkingSet* ws, // may flag for review
- const RecordId& dl,
+ const RecordId& recordId,
list<WorkingSetID>* idsToInvalidate,
const Collection* collection) {
for (auto it = idsToInvalidate->begin(); it != idsToInvalidate->end(); ++it) {
WorkingSetMember* member = ws->get(*it);
- if (member->hasLoc() && member->loc == dl) {
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, collection);
+ if (member->hasRecordId() && member->recordId == recordId) {
+ WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, collection);
}
}
}
@@ -420,7 +420,7 @@ void invalidateHelper(OperationContext* txn,
} // namespace
void MultiPlanStage::doInvalidate(OperationContext* txn,
- const RecordId& dl,
+ const RecordId& recordId,
InvalidationType type) {
if (_failure) {
return;
@@ -428,14 +428,15 @@ void MultiPlanStage::doInvalidate(OperationContext* txn,
if (bestPlanChosen()) {
CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
- invalidateHelper(txn, bestPlan.ws, dl, &bestPlan.results, _collection);
+ invalidateHelper(txn, bestPlan.ws, recordId, &bestPlan.results, _collection);
if (hasBackupPlan()) {
CandidatePlan& backupPlan = _candidates[_backupPlanIdx];
- invalidateHelper(txn, backupPlan.ws, dl, &backupPlan.results, _collection);
+ invalidateHelper(txn, backupPlan.ws, recordId, &backupPlan.results, _collection);
}
} else {
for (size_t ix = 0; ix < _candidates.size(); ++ix) {
- invalidateHelper(txn, _candidates[ix].ws, dl, &_candidates[ix].results, _collection);
+ invalidateHelper(
+ txn, _candidates[ix].ws, recordId, &_candidates[ix].results, _collection);
}
}
}
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 1eedc41c1ba..e3f0ed7781d 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -186,8 +186,8 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
WorkingSetMember* nextMember = _workingSet->get(nextMemberID);
// The child stage may not dedup so we must dedup them ourselves.
- if (_nextInterval->dedupCovering && nextMember->hasLoc()) {
- if (_seenDocuments.end() != _seenDocuments.find(nextMember->loc)) {
+ if (_nextInterval->dedupCovering && nextMember->hasRecordId()) {
+ if (_seenDocuments.end() != _seenDocuments.find(nextMember->recordId)) {
_workingSet->free(nextMemberID);
return PlanStage::NEED_TIME;
}
@@ -212,8 +212,8 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
_resultBuffer.push(SearchResult(nextMemberID, memberDistance));
// Store the member's RecordId, if available, for quick invalidation
- if (nextMember->hasLoc()) {
- _seenDocuments.insert(std::make_pair(nextMember->loc, nextMemberID));
+ if (nextMember->hasRecordId()) {
+ _seenDocuments.insert(std::make_pair(nextMember->recordId, nextMemberID));
}
return PlanStage::NEED_TIME;
@@ -235,8 +235,8 @@ PlanStage::StageState NearStage::advanceNext(WorkingSetID* toReturn) {
// Throw out all documents with memberDistance < minDistance
if (memberDistance < _nextInterval->minDistance) {
WorkingSetMember* member = _workingSet->get(result.resultID);
- if (member->hasLoc()) {
- _seenDocuments.erase(member->loc);
+ if (member->hasRecordId()) {
+ _seenDocuments.erase(member->recordId);
}
_resultBuffer.pop();
_workingSet->free(result.resultID);
@@ -269,8 +269,8 @@ PlanStage::StageState NearStage::advanceNext(WorkingSetID* toReturn) {
// calls to invalidate don't cause us to take action for a RecordId we're done with.
*toReturn = resultID;
WorkingSetMember* member = _workingSet->get(*toReturn);
- if (member->hasLoc()) {
- _seenDocuments.erase(member->loc);
+ if (member->hasRecordId()) {
+ _seenDocuments.erase(member->recordId);
}
// This value is used by nextInterval() to determine the size of the next interval.
@@ -291,9 +291,9 @@ void NearStage::doInvalidate(OperationContext* txn, const RecordId& dl, Invalida
if (seenIt != _seenDocuments.end()) {
WorkingSetMember* member = _workingSet->get(seenIt->second);
- verify(member->hasLoc());
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
- verify(!member->hasLoc());
+ verify(member->hasRecordId());
+ WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
+ verify(!member->hasRecordId());
// Don't keep it around in the seen map since there's no valid RecordId anymore
_seenDocuments.erase(seenIt);
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index 5041d3af4f9..3b96dcac89d 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -106,9 +106,9 @@ PlanStage::StageState OplogStart::workExtentHopping(WorkingSetID* out) {
_done = true;
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
- member->loc = record->id;
+ member->recordId = record->id;
member->obj = {getOpCtx()->recoveryUnit()->getSnapshotId(), std::move(obj)};
- _workingSet->transitionToLocAndObj(id);
+ _workingSet->transitionToRecordIdAndObj(id);
*out = id;
return PlanStage::ADVANCED;
}
@@ -149,7 +149,7 @@ PlanStage::StageState OplogStart::workBackwardsScan(WorkingSetID* out) {
WorkingSetMember* member = _workingSet->get(*out);
verify(member->hasObj());
- verify(member->hasLoc());
+ verify(member->hasRecordId());
if (!_filter->matchesBSON(member->obj.value())) {
_done = true;
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index 718e59b50ba..327ec1b31fd 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -66,18 +66,18 @@ PlanStage::StageState OrStage::doWork(WorkingSetID* out) {
WorkingSetMember* member = _ws->get(id);
// If we're deduping (and there's something to dedup by)
- if (_dedup && member->hasLoc()) {
+ if (_dedup && member->hasRecordId()) {
++_specificStats.dupsTested;
// ...and we've seen the RecordId before
- if (_seen.end() != _seen.find(member->loc)) {
+ if (_seen.end() != _seen.find(member->recordId)) {
// ...drop it.
++_specificStats.dupsDropped;
_ws->free(id);
return PlanStage::NEED_TIME;
} else {
// Otherwise, note that we've seen it.
- _seen.insert(member->loc);
+ _seen.insert(member->recordId);
}
}
@@ -131,7 +131,7 @@ void OrStage::doInvalidate(OperationContext* txn, const RecordId& dl, Invalidati
if (_dedup && INVALIDATION_DELETION == type) {
unordered_set<RecordId, RecordId::Hasher>::iterator it = _seen.find(dl);
if (_seen.end() != it) {
- ++_specificStats.locsForgotten;
+ ++_specificStats.recordIdsForgotten;
_seen.erase(dl);
}
}
diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h
index e7cfbf03f6f..90d410f55c5 100644
--- a/src/mongo/db/exec/plan_stats.h
+++ b/src/mongo/db/exec/plan_stats.h
@@ -446,7 +446,7 @@ struct MultiPlanStats : public SpecificStats {
};
struct OrStats : public SpecificStats {
- OrStats() : dupsTested(0), dupsDropped(0), locsForgotten(0) {}
+ OrStats() : dupsTested(0), dupsDropped(0), recordIdsForgotten(0) {}
SpecificStats* clone() const final {
OrStats* specific = new OrStats(*this);
@@ -457,7 +457,7 @@ struct OrStats : public SpecificStats {
size_t dupsDropped;
// How many calls to invalidate(...) actually removed a RecordId from our deduping map?
- size_t locsForgotten;
+ size_t recordIdsForgotten;
};
struct ProjectionStats : public SpecificStats {
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index bc066c96d8a..01cb246cf49 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -182,7 +182,7 @@ Status ProjectionStage::transform(WorkingSetMember* member) {
}
member->keyData.clear();
- member->loc = RecordId();
+ member->recordId = RecordId();
member->obj = Snapshotted<BSONObj>(SnapshotId(), bob.obj());
member->transitionToOwnedObj();
return Status::OK();
diff --git a/src/mongo/db/exec/projection_exec.cpp b/src/mongo/db/exec/projection_exec.cpp
index f5ea2cfd901..93ce77d7814 100644
--- a/src/mongo/db/exec/projection_exec.cpp
+++ b/src/mongo/db/exec/projection_exec.cpp
@@ -250,7 +250,7 @@ Status ProjectionExec::transform(WorkingSetMember* member) const {
member->obj = Snapshotted<BSONObj>(SnapshotId(), builder.obj());
member->keyData.clear();
- member->loc = RecordId();
+ member->recordId = RecordId();
member->transitionToOwnedObj();
return Status::OK();
}
@@ -347,14 +347,14 @@ Status ProjectionExec::transform(WorkingSetMember* member) const {
return sortKeyMetaStatus;
}
} else if (META_RECORDID == it->second) {
- bob.append(it->first, static_cast<long long>(member->loc.repr()));
+ bob.append(it->first, static_cast<long long>(member->recordId.repr()));
}
}
BSONObj newObj = bob.obj();
member->obj = Snapshotted<BSONObj>(SnapshotId(), newObj);
member->keyData.clear();
- member->loc = RecordId();
+ member->recordId = RecordId();
member->transitionToOwnedObj();
return Status::OK();
diff --git a/src/mongo/db/exec/projection_exec_test.cpp b/src/mongo/db/exec/projection_exec_test.cpp
index a92eb405b58..0db187b347d 100644
--- a/src/mongo/db/exec/projection_exec_test.cpp
+++ b/src/mongo/db/exec/projection_exec_test.cpp
@@ -146,7 +146,7 @@ void testTransform(const char* specStr,
/**
* Test function to verify the results of projecting the $meta sortKey while under a covered
* projection. In particular, it tests that ProjectionExec can take a WorkingSetMember in
- * LOC_AND_IDX state and use the sortKey along with the index data to generate the final output
+ * RID_AND_IDX state and use the sortKey along with the index data to generate the final output
* document. For SERVER-20117.
*
* sortKey - The sort key in BSONObj form.
@@ -163,7 +163,7 @@ BSONObj transformMetaSortKeyCovered(const BSONObj& sortKey,
WorkingSetMember* wsm = ws.get(wsid);
wsm->keyData.push_back(ikd);
wsm->addComputed(new SortKeyComputedData(sortKey));
- ws.transitionToLocAndIdx(wsid);
+ ws.transitionToRecordIdAndIdx(wsid);
ProjectionExec projExec(fromjson(projSpec), nullptr, ExtensionsCallbackDisallowExtensions());
ASSERT_OK(projExec.transform(wsm));
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index a621fbe945e..2d5b81065a2 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -65,7 +65,7 @@ bool SortStage::WorkingSetComparator::operator()(const SortableDataItem& lhs,
return result < 0;
}
// Indices use RecordId as an additional sort key so we must as well.
- return lhs.loc < rhs.loc;
+ return lhs.recordId < rhs.recordId;
}
SortStage::SortStage(OperationContext* opCtx,
@@ -131,8 +131,8 @@ PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
verify(member->hasObj());
// We might be sorting something that was invalidated at some point.
- if (member->hasLoc()) {
- _wsidByDiskLoc[member->loc] = id;
+ if (member->hasRecordId()) {
+ _wsidByRecordId[member->recordId] = id;
}
SortableDataItem item;
@@ -144,9 +144,9 @@ PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
static_cast<const SortKeyComputedData*>(member->getComputed(WSM_SORT_KEY));
item.sortKey = sortKeyComputedData->getSortKey();
- if (member->hasLoc()) {
+ if (member->hasRecordId()) {
// The RecordId breaks ties when sorting two WSMs with the same sort key.
- item.loc = member->loc;
+ item.recordId = member->recordId;
}
addToBuffer(item);
@@ -187,8 +187,8 @@ PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
// If we're returning something, take it out of our DL -> WSID map so that future
// calls to invalidate don't cause us to take action for a DL we're done with.
WorkingSetMember* member = _ws->get(*out);
- if (member->hasLoc()) {
- _wsidByDiskLoc.erase(member->loc);
+ if (member->hasRecordId()) {
+ _wsidByRecordId.erase(member->recordId);
}
return PlanStage::ADVANCED;
@@ -202,18 +202,18 @@ void SortStage::doInvalidate(OperationContext* txn, const RecordId& dl, Invalida
// _data contains indices into the WorkingSet, not actual data. If a WorkingSetMember in
// the WorkingSet needs to change state as a result of a RecordId invalidation, it will still
// be at the same spot in the WorkingSet. As such, we don't need to modify _data.
- DataMap::iterator it = _wsidByDiskLoc.find(dl);
+ DataMap::iterator it = _wsidByRecordId.find(dl);
// If we're holding on to data that's got the RecordId we're invalidating...
- if (_wsidByDiskLoc.end() != it) {
+ if (_wsidByRecordId.end() != it) {
// Grab the WSM that we're nuking.
WorkingSetMember* member = _ws->get(it->second);
- verify(member->loc == dl);
+ verify(member->recordId == dl);
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
// Remove the RecordId from our set of active DLs.
- _wsidByDiskLoc.erase(it);
+ _wsidByRecordId.erase(it);
++_specificStats.forcedFetches;
}
}
@@ -317,8 +317,8 @@ void SortStage::addToBuffer(const SortableDataItem& item) {
// RecordId invalidation map and free from working set.
if (wsidToFree != WorkingSet::INVALID_ID) {
WorkingSetMember* member = _ws->get(wsidToFree);
- if (member->hasLoc()) {
- _wsidByDiskLoc.erase(member->loc);
+ if (member->hasRecordId()) {
+ _wsidByRecordId.erase(member->recordId);
}
_ws->free(wsidToFree);
}
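
The comparator at the top of this hunk is the tie-break rule the comment describes: indices use RecordId as an additional sort key, so the in-memory sort must too. A minimal sketch of that ordering (stand-in types):

#include <tuple>

using RecordId = long long;   // stand-in for mongo::RecordId
using SortKey = int;          // stand-in for the computed BSON sort key

struct SortableDataItem {
    SortKey sortKey;
    RecordId recordId;  // renamed from 'loc' in this commit
};

// Compare by sortKey first, then fall back to recordId to break ties,
// replicating the ordering a covered (index-provided) sort would give.
bool lessThan(const SortableDataItem& lhs, const SortableDataItem& rhs) {
    return std::tie(lhs.sortKey, lhs.recordId) < std::tie(rhs.sortKey, rhs.recordId);
}
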
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index aab0d3e6a8d..e84fded850f 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -121,7 +121,7 @@ private:
// Since we must replicate the behavior of a covered sort as much as possible we use the
// RecordId to break sortKey ties.
// See sorta.js.
- RecordId loc;
+ RecordId recordId;
};
// Comparison object for data buffers (vector and set).
@@ -170,7 +170,7 @@ private:
// We buffer a lot of data and we want to look it up by RecordId quickly upon invalidation.
typedef unordered_map<RecordId, WorkingSetID, RecordId::Hasher> DataMap;
- DataMap _wsidByDiskLoc;
+ DataMap _wsidByRecordId;
SortStats _specificStats;
diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp
index 22237b02353..f7337a586c0 100644
--- a/src/mongo/db/exec/sort_key_generator.cpp
+++ b/src/mongo/db/exec/sort_key_generator.cpp
@@ -150,7 +150,7 @@ Status SortKeyGenerator::getSortKey(const WorkingSetMember& member, BSONObj* obj
}
StatusWith<BSONObj> SortKeyGenerator::getSortKeyFromIndexKey(const WorkingSetMember& member) const {
- invariant(member.getState() == WorkingSetMember::LOC_AND_IDX);
+ invariant(member.getState() == WorkingSetMember::RID_AND_IDX);
invariant(!_sortHasMeta);
BSONObjBuilder sortKeyObj;
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index e09c52e08bd..23011388369 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -328,10 +328,10 @@ private:
PlanStage::StageState TextOrStage::addTerm(WorkingSetID wsid, WorkingSetID* out) {
WorkingSetMember* wsm = _ws->get(wsid);
- invariant(wsm->getState() == WorkingSetMember::LOC_AND_IDX);
+ invariant(wsm->getState() == WorkingSetMember::RID_AND_IDX);
invariant(1 == wsm->keyData.size());
const IndexKeyDatum newKeyData = wsm->keyData.back(); // copy to keep it around.
- TextRecordData* textRecordData = &_scores[wsm->loc];
+ TextRecordData* textRecordData = &_scores[wsm->recordId];
if (textRecordData->score < 0) {
// We have already rejected this document for not matching the filter.
@@ -375,7 +375,7 @@ PlanStage::StageState TextOrStage::addTerm(WorkingSetID wsid, WorkingSetID* out)
}
if (shouldKeep && !wsm->hasObj()) {
- // Our parent expects LOC_AND_OBJ members, so we fetch the document here if we haven't
+ // Our parent expects RID_AND_OBJ members, so we fetch the document here if we haven't
// already.
try {
shouldKeep = WorkingSetCommon::fetch(getOpCtx(), _ws, wsid, _recordCursor);
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 835c24f6381..eaedf6b4142 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -425,7 +425,7 @@ UpdateStage::UpdateStage(OperationContext* txn,
_collection(collection),
_idRetrying(WorkingSet::INVALID_ID),
_idReturning(WorkingSet::INVALID_ID),
- _updatedLocs(params.request->isMulti() ? new DiskLocSet() : NULL),
+ _updatedRecordIds(params.request->isMulti() ? new RecordIdSet() : NULL),
_doc(params.driver->getDocument()) {
_children.emplace_back(child);
// We are an update until we fall into the insert case.
@@ -436,7 +436,7 @@ UpdateStage::UpdateStage(OperationContext* txn,
_specificStats.isDocReplacement = params.driver->isDocReplacement();
}
-BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& loc) {
+BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& recordId) {
const UpdateRequest* request = _params.request;
UpdateDriver* driver = _params.driver;
CanonicalQuery* cq = _params.canonicalQuery;
@@ -533,7 +533,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
// Prepare to write back the modified document
WriteUnitOfWork wunit(getOpCtx());
- RecordId newLoc;
+ RecordId newRecordId;
if (inPlace) {
// Don't actually do the write if this is an explain.
@@ -549,7 +549,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
args.fromMigrate = request->isFromMigration();
StatusWith<RecordData> newRecStatus = _collection->updateDocumentWithDamages(
getOpCtx(),
- loc,
+ recordId,
Snapshotted<RecordData>(oldObj.snapshotId(), oldRec),
source,
_damages,
@@ -558,7 +558,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
}
_specificStats.fastmod = true;
- newLoc = loc;
+ newRecordId = recordId;
} else {
// The updates were not in place. Apply them through the file manager.
@@ -578,7 +578,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
args.criteria = idQuery;
args.fromMigrate = request->isFromMigration();
StatusWith<RecordId> res = _collection->updateDocument(getOpCtx(),
- loc,
+ recordId,
oldObj,
newObj,
true,
@@ -586,7 +586,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
_params.opDebug,
&args);
uassertStatusOK(res.getStatus());
- newLoc = res.getValue();
+ newRecordId = res.getValue();
}
}
@@ -598,11 +598,11 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
//
// If the document is indexed and the mod changes an indexed value, we might see
// it again. For an example, see the comment above near declaration of
- // updatedLocs.
+ // updatedRecordIds.
//
// This must be done after the wunit commits so we are sure we won't be rolling back.
- if (_updatedLocs && (newLoc != loc || driver->modsAffectIndices())) {
- _updatedLocs->insert(newLoc);
+ if (_updatedRecordIds && (newRecordId != recordId || driver->modsAffectIndices())) {
+ _updatedRecordIds->insert(newRecordId);
}
}
@@ -818,28 +818,28 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
if (PlanStage::ADVANCED == status) {
// Need to get these things from the result returned by the child.
- RecordId loc;
+ RecordId recordId;
WorkingSetMember* member = _ws->get(id);
// We want to free this member when we return, unless we need to retry it.
ScopeGuard memberFreer = MakeGuard(&WorkingSet::free, _ws, id);
- if (!member->hasLoc()) {
+ if (!member->hasRecordId()) {
// We expect to be here because of an invalidation causing a force-fetch.
++_specificStats.nInvalidateSkips;
return PlanStage::NEED_TIME;
}
- loc = member->loc;
+ recordId = member->recordId;
// Updates can't have projections. This means that covering analysis will always add
// a fetch. We should always get fetched data, and never just key data.
invariant(member->hasObj());
- // We fill this with the new locs of moved doc so we don't double-update.
- if (_updatedLocs && _updatedLocs->count(loc) > 0) {
- // Found a loc that refers to a document we had already updated. Note that
- // we can never remove from _updatedLocs because updates by other clients
+ // We fill this with the new RecordIds of moved doc so we don't double-update.
+ if (_updatedRecordIds && _updatedRecordIds->count(recordId) > 0) {
+ // Found a RecordId that refers to a document we had already updated. Note that
+ // we can never remove from _updatedRecordIds because updates by other clients
// could cause us to encounter a document again later.
return PlanStage::NEED_TIME;
}
@@ -885,7 +885,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
}
// Do the update, get us the new version of the doc.
- BSONObj newObj = transformAndUpdate(member->obj, loc);
+ BSONObj newObj = transformAndUpdate(member->obj, recordId);
// Set member's obj to be the doc we want to return.
if (_params.request->shouldReturnAnyDocs()) {
@@ -896,7 +896,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
invariant(_params.request->shouldReturnOldDocs());
member->obj.setValue(oldObj);
}
- member->loc = RecordId();
+ member->recordId = RecordId();
member->transitionToOwnedObj();
}
} catch (const WriteConflictException& wce) {
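
The _updatedRecordIds set threaded through this file guards multi-updates against re-updating a document that moved ahead of the scan; as the comments note, entries are never removed. A standalone sketch of that guard (stand-in types, not the stage itself):

#include <unordered_set>

using RecordId = long long;  // stand-in for mongo::RecordId

struct MultiUpdateGuard {
    std::unordered_set<RecordId> updatedRecordIds;  // _updatedRecordIds

    // Mirrors doWork(): skip any RecordId we have already updated.
    bool alreadyUpdated(RecordId rid) const {
        return updatedRecordIds.count(rid) > 0;
    }

    // Mirrors transformAndUpdate(): remember the doc's new location when it
    // moved or the mods touch indexed fields, so a later scan hit on
    // newRid is recognized and skipped instead of updated twice.
    void recordUpdate(RecordId oldRid, RecordId newRid, bool modsAffectIndices) {
        if (newRid != oldRid || modsAffectIndices)
            updatedRecordIds.insert(newRid);
    }
};
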
diff --git a/src/mongo/db/exec/update.h b/src/mongo/db/exec/update.h
index 235a234fb51..263c28ab8e5 100644
--- a/src/mongo/db/exec/update.h
+++ b/src/mongo/db/exec/update.h
@@ -147,11 +147,11 @@ public:
private:
/**
- * Computes the result of applying mods to the document 'oldObj' at RecordId 'loc' in
+ * Computes the result of applying mods to the document 'oldObj' at RecordId 'recordId' in
* memory, then commits these changes to the database. Returns a possibly unowned copy
* of the newly-updated version of the document.
*/
- BSONObj transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& loc);
+ BSONObj transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& recordId);
/**
* Computes the document to insert and inserts it into the collection. Used if the
@@ -205,8 +205,8 @@ private:
// document and we wouldn't want to update that.
//
// So, no matter what, we keep track of where the doc wound up.
- typedef unordered_set<RecordId, RecordId::Hasher> DiskLocSet;
- const std::unique_ptr<DiskLocSet> _updatedLocs;
+ typedef unordered_set<RecordId, RecordId::Hasher> RecordIdSet;
+ const std::unique_ptr<RecordIdSet> _updatedRecordIds;
// These get reused for each update.
mutablebson::Document& _doc;
diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp
index caf6b8b918e..96b5c0ca360 100644
--- a/src/mongo/db/exec/working_set.cpp
+++ b/src/mongo/db/exec/working_set.cpp
@@ -106,15 +106,15 @@ void WorkingSet::clear() {
_yieldSensitiveIds.clear();
}
-void WorkingSet::transitionToLocAndIdx(WorkingSetID id) {
+void WorkingSet::transitionToRecordIdAndIdx(WorkingSetID id) {
WorkingSetMember* member = get(id);
- member->_state = WorkingSetMember::LOC_AND_IDX;
+ member->_state = WorkingSetMember::RID_AND_IDX;
_yieldSensitiveIds.push_back(id);
}
-void WorkingSet::transitionToLocAndObj(WorkingSetID id) {
+void WorkingSet::transitionToRecordIdAndObj(WorkingSetID id) {
WorkingSetMember* member = get(id);
- member->_state = WorkingSetMember::LOC_AND_OBJ;
+ member->_state = WorkingSetMember::RID_AND_OBJ;
}
void WorkingSet::transitionToOwnedObj(WorkingSetID id) {
@@ -157,20 +157,20 @@ void WorkingSetMember::transitionToOwnedObj() {
}
-bool WorkingSetMember::hasLoc() const {
- return _state == LOC_AND_IDX || _state == LOC_AND_OBJ;
+bool WorkingSetMember::hasRecordId() const {
+ return _state == RID_AND_IDX || _state == RID_AND_OBJ;
}
bool WorkingSetMember::hasObj() const {
- return _state == OWNED_OBJ || _state == LOC_AND_OBJ;
+ return _state == OWNED_OBJ || _state == RID_AND_OBJ;
}
bool WorkingSetMember::hasOwnedObj() const {
- return _state == OWNED_OBJ || (_state == LOC_AND_OBJ && obj.value().isOwned());
+ return _state == OWNED_OBJ || (_state == RID_AND_OBJ && obj.value().isOwned());
}
void WorkingSetMember::makeObjOwnedIfNeeded() {
- if (supportsDocLocking() && _state == LOC_AND_OBJ && !obj.value().isOwned()) {
+ if (supportsDocLocking() && _state == RID_AND_OBJ && !obj.value().isOwned()) {
obj.setValue(obj.value().getOwned());
}
}
@@ -232,7 +232,7 @@ bool WorkingSetMember::getFieldDotted(const string& field, BSONElement* out) con
size_t WorkingSetMember::getMemUsage() const {
size_t memUsage = 0;
- if (hasLoc()) {
+ if (hasRecordId()) {
memUsage += sizeof(RecordId);
}
diff --git a/src/mongo/db/exec/working_set.h b/src/mongo/db/exec/working_set.h
index a769fe395ea..57e3bdc9a5a 100644
--- a/src/mongo/db/exec/working_set.h
+++ b/src/mongo/db/exec/working_set.h
@@ -121,19 +121,19 @@ public:
// WorkingSetMember state transitions
//
- void transitionToLocAndIdx(WorkingSetID id);
- void transitionToLocAndObj(WorkingSetID id);
+ void transitionToRecordIdAndIdx(WorkingSetID id);
+ void transitionToRecordIdAndObj(WorkingSetID id);
void transitionToOwnedObj(WorkingSetID id);
/**
- * Returns the list of working set ids that have transitioned into the LOC_AND_IDX or
- * LOC_AND_OBJ state since the last yield. The members corresponding to these ids may have since
+ * Returns the list of working set ids that have transitioned into the RID_AND_IDX or
+ * RID_AND_OBJ state since the last yield. The members corresponding to these ids may have since
* transitioned to a different state or been freed, so these cases must be handled by the
* caller. The list may also contain duplicates.
*
* Execution stages are *not* responsible for managing this list, as working set ids are added
- * to the set automatically by WorkingSet::transitionToLocAndIdx() and
- * WorkingSet::transitionToLocAndObj().
+ * to the set automatically by WorkingSet::transitionToRecordIdAndIdx() and
+ * WorkingSet::transitionToRecordIdAndObj().
*
* As a side effect, calling this method clears the list of flagged ids kept by the working set.
*/
@@ -232,9 +232,9 @@ private:
/**
* The type of the data passed between query stages. In particular:
*
- * Index scan stages return a WorkingSetMember in the LOC_AND_IDX state.
+ * Index scan stages return a WorkingSetMember in the RID_AND_IDX state.
*
- * Collection scan stages return a WorkingSetMember in the LOC_AND_OBJ state.
+ * Collection scan stages return a WorkingSetMember in the RID_AND_OBJ state.
*
* A WorkingSetMember may have any of the data above.
*/
@@ -255,11 +255,11 @@ public:
INVALID,
// Data is from 1 or more indices.
- LOC_AND_IDX,
+ RID_AND_IDX,
// Data is from a collection scan, or data is from an index scan and was fetched. The
// BSONObj might be owned or unowned.
- LOC_AND_OBJ,
+ RID_AND_OBJ,
// RecordId has been invalidated, or the obj doesn't correspond to an on-disk document
// anymore (e.g. is a computed expression).
@@ -278,20 +278,20 @@ public:
// Core attributes
//
- RecordId loc;
+ RecordId recordId;
Snapshotted<BSONObj> obj;
std::vector<IndexKeyDatum> keyData;
- // True if this WSM has survived a yield in LOC_AND_IDX state.
+ // True if this WSM has survived a yield in RID_AND_IDX state.
// TODO consider replacing by tracking SnapshotIds for IndexKeyDatums.
bool isSuspicious = false;
- bool hasLoc() const;
+ bool hasRecordId() const;
bool hasObj() const;
bool hasOwnedObj() const;
/**
- * Ensures that 'obj' of a WSM in the LOC_AND_OBJ state is owned BSON. It is a no-op if the WSM
+ * Ensures that 'obj' of a WSM in the RID_AND_OBJ state is owned BSON. It is a no-op if the WSM
* is in a different state or if 'obj' is already owned.
*
* It is also a no-op if the active storage engine doesn't support document-level concurrency.
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 745e8e8b4a5..b1a1190642d 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -40,23 +40,23 @@
namespace mongo {
// static
-bool WorkingSetCommon::fetchAndInvalidateLoc(OperationContext* txn,
- WorkingSetMember* member,
- const Collection* collection) {
+bool WorkingSetCommon::fetchAndInvalidateRecordId(OperationContext* txn,
+ WorkingSetMember* member,
+ const Collection* collection) {
// Already in our desired state.
if (member->getState() == WorkingSetMember::OWNED_OBJ) {
return true;
}
// We can't do anything without a RecordId.
- if (!member->hasLoc()) {
+ if (!member->hasRecordId()) {
return false;
}
    // Do the fetch, invalidate the RecordId.
- member->obj = collection->docFor(txn, member->loc);
+ member->obj = collection->docFor(txn, member->recordId);
member->obj.setValue(member->obj.value().getOwned());
- member->loc = RecordId();
+ member->recordId = RecordId();
member->transitionToOwnedObj();
return true;
@@ -72,7 +72,7 @@ void WorkingSetCommon::prepareForSnapshotChange(WorkingSet* workingSet) {
// We may see the same member twice, so anything we do here should be idempotent.
WorkingSetMember* member = workingSet->get(id);
- if (member->getState() == WorkingSetMember::LOC_AND_IDX) {
+ if (member->getState() == WorkingSetMember::RID_AND_IDX) {
member->isSuspicious = true;
}
}
@@ -90,10 +90,10 @@ bool WorkingSetCommon::fetch(OperationContext* txn,
// We should have a RecordId but need to retrieve the obj. Get the obj now and reset all WSM
// state appropriately.
- invariant(member->hasLoc());
+ invariant(member->hasRecordId());
member->obj.reset();
- auto record = cursor->seekExact(member->loc);
+ auto record = cursor->seekExact(member->recordId);
if (!record) {
return false;
}
@@ -119,7 +119,7 @@ bool WorkingSetCommon::fetch(OperationContext* txn,
}
member->keyData.clear();
- workingSet->transitionToLocAndObj(id);
+ workingSet->transitionToRecordIdAndObj(id);
return true;
}
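A sketch of the caller's side of the renamed fetch path above. The hunk elides WorkingSetCommon::fetch's full parameter list, so the 'workingSet' and 'cursor' arguments here are assumptions inferred from the body:

    // On success the member moves from RID_AND_IDX to RID_AND_OBJ; on failure
    // (e.g. the document vanished during a yield) the caller must free the id.
    if (!WorkingSetCommon::fetch(txn, workingSet, id, cursor)) {
        workingSet->free(id);
    }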
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index b4313ee0ccf..5ed238ce27f 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -45,25 +45,25 @@ public:
* Requires either a valid BSONObj or valid RecordId.
* Returns true if the fetch and invalidate succeeded, false otherwise.
*/
- static bool fetchAndInvalidateLoc(OperationContext* txn,
- WorkingSetMember* member,
- const Collection* collection);
+ static bool fetchAndInvalidateRecordId(OperationContext* txn,
+ WorkingSetMember* member,
+ const Collection* collection);
/**
* This must be called as part of "saveState" operations after all nodes in the tree save their
* state.
*
* Iterates over WorkingSetIDs in 'workingSet' which are "sensitive to yield". These are ids
- * that have transitioned into the LOC_AND_IDX state since the previous yield.
+ * that have transitioned into the RID_AND_IDX state since the previous yield.
*
- * The LOC_AND_IDX members are tagged as suspicious so that they can be handled properly in case
+ * The RID_AND_IDX members are tagged as suspicious so that they can be handled properly in case
* the document keyed by the index key is deleted or updated during the yield.
*/
static void prepareForSnapshotChange(WorkingSet* workingSet);
/**
- * Transitions the WorkingSetMember with WorkingSetID 'id' from the LOC_AND_IDX state to the
- * LOC_AND_OBJ state by fetching a document. Does the fetch using 'cursor'.
+ * Transitions the WorkingSetMember with WorkingSetID 'id' from the RID_AND_IDX state to the
+ * RID_AND_OBJ state by fetching a document. Does the fetch using 'cursor'.
*
* If false is returned, the document should not be considered for the result set. It is the
* caller's responsibility to free 'id' in this case.
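Taken together with prepareForSnapshotChange(), the yield discipline this header describes looks roughly like the following; the 'root' stage and the yield itself are illustrative:

    root->saveState();                               // every stage saves its state first
    WorkingSetCommon::prepareForSnapshotChange(ws);  // tags RID_AND_IDX members as isSuspicious
    // ... locks dropped; the snapshot may change; documents may be deleted or updated ...
    root->restoreState();
    // Suspicious members must be re-verified (e.g. via WorkingSetCommon::fetch)
    // before their index key data is trusted again.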
diff --git a/src/mongo/db/exec/working_set_test.cpp b/src/mongo/db/exec/working_set_test.cpp
index 2eeb850bf21..c0c98facdf7 100644
--- a/src/mongo/db/exec/working_set_test.cpp
+++ b/src/mongo/db/exec/working_set_test.cpp
@@ -71,12 +71,12 @@ TEST_F(WorkingSetFixture, noFieldToGet) {
ASSERT_EQUALS(WorkingSetMember::INVALID, member->getState());
ASSERT_FALSE(member->getFieldDotted("foo", &elt));
- ws->transitionToLocAndIdx(id);
+ ws->transitionToRecordIdAndIdx(id);
ASSERT_FALSE(member->getFieldDotted("foo", &elt));
// Our state is that of a valid object. The getFieldDotted shouldn't throw; there's
// something to call getFieldDotted on, but there's no field there.
- ws->transitionToLocAndObj(id);
+ ws->transitionToRecordIdAndObj(id);
ASSERT_TRUE(member->getFieldDotted("foo", &elt));
WorkingSetMember* member = ws->get(id);
@@ -91,8 +91,8 @@ TEST_F(WorkingSetFixture, getFieldUnowned) {
string fieldName = "x";
BSONObj obj = BSON(fieldName << 5);
- // Not truthful since the loc is bogus, but the loc isn't accessed anyway...
- ws->transitionToLocAndObj(id);
+ // Not truthful since the RecordId is bogus, but the RecordId isn't accessed anyway...
+ ws->transitionToRecordIdAndObj(id);
member->obj = Snapshotted<BSONObj>(SnapshotId(), BSONObj(obj.objdata()));
ASSERT_TRUE(obj.isOwned());
ASSERT_FALSE(member->obj.value().isOwned());
@@ -123,8 +123,8 @@ TEST_F(WorkingSetFixture, getFieldFromIndex) {
int secondValue = 10;
member->keyData.push_back(IndexKeyDatum(BSON(firstName << 1), BSON("" << firstValue), NULL));
- // Also a minor lie as loc is bogus.
- ws->transitionToLocAndIdx(id);
+ // Also a minor lie as RecordId is bogus.
+ ws->transitionToRecordIdAndIdx(id);
BSONElement elt;
ASSERT_TRUE(member->getFieldDotted(firstName, &elt));
ASSERT_EQUALS(elt.numberInt(), firstValue);
@@ -146,7 +146,7 @@ TEST_F(WorkingSetFixture, getDottedFieldFromIndex) {
int firstValue = 5;
member->keyData.push_back(IndexKeyDatum(BSON(firstName << 1), BSON("" << firstValue), NULL));
- ws->transitionToLocAndIdx(id);
+ ws->transitionToRecordIdAndIdx(id);
BSONElement elt;
ASSERT_TRUE(member->getFieldDotted(firstName, &elt));
ASSERT_EQUALS(elt.numberInt(), firstValue);
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index f3a42f03209..fab181bc42d 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -391,7 +391,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
if (verbosity >= ExplainCommon::EXEC_STATS) {
bob->appendNumber("dupsTested", spec->dupsTested);
bob->appendNumber("dupsDropped", spec->dupsDropped);
- bob->appendNumber("locsForgotten", spec->locsForgotten);
+ bob->appendNumber("recordIdsForgotten", spec->recordIdsForgotten);
}
} else if (STAGE_LIMIT == stats.stageType) {
LimitStats* spec = static_cast<LimitStats*>(stats.specific.get());
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index ecdbeb7d41b..5f63b55a538 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -400,7 +400,7 @@ PlanExecutor::ExecState PlanExecutor::getNextImpl(Snapshotted<BSONObj>* objOut,
bool hasRequestedData = true;
if (NULL != objOut) {
- if (WorkingSetMember::LOC_AND_IDX == member->getState()) {
+ if (WorkingSetMember::RID_AND_IDX == member->getState()) {
if (1 != member->keyData.size()) {
_workingSet->free(id);
hasRequestedData = false;
@@ -418,8 +418,8 @@ PlanExecutor::ExecState PlanExecutor::getNextImpl(Snapshotted<BSONObj>* objOut,
}
if (NULL != dlOut) {
- if (member->hasLoc()) {
- *dlOut = member->loc;
+ if (member->hasRecordId()) {
+ *dlOut = member->recordId;
} else {
_workingSet->free(id);
hasRequestedData = false;
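The branch above means a RecordId can be returned only from a member that actually carries one; a caller requesting RecordIds might look like this, assuming the public getNext overload that mirrors getNextImpl's out-parameters:

    BSONObj obj;
    RecordId rid;  // formerly a DiskLoc/'loc' out-parameter
    while (PlanExecutor::ADVANCED == exec->getNext(&obj, &rid)) {
        // 'rid' is filled only for members in an RID_* state; an OWNED_OBJ
        // member would have failed the hasRecordId() check above instead.
    }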
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 60d914c9dd9..1b9ada903e4 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -77,7 +77,7 @@ public:
return descriptor;
}
- void getLocs(set<RecordId>* out, Collection* coll) {
+ void getRecordIds(set<RecordId>* out, Collection* coll) {
auto cursor = coll->getCursor(&_txn);
while (auto record = cursor->next()) {
out->insert(record->id);
@@ -214,7 +214,7 @@ public:
ah->saveState();
// ...invalidate one of the read objects
set<RecordId> data;
- getLocs(&data, coll);
+ getRecordIds(&data, coll);
size_t memUsageBefore = ah->getMemUsage();
for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
if (coll->docFor(&_txn, *it).value()["foo"].numberInt() == 15) {
@@ -318,7 +318,7 @@ public:
BSONObj deletedObj = BSON("_id" << 20 << "foo" << 20 << "bar" << 20 << "baz" << 20);
ah->saveState();
set<RecordId> data;
- getLocs(&data, coll);
+ getRecordIds(&data, coll);
size_t memUsageBefore = ah->getMemUsage();
for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
@@ -346,7 +346,7 @@ public:
continue;
}
WorkingSetMember* wsm = ws.get(id);
- ASSERT_NOT_EQUALS(0, deletedObj.woCompare(coll->docFor(&_txn, wsm->loc).value()));
+ ASSERT_NOT_EQUALS(0, deletedObj.woCompare(coll->docFor(&_txn, wsm->recordId).value()));
++count;
}
@@ -887,9 +887,9 @@ public:
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
- wsm->loc = RecordId(1);
+ wsm->recordId = RecordId(1);
wsm->obj = Snapshotted<BSONObj>(SnapshotId(), dataObj);
- ws.transitionToLocAndObj(id);
+ ws.transitionToRecordIdAndObj(id);
childStage1->pushBack(id);
}
@@ -921,9 +921,9 @@ public:
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
- wsm->loc = RecordId(1);
+ wsm->recordId = RecordId(1);
wsm->obj = Snapshotted<BSONObj>(SnapshotId(), dataObj);
- ws.transitionToLocAndObj(id);
+ ws.transitionToRecordIdAndObj(id);
childStage1->pushBack(id);
}
childStage1->pushBack(PlanStage::DEAD);
@@ -932,9 +932,9 @@ public:
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
- wsm->loc = RecordId(2);
+ wsm->recordId = RecordId(2);
wsm->obj = Snapshotted<BSONObj>(SnapshotId(), dataObj);
- ws.transitionToLocAndObj(id);
+ ws.transitionToRecordIdAndObj(id);
childStage2->pushBack(id);
}
@@ -961,9 +961,9 @@ public:
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
- wsm->loc = RecordId(1);
+ wsm->recordId = RecordId(1);
wsm->obj = Snapshotted<BSONObj>(SnapshotId(), dataObj);
- ws.transitionToLocAndObj(id);
+ ws.transitionToRecordIdAndObj(id);
childStage1->pushBack(id);
}
@@ -971,9 +971,9 @@ public:
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
- wsm->loc = RecordId(2);
+ wsm->recordId = RecordId(2);
wsm->obj = Snapshotted<BSONObj>(SnapshotId(), dataObj);
- ws.transitionToLocAndObj(id);
+ ws.transitionToRecordIdAndObj(id);
childStage2->pushBack(id);
}
childStage2->pushBack(PlanStage::DEAD);
@@ -1036,9 +1036,9 @@ public:
params.descriptor = getIndex(BSON("bar" << 1), coll);
ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
- // Get the set of disklocs in our collection to use later.
+ // Get the set of RecordIds in our collection to use later.
set<RecordId> data;
- getLocs(&data, coll);
+ getRecordIds(&data, coll);
// We're making an assumption here that happens to be true because we clear out the
// collection before running this: increasing inserts have increasing RecordIds.
@@ -1085,7 +1085,7 @@ public:
ASSERT_EQUALS(1, elt.numberInt());
ASSERT_TRUE(member->getFieldDotted("bar", &elt));
ASSERT_EQUALS(1, elt.numberInt());
- ASSERT_EQUALS(member->loc, *it);
+ ASSERT_EQUALS(member->recordId, *it);
}
// Move 'it' to a result that's yet to show up.
@@ -1319,11 +1319,11 @@ public:
if (PlanStage::ADVANCED != status) {
continue;
}
- BSONObj thisObj = coll->docFor(&_txn, ws.get(id)->loc).value();
+ BSONObj thisObj = coll->docFor(&_txn, ws.get(id)->recordId).value();
ASSERT_EQUALS(7 + count, thisObj["bar"].numberInt());
++count;
if (WorkingSet::INVALID_ID != lastId) {
- BSONObj lastObj = coll->docFor(&_txn, ws.get(lastId)->loc).value();
+ BSONObj lastObj = coll->docFor(&_txn, ws.get(lastId)->recordId).value();
ASSERT_LESS_THAN(lastObj["bar"].woCompare(thisObj["bar"]), 0);
}
lastId = id;
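getRecordIds() replaces the old getLocs() helper throughout these test suites; its typical use, with names from the tests above, is:

    set<RecordId> recordIds;
    getRecordIds(&recordIds, coll);
    for (set<RecordId>::const_iterator it = recordIds.begin(); it != recordIds.end(); ++it) {
        // docFor() resolves a RecordId back to the document it names.
        BSONObj doc = coll->docFor(&_txn, *it).value();
    }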
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 1cbf7b32d40..8fd44c1418c 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -112,9 +112,9 @@ public:
return count;
}
- void getLocs(Collection* collection,
- CollectionScanParams::Direction direction,
- vector<RecordId>* out) {
+ void getRecordIds(Collection* collection,
+ CollectionScanParams::Direction direction,
+ vector<RecordId>* out) {
WorkingSet ws;
CollectionScanParams params;
@@ -128,8 +128,8 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- verify(member->hasLoc());
- out->push_back(member->loc);
+ verify(member->hasRecordId());
+ out->push_back(member->recordId);
}
}
}
@@ -275,8 +275,8 @@ public:
Collection* coll = ctx.getCollection();
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::FORWARD, &locs);
+ vector<RecordId> recordIds;
+ getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
// Configure the scan.
CollectionScanParams params;
@@ -293,23 +293,23 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(),
member->obj.value()["foo"].numberInt());
++count;
}
}
- // Remove locs[count].
+ // Remove recordIds[count].
scan->saveState();
{
WriteUnitOfWork wunit(&_txn);
- scan->invalidate(&_txn, locs[count], INVALIDATION_DELETION);
+ scan->invalidate(&_txn, recordIds[count], INVALIDATION_DELETION);
wunit.commit(); // to avoid rollback of the invalidate
}
- remove(coll->docFor(&_txn, locs[count]).value());
+ remove(coll->docFor(&_txn, recordIds[count]).value());
scan->restoreState();
- // Skip over locs[count].
+ // Skip over recordIds[count].
++count;
// Expect the rest.
@@ -318,7 +318,7 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(),
member->obj.value()["foo"].numberInt());
++count;
}
@@ -340,8 +340,8 @@ public:
Collection* coll = ctx.getCollection();
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::BACKWARD, &locs);
+ vector<RecordId> recordIds;
+ getRecordIds(coll, CollectionScanParams::BACKWARD, &recordIds);
// Configure the scan.
CollectionScanParams params;
@@ -358,23 +358,23 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(),
member->obj.value()["foo"].numberInt());
++count;
}
}
- // Remove locs[count].
+ // Remove recordIds[count].
scan->saveState();
{
WriteUnitOfWork wunit(&_txn);
- scan->invalidate(&_txn, locs[count], INVALIDATION_DELETION);
+ scan->invalidate(&_txn, recordIds[count], INVALIDATION_DELETION);
wunit.commit(); // to avoid rollback of the invalidate
}
- remove(coll->docFor(&_txn, locs[count]).value());
+ remove(coll->docFor(&_txn, recordIds[count]).value());
scan->restoreState();
- // Skip over locs[count].
+ // Skip over recordIds[count].
++count;
// Expect the rest.
@@ -383,7 +383,7 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(),
member->obj.value()["foo"].numberInt());
++count;
}
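Both scan directions exercise the same delete-under-yield protocol; condensed, with names from the test above, the sequence a stage must survive is:

    scan->saveState();
    {
        WriteUnitOfWork wunit(&_txn);
        // Tell the stage the RecordId it may be positioned on is going away...
        scan->invalidate(&_txn, recordIds[count], INVALIDATION_DELETION);
        wunit.commit();  // commit so the invalidate is not rolled back
    }
    remove(coll->docFor(&_txn, recordIds[count]).value());  // ...then actually delete it
    scan->restoreState();  // the scan resumes past the removed RecordId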
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index ed68c51feac..45584db28f4 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -81,8 +81,8 @@ public:
wunit.commit();
}
- void getLocs() {
- _locs.clear();
+ void getRecordIds() {
+ _recordIds.clear();
WorkingSet ws;
CollectionScanParams params;
@@ -96,8 +96,8 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- verify(member->hasLoc());
- _locs.push_back(member->loc);
+ verify(member->hasRecordId());
+ _recordIds.push_back(member->recordId);
}
}
}
@@ -108,19 +108,19 @@ public:
wunit.commit();
}
- void remove(const RecordId& loc) {
+ void remove(const RecordId& recordId) {
WriteUnitOfWork wunit(&_txn);
- _coll->deleteDocument(&_txn, loc);
+ _coll->deleteDocument(&_txn, recordId);
wunit.commit();
}
- void update(const RecordId& oldLoc, const BSONObj& newDoc) {
+ void update(const RecordId& oldRecordId, const BSONObj& newDoc) {
WriteUnitOfWork wunit(&_txn);
- BSONObj oldDoc = _coll->getRecordStore()->dataFor(&_txn, oldLoc).releaseToBson();
+ BSONObj oldDoc = _coll->getRecordStore()->dataFor(&_txn, oldRecordId).releaseToBson();
OplogUpdateEntryArgs args;
args.ns = _coll->ns().ns();
_coll->updateDocument(&_txn,
- oldLoc,
+ oldRecordId,
Snapshotted<BSONObj>(_txn.recoveryUnit()->getSnapshotId(), oldDoc),
newDoc,
false,
@@ -138,7 +138,7 @@ public:
// - asserts nSkipped is correct
void testCount(const CountRequest& request, int expected_n = kDocuments, bool indexed = false) {
setup();
- getLocs();
+ getRecordIds();
unique_ptr<WorkingSet> ws(new WorkingSet);
@@ -223,7 +223,7 @@ public:
}
protected:
- vector<RecordId> _locs;
+ vector<RecordId> _recordIds;
OperationContextImpl _txn;
ScopedTransaction _scopedXact;
Lock::DBLock _dbLock;
@@ -294,14 +294,14 @@ public:
// At the point at which this is called, we are between counting the first and second records
void interject(CountStage& count_stage, int interjection) {
if (interjection == 0) {
- // At this point, our first interjection, we've counted _locs[0]
- // and are about to count _locs[1]
+ // At this point, our first interjection, we've counted _recordIds[0]
+ // and are about to count _recordIds[1]
WriteUnitOfWork wunit(&_txn);
- count_stage.invalidate(&_txn, _locs[interjection], INVALIDATION_DELETION);
- remove(_locs[interjection]);
+ count_stage.invalidate(&_txn, _recordIds[interjection], INVALIDATION_DELETION);
+ remove(_recordIds[interjection]);
- count_stage.invalidate(&_txn, _locs[interjection + 1], INVALIDATION_DELETION);
- remove(_locs[interjection + 1]);
+ count_stage.invalidate(&_txn, _recordIds[interjection + 1], INVALIDATION_DELETION);
+ remove(_recordIds[interjection + 1]);
wunit.commit();
}
}
@@ -321,13 +321,13 @@ public:
// At the point at which this is called, we are between the first and second records
void interject(CountStage& count_stage, int interjection) {
if (interjection == 0) {
- count_stage.invalidate(&_txn, _locs[0], INVALIDATION_MUTATION);
- OID id1 = _coll->docFor(&_txn, _locs[0]).value().getField("_id").OID();
- update(_locs[0], BSON("_id" << id1 << "x" << 100));
+ count_stage.invalidate(&_txn, _recordIds[0], INVALIDATION_MUTATION);
+ OID id1 = _coll->docFor(&_txn, _recordIds[0]).value().getField("_id").OID();
+ update(_recordIds[0], BSON("_id" << id1 << "x" << 100));
- count_stage.invalidate(&_txn, _locs[1], INVALIDATION_MUTATION);
- OID id2 = _coll->docFor(&_txn, _locs[1]).value().getField("_id").OID();
- update(_locs[1], BSON("_id" << id2 << "x" << 100));
+ count_stage.invalidate(&_txn, _recordIds[1], INVALIDATION_MUTATION);
+ OID id2 = _coll->docFor(&_txn, _recordIds[1]).value().getField("_id").OID();
+ update(_recordIds[1], BSON("_id" << id2 << "x" << 100));
}
}
};
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index 4b6dacc27d1..c35536708ca 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -80,9 +80,9 @@ public:
_client.remove(nss.ns(), obj);
}
- void getLocs(Collection* collection,
- CollectionScanParams::Direction direction,
- vector<RecordId>* out) {
+ void getRecordIds(Collection* collection,
+ CollectionScanParams::Direction direction,
+ vector<RecordId>* out) {
WorkingSet ws;
CollectionScanParams params;
@@ -96,8 +96,8 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- verify(member->hasLoc());
- out->push_back(member->loc);
+ verify(member->hasRecordId());
+ out->push_back(member->recordId);
}
}
}
@@ -133,8 +133,8 @@ public:
Collection* coll = ctx.getCollection();
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::FORWARD, &locs);
+ vector<RecordId> recordIds;
+ getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
// Configure the scan.
CollectionScanParams collScanParams;
@@ -163,14 +163,14 @@ public:
ASSERT_EQUALS(PlanStage::NEED_TIME, state);
}
- // Remove locs[targetDocIndex];
+ // Remove recordIds[targetDocIndex];
deleteStage.saveState();
{
WriteUnitOfWork wunit(&_txn);
- deleteStage.invalidate(&_txn, locs[targetDocIndex], INVALIDATION_DELETION);
+ deleteStage.invalidate(&_txn, recordIds[targetDocIndex], INVALIDATION_DELETION);
wunit.commit();
}
- BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]).value();
+ BSONObj targetDoc = coll->docFor(&_txn, recordIds[targetDocIndex]).value();
ASSERT(!targetDoc.isEmpty());
remove(targetDoc);
deleteStage.restoreState();
@@ -202,18 +202,18 @@ public:
const unique_ptr<CanonicalQuery> cq(canonicalize(query));
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::FORWARD, &locs);
+ vector<RecordId> recordIds;
+ getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
// Configure a QueuedDataStage to pass the first object in the collection back in a
- // LOC_AND_OBJ state.
+ // RID_AND_OBJ state.
auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
- member->loc = locs[targetDocIndex];
+ member->recordId = recordIds[targetDocIndex];
const BSONObj oldDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex);
member->obj = Snapshotted<BSONObj>(SnapshotId(), oldDoc);
- ws->transitionToLocAndObj(id);
+ ws->transitionToRecordIdAndObj(id);
qds->pushBack(id);
// Configure the delete.
@@ -238,7 +238,7 @@ public:
WorkingSetMember* resultMember = ws->get(id);
// With an owned copy of the object, with no RecordId.
ASSERT_TRUE(resultMember->hasOwnedObj());
- ASSERT_FALSE(resultMember->hasLoc());
+ ASSERT_FALSE(resultMember->hasRecordId());
ASSERT_EQUALS(resultMember->getState(), WorkingSetMember::OWNED_OBJ);
ASSERT_TRUE(resultMember->obj.value().isOwned());
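The delete and update tests share this seeding pattern; condensed, handing a write stage a single RID_AND_OBJ member via QueuedDataStage looks like:

    auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
    WorkingSetID id = ws->allocate();
    WorkingSetMember* member = ws->get(id);
    member->recordId = recordIds[targetDocIndex];  // formerly member->loc
    member->obj = Snapshotted<BSONObj>(SnapshotId(), oldDoc);
    ws->transitionToRecordIdAndObj(id);
    qds->pushBack(id);
    // After the stage consumes it, the result member owns a copy of the
    // document and its RecordId is cleared (OWNED_OBJ), as asserted above.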
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index 55bb911347f..079be7c5244 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -61,7 +61,7 @@ public:
_client.dropCollection(ns());
}
- void getLocs(set<RecordId>* out, Collection* coll) {
+ void getRecordIds(set<RecordId>* out, Collection* coll) {
auto cursor = coll->getCursor(&_txn);
while (auto record = cursor->next()) {
out->insert(record->id);
@@ -105,9 +105,9 @@ public:
// Add an object to the DB.
insert(BSON("foo" << 5));
- set<RecordId> locs;
- getLocs(&locs, coll);
- ASSERT_EQUALS(size_t(1), locs.size());
+ set<RecordId> recordIds;
+ getRecordIds(&recordIds, coll);
+ ASSERT_EQUALS(size_t(1), recordIds.size());
// Create a mock stage that returns the WSM.
auto mockStage = make_unique<QueuedDataStage>(&_txn, &ws);
@@ -116,16 +116,16 @@ public:
{
WorkingSetID id = ws.allocate();
WorkingSetMember* mockMember = ws.get(id);
- mockMember->loc = *locs.begin();
- mockMember->obj = coll->docFor(&_txn, mockMember->loc);
- ws.transitionToLocAndObj(id);
+ mockMember->recordId = *recordIds.begin();
+ mockMember->obj = coll->docFor(&_txn, mockMember->recordId);
+ ws.transitionToRecordIdAndObj(id);
// Points into our DB.
mockStage->pushBack(id);
}
{
WorkingSetID id = ws.allocate();
WorkingSetMember* mockMember = ws.get(id);
- mockMember->loc = RecordId();
+ mockMember->recordId = RecordId();
mockMember->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("foo" << 6));
mockMember->transitionToOwnedObj();
ASSERT_TRUE(mockMember->obj.value().isOwned());
@@ -171,9 +171,9 @@ public:
// Add an object to the DB.
insert(BSON("foo" << 5));
- set<RecordId> locs;
- getLocs(&locs, coll);
- ASSERT_EQUALS(size_t(1), locs.size());
+ set<RecordId> recordIds;
+ getRecordIds(&recordIds, coll);
+ ASSERT_EQUALS(size_t(1), recordIds.size());
// Create a mock stage that returns the WSM.
auto mockStage = make_unique<QueuedDataStage>(&_txn, &ws);
@@ -182,10 +182,10 @@ public:
{
WorkingSetID id = ws.allocate();
WorkingSetMember* mockMember = ws.get(id);
- mockMember->loc = *locs.begin();
- ws.transitionToLocAndIdx(id);
+ mockMember->recordId = *recordIds.begin();
+ ws.transitionToRecordIdAndIdx(id);
- // State is loc and index, shouldn't be able to get the foo data inside.
+ // State is RecordId and index, shouldn't be able to get the foo data inside.
BSONElement elt;
ASSERT_FALSE(mockMember->getFieldDotted("foo", &elt));
mockStage->pushBack(id);
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index 1b11268838e..97f77af78e6 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -185,10 +185,10 @@ public:
// Expect to get key {'': 5} and then key {'': 6}.
WorkingSetMember* member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->getState());
+ ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
ASSERT_EQ(member->keyData[0].keyData, BSON("" << 5));
member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->getState());
+ ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
// Save state and insert a few indexed docs.
@@ -198,7 +198,7 @@ public:
ixscan->restoreState();
member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->getState());
+ ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
ASSERT_EQ(member->keyData[0].keyData, BSON("" << 10));
WorkingSetID id;
@@ -222,7 +222,7 @@ public:
// Expect to get key {'': 6}.
WorkingSetMember* member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->getState());
+ ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
// Save state and insert an indexed doc.
@@ -231,7 +231,7 @@ public:
ixscan->restoreState();
member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->getState());
+ ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
ASSERT_EQ(member->keyData[0].keyData, BSON("" << 7));
WorkingSetID id;
@@ -255,7 +255,7 @@ public:
// Expect to get key {'': 6}.
WorkingSetMember* member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->getState());
+ ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
// Save state and insert an indexed doc.
@@ -285,10 +285,10 @@ public:
// Expect to get key {'': 10} and then {'': 8}.
WorkingSetMember* member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->getState());
+ ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
ASSERT_EQ(member->keyData[0].keyData, BSON("" << 10));
member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->getState());
+ ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
ASSERT_EQ(member->keyData[0].keyData, BSON("" << 8));
// Save state and insert an indexed doc.
@@ -299,7 +299,7 @@ public:
// Ensure that we don't erroneously return {'': 9} or {'':3}.
member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->getState());
+ ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
WorkingSetID id;
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index c6fd3073ead..cd056a57d85 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -77,7 +77,7 @@ public:
_client.remove(ns(), obj);
}
- void getLocs(set<RecordId>* out, Collection* coll) {
+ void getRecordIds(set<RecordId>* out, Collection* coll) {
auto cursor = coll->getCursor(&_txn);
while (auto record = cursor->next()) {
out->insert(record->id);
@@ -562,12 +562,12 @@ public:
ms->addChild(new IndexScan(&_txn, params, &ws, NULL));
}
- set<RecordId> locs;
- getLocs(&locs, coll);
+ set<RecordId> recordIds;
+ getRecordIds(&recordIds, coll);
- set<RecordId>::iterator it = locs.begin();
+ set<RecordId>::iterator it = recordIds.begin();
- // Get 10 results. Should be getting results in order of 'locs'.
+ // Get 10 results. Should be getting results in order of 'recordIds'.
int count = 0;
while (!ms->isEOF() && count < 10) {
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -577,7 +577,7 @@ public:
}
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(member->loc, *it);
+ ASSERT_EQUALS(member->recordId, *it);
BSONElement elt;
string index(1, 'a' + count);
ASSERT(member->getFieldDotted(index, &elt));
@@ -588,12 +588,12 @@ public:
++it;
}
- // Invalidate locs[11]. Should force a fetch and return the deleted document.
+ // Invalidate recordIds[11]. Should force a fetch and return the deleted document.
ms->saveState();
ms->invalidate(&_txn, *it, INVALIDATION_DELETION);
ms->restoreState();
- // Make sure locs[11] was fetched for us.
+ // Make sure recordIds[11] was fetched for us.
{
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState status;
@@ -602,7 +602,7 @@ public:
} while (PlanStage::ADVANCED != status);
WorkingSetMember* member = ws.get(id);
- ASSERT(!member->hasLoc());
+ ASSERT(!member->hasRecordId());
ASSERT(member->hasObj());
string index(1, 'a' + count);
BSONElement elt;
@@ -624,7 +624,7 @@ public:
}
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(member->loc, *it);
+ ASSERT_EQUALS(member->recordId, *it);
BSONElement elt;
string index(1, 'a' + count);
ASSERT_TRUE(member->getFieldDotted(index, &elt));
@@ -659,7 +659,7 @@ public:
addIndex(BSON("a" << 1));
std::set<RecordId> rids;
- getLocs(&rids, coll);
+ getRecordIds(&rids, coll);
set<RecordId>::iterator it = rids.begin();
WorkingSet ws;
@@ -698,8 +698,8 @@ public:
// First doc should be {a: 4}.
member = getNextResult(&ws, ms.get());
- ASSERT_EQ(member->getState(), WorkingSetMember::LOC_AND_OBJ);
- ASSERT_EQ(member->loc, *it);
+ ASSERT_EQ(member->getState(), WorkingSetMember::RID_AND_OBJ);
+ ASSERT_EQ(member->recordId, *it);
ASSERT_EQ(member->obj.value(), BSON("_id" << 4 << "a" << 4));
++it;
@@ -714,8 +714,8 @@ public:
// We correctly dedup the invalidated doc and return {a: 6} next.
member = getNextResult(&ws, ms.get());
- ASSERT_EQ(member->getState(), WorkingSetMember::LOC_AND_OBJ);
- ASSERT_EQ(member->loc, *it);
+ ASSERT_EQ(member->getState(), WorkingSetMember::RID_AND_OBJ);
+ ASSERT_EQ(member->recordId, *it);
ASSERT_EQ(member->obj.value(), BSON("_id" << 6 << "a" << 6));
}
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 0349c0de3db..efaa77a69ca 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -69,7 +69,7 @@ public:
_client.insert(ns(), obj);
}
- void getLocs(set<RecordId>* out, Collection* coll) {
+ void getRecordIds(set<RecordId>* out, Collection* coll) {
auto cursor = coll->getCursor(&_txn);
while (auto record = cursor->next()) {
out->insert(record->id);
@@ -80,20 +80,20 @@ public:
* We feed a mix of (key, unowned, owned) data to the sort stage.
*/
void insertVarietyOfObjects(WorkingSet* ws, QueuedDataStage* ms, Collection* coll) {
- set<RecordId> locs;
- getLocs(&locs, coll);
+ set<RecordId> recordIds;
+ getRecordIds(&recordIds, coll);
- set<RecordId>::iterator it = locs.begin();
+ set<RecordId>::iterator it = recordIds.begin();
for (int i = 0; i < numObj(); ++i, ++it) {
- ASSERT_FALSE(it == locs.end());
+ ASSERT_FALSE(it == recordIds.end());
// Insert some owned obj data.
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
- member->loc = *it;
+ member->recordId = *it;
member->obj = coll->docFor(&_txn, *it);
- ws->transitionToLocAndObj(id);
+ ws->transitionToRecordIdAndObj(id);
ms->pushBack(id);
}
}
@@ -322,8 +322,8 @@ public:
}
// The data we're going to later invalidate.
- set<RecordId> locs;
- getLocs(&locs, coll);
+ set<RecordId> recordIds;
+ getRecordIds(&recordIds, coll);
unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll));
SortStage* ss = static_cast<SortStage*>(exec->getRootStage());
@@ -340,10 +340,10 @@ public:
ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
}
- // We should have read in the first 'firstRead' locs. Invalidate the first one.
+ // We should have read in the first 'firstRead' recordIds. Invalidate the first one.
// Since it's in the WorkingSet, the updates should not be reflected in the output.
exec->saveState();
- set<RecordId>::iterator it = locs.begin();
+ set<RecordId>::iterator it = recordIds.begin();
Snapshotted<BSONObj> oldDoc = coll->docFor(&_txn, *it);
OID updatedId = oldDoc.value().getField("_id").OID();
@@ -370,7 +370,7 @@ public:
// Let's just invalidate everything now. Already read into ss, so original values
// should be fetched.
exec->saveState();
- while (it != locs.end()) {
+ while (it != recordIds.end()) {
oldDoc = coll->docFor(&_txn, *it);
{
WriteUnitOfWork wuow(&_txn);
@@ -435,8 +435,8 @@ public:
}
// The data we're going to later invalidate.
- set<RecordId> locs;
- getLocs(&locs, coll);
+ set<RecordId> recordIds;
+ getRecordIds(&recordIds, coll);
unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll));
SortStage* ss = static_cast<SortStage*>(exec->getRootStage());
@@ -453,9 +453,9 @@ public:
ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
}
- // We should have read in the first 'firstRead' locs. Invalidate the first.
+ // We should have read in the first 'firstRead' recordIds. Invalidate the first.
exec->saveState();
- set<RecordId>::iterator it = locs.begin();
+ set<RecordId>::iterator it = recordIds.begin();
{
WriteUnitOfWork wuow(&_txn);
coll->deleteDocument(&_txn, *it++);
@@ -471,7 +471,7 @@ public:
// Let's just invalidate everything now.
exec->saveState();
- while (it != locs.end()) {
+ while (it != recordIds.end()) {
{
WriteUnitOfWork wuow(&_txn);
coll->deleteDocument(&_txn, *it++);
@@ -502,7 +502,7 @@ public:
// Deletion invalidation of everything fed to sort with limit enabled.
// Limit size of working set within sort stage to a small number
-// Sort stage implementation should not try to invalidate DiskLocs that
+// Sort stage implementation should not try to invalidate RecordIds that
// are no longer in the working set.
template <int LIMIT>
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 10526f041ac..ff232f4cb2a 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -132,9 +132,9 @@ public:
}
}
- void getLocs(Collection* collection,
- CollectionScanParams::Direction direction,
- vector<RecordId>* out) {
+ void getRecordIds(Collection* collection,
+ CollectionScanParams::Direction direction,
+ vector<RecordId>* out) {
WorkingSet ws;
CollectionScanParams params;
@@ -148,8 +148,8 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- verify(member->hasLoc());
- out->push_back(member->loc);
+ verify(member->hasRecordId());
+ out->push_back(member->recordId);
}
}
}
@@ -259,8 +259,8 @@ public:
Collection* coll = db->getCollection(nss.ns());
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::FORWARD, &locs);
+ vector<RecordId> recordIds;
+ getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
UpdateRequest request(nss);
UpdateLifecycleImpl updateLifecycle(false, nss);
@@ -305,14 +305,14 @@ public:
ASSERT_EQUALS(PlanStage::NEED_TIME, state);
}
- // Remove locs[targetDocIndex];
+ // Remove recordIds[targetDocIndex];
updateStage->saveState();
{
WriteUnitOfWork wunit(&_txn);
- updateStage->invalidate(&_txn, locs[targetDocIndex], INVALIDATION_DELETION);
+ updateStage->invalidate(&_txn, recordIds[targetDocIndex], INVALIDATION_DELETION);
wunit.commit();
}
- BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]).value();
+ BSONObj targetDoc = coll->docFor(&_txn, recordIds[targetDocIndex]).value();
ASSERT(!targetDoc.isEmpty());
remove(targetDoc);
updateStage->restoreState();
@@ -377,8 +377,8 @@ public:
const unique_ptr<CanonicalQuery> cq(canonicalize(query));
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::FORWARD, &locs);
+ vector<RecordId> recordIds;
+ getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
// Populate the request.
request.setQuery(query);
@@ -391,14 +391,14 @@ public:
ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
// Configure a QueuedDataStage to pass the first object in the collection back in a
- // LOC_AND_OBJ state.
+ // RID_AND_OBJ state.
auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
- member->loc = locs[targetDocIndex];
+ member->recordId = recordIds[targetDocIndex];
const BSONObj oldDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex);
member->obj = Snapshotted<BSONObj>(SnapshotId(), oldDoc);
- ws->transitionToLocAndObj(id);
+ ws->transitionToRecordIdAndObj(id);
qds->pushBack(id);
// Configure the update.
@@ -420,7 +420,7 @@ public:
WorkingSetMember* resultMember = ws->get(id);
// With an owned copy of the object, with no RecordId.
ASSERT_TRUE(resultMember->hasOwnedObj());
- ASSERT_FALSE(resultMember->hasLoc());
+ ASSERT_FALSE(resultMember->hasRecordId());
ASSERT_EQUALS(resultMember->getState(), WorkingSetMember::OWNED_OBJ);
ASSERT_TRUE(resultMember->obj.value().isOwned());
@@ -465,8 +465,8 @@ public:
const unique_ptr<CanonicalQuery> cq(canonicalize(query));
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::FORWARD, &locs);
+ vector<RecordId> recordIds;
+ getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
// Populate the request.
request.setQuery(query);
@@ -479,14 +479,14 @@ public:
ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
// Configure a QueuedDataStage to pass the first object in the collection back in a
- // LOC_AND_OBJ state.
+ // RID_AND_OBJ state.
auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
- member->loc = locs[targetDocIndex];
+ member->recordId = recordIds[targetDocIndex];
const BSONObj oldDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex);
member->obj = Snapshotted<BSONObj>(SnapshotId(), oldDoc);
- ws->transitionToLocAndObj(id);
+ ws->transitionToRecordIdAndObj(id);
qds->pushBack(id);
// Configure the update.
@@ -508,7 +508,7 @@ public:
WorkingSetMember* resultMember = ws->get(id);
// With an owned copy of the object, with no RecordId.
ASSERT_TRUE(resultMember->hasOwnedObj());
- ASSERT_FALSE(resultMember->hasLoc());
+ ASSERT_FALSE(resultMember->hasRecordId());
ASSERT_EQUALS(resultMember->getState(), WorkingSetMember::OWNED_OBJ);
ASSERT_TRUE(resultMember->obj.value().isOwned());
diff --git a/src/mongo/dbtests/sort_key_generator_test.cpp b/src/mongo/dbtests/sort_key_generator_test.cpp
index 26263c06119..5d6622f0f2b 100644
--- a/src/mongo/dbtests/sort_key_generator_test.cpp
+++ b/src/mongo/dbtests/sort_key_generator_test.cpp
@@ -71,7 +71,7 @@ BSONObj extractSortKeyCovered(const char* sortSpec, const IndexKeyDatum& ikd) {
WorkingSetID wsid = ws.allocate();
WorkingSetMember* wsm = ws.get(wsid);
wsm->keyData.push_back(ikd);
- ws.transitionToLocAndIdx(wsid);
+ ws.transitionToRecordIdAndIdx(wsid);
BSONObj sortKey;
auto sortKeyGen = stdx::make_unique<SortKeyGenerator>(fromjson(sortSpec), BSONObj());