path: root/src/mongo
author     Max Hirschhorn <max.hirschhorn@mongodb.com>  2015-08-24 23:51:27 -0400
committer  Max Hirschhorn <max.hirschhorn@mongodb.com>  2015-08-24 23:51:27 -0400
commit     764e0c45471d5ca63c708f362be0e6d01ee72eb0 (patch)
tree       77c6693c1c1003d802a40a30eda8a79a4e0d7704 /src/mongo
parent     564f8089c0d4541215d1aa31dae331115e68b95f (diff)
download   mongo-764e0c45471d5ca63c708f362be0e6d01ee72eb0.tar.gz
SERVER-16444 Copy data in the query subsystem as needed.
A WorkingSetMember in the LOC_AND_OBJ state must be made owned when:

1. Its WorkingSetID is cached across multiple calls to work().
2. Multiple calls to next(), seekExact(), saveState(), etc. are performed on the same
   WiredTiger cursor in a single work() call.

No longer necessary to always copy data out of WiredTiger buffers.
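
The pattern applied throughout the stages below can be sketched as follows. This is an
illustrative fragment only, not code from this commit; BufferingStage and _bufferedIds are
hypothetical names standing in for any stage that holds a WorkingSetID across calls to
work(). Such a stage copies the member's BSONObj out of the storage engine's buffer with
makeObjOwned() before buffering the id, since the buffer may be freed once the stage yields.

    PlanStage::StageState BufferingStage::work(WorkingSetID* out) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState state = child()->work(&id);

        if (PlanStage::ADVANCED == state) {
            WorkingSetMember* member = _ws->get(id);

            // The member may be in the LOC_AND_OBJ state with 'obj' still pointing into a
            // WiredTiger buffer. 'id' outlives this call to work(), so copy the data now;
            // makeObjOwned() is a no-op if the object is already owned.
            member->makeObjOwned();

            _bufferedIds.push_back(id);
            return PlanStage::NEED_TIME;
        }

        return state;
    }

The update and delete stages additionally call makeObjOwned() before saveState() and after a
WriteConflictException, for the same reason: on a doc-locking engine, saveState() is allowed
to free the memory the unowned object points into.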
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 7
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 1
-rw-r--r--  src/mongo/db/exec/and_common-inl.h | 1
-rw-r--r--  src/mongo/db/exec/and_hash.cpp | 11
-rw-r--r--  src/mongo/db/exec/and_sorted.cpp | 7
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp | 3
-rw-r--r--  src/mongo/db/exec/delete.cpp | 16
-rw-r--r--  src/mongo/db/exec/fetch.cpp | 3
-rw-r--r--  src/mongo/db/exec/merge_sort.cpp | 6
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp | 4
-rw-r--r--  src/mongo/db/exec/near.cpp | 2
-rw-r--r--  src/mongo/db/exec/pipeline_proxy.cpp | 4
-rw-r--r--  src/mongo/db/exec/pipeline_proxy.h | 7
-rw-r--r--  src/mongo/db/exec/sort.cpp | 17
-rw-r--r--  src/mongo/db/exec/text_or.cpp | 5
-rw-r--r--  src/mongo/db/exec/update.cpp | 20
-rw-r--r--  src/mongo/db/exec/working_set.cpp | 8
-rw-r--r--  src/mongo/db/exec/working_set.h | 4
-rw-r--r--  src/mongo/db/exec/working_set_common.cpp | 4
-rw-r--r--  src/mongo/db/exec/working_set_common.h | 6
-rw-r--r--  src/mongo/db/query/plan_executor.cpp | 9
-rw-r--r--  src/mongo/db/repl/replication_info.cpp | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp | 12
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp | 4
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp | 2
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp | 2
26 files changed, 92 insertions, 75 deletions
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index b348f8c3c84..0332d783266 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -98,11 +98,10 @@ static bool handleCursorCommand(OperationContext* txn,
break;
}
+ // If adding this object will cause us to exceed the BSON size limit, then we stash it for
+ // later.
if (resultsArray.len() + next.objsize() > byteLimit) {
- // Get the pipeline proxy stage wrapped by this PlanExecutor.
- PipelineProxyStage* proxy = static_cast<PipelineProxyStage*>(exec->getRootStage());
- // too big. next will be the first doc in the second batch
- proxy->pushBack(next);
+ exec->enqueue(next);
break;
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 0e854d70813..25a8bc494ff 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -364,7 +364,6 @@ long long Helpers::removeRange(OperationContext* txn,
PlanExecutor::ExecState state;
// This may yield so we cannot touch nsd after this.
state = exec->getNext(&obj, &rloc);
- exec.reset();
if (PlanExecutor::IS_EOF == state) {
break;
}
diff --git a/src/mongo/db/exec/and_common-inl.h b/src/mongo/db/exec/and_common-inl.h
index 85599c198fc..552b6312045 100644
--- a/src/mongo/db/exec/and_common-inl.h
+++ b/src/mongo/db/exec/and_common-inl.h
@@ -67,6 +67,7 @@ public:
// 'src' has the full document but 'dest' doesn't so we need to copy it over.
dest->obj = src.obj;
+ dest->makeObjOwned();
// We have an object so we don't need key data.
dest->keyData.clear();
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 3e153316931..e272c54b17e 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -136,6 +136,7 @@ PlanStage::StageState AndHashStage::work(WorkingSetID* out) {
for (size_t i = 0; i < _children.size(); ++i) {
auto& child = _children[i];
for (size_t j = 0; j < kLookAheadWorks; ++j) {
+ // Cache the result in _lookAheadResults[i].
StageState childStatus = child->work(&_lookAheadResults[i]);
if (PlanStage::IS_EOF == childStatus) {
@@ -144,9 +145,10 @@ PlanStage::StageState AndHashStage::work(WorkingSetID* out) {
_dataMap.clear();
return PlanStage::IS_EOF;
} else if (PlanStage::ADVANCED == childStatus) {
- // We have a result cached in _lookAheadResults[i]. Stop looking at this
- // child.
- break;
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned in case we
+ // yield.
+ _ws->get(_lookAheadResults[i])->makeObjOwned();
+ break; // Stop looking at this child.
} else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
// Propagate error to parent.
*out = _lookAheadResults[i];
@@ -283,6 +285,9 @@ PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) {
return PlanStage::NEED_TIME;
}
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
+ member->makeObjOwned();
+
// Update memory stats.
_memUsage += member->getMemUsage();
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 5844078c762..10c351f8e64 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -115,6 +115,9 @@ PlanStage::StageState AndSortedStage::getTargetLoc(WorkingSetID* out) {
_targetId = id;
_targetLoc = member->loc;
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
+ member->makeObjOwned();
+
// We have to AND with all other children.
for (size_t i = 1; i < _children.size(); ++i) {
_workingTowardRep.push(i);
@@ -210,6 +213,10 @@ PlanStage::StageState AndSortedStage::moveTowardTargetLoc(WorkingSetID* out) {
_targetNode = workingChildNumber;
_targetLoc = member->loc;
_targetId = id;
+
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
+ member->makeObjOwned();
+
_workingTowardRep = std::queue<size_t>();
for (size_t i = 0; i < _children.size(); ++i) {
if (workingChildNumber != i) {
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index a5ab96f47b9..147c74a32ec 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -96,6 +96,9 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
if (PlanStage::ADVANCED == state) {
// Save result for later.
+ WorkingSetMember* member = _ws->get(id);
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
+ member->makeObjOwned();
_results.push_back(id);
if (_results.size() >= numResults) {
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 1b970a475fc..b58354aa5d5 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -154,24 +154,27 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
}
}
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned because saveState()
+ // is allowed to free the memory.
+ if (_params.returnDeleted) {
+ member->makeObjOwned();
+ }
+
// TODO: Do we want to buffer docs and delete them in a group rather than
// saving/restoring state repeatedly?
try {
- child()->saveState();
if (supportsDocLocking()) {
- // Doc-locking engines require this after saveState() since they don't use
+ // Doc-locking engines require this before saveState() since they don't use
// invalidations.
WorkingSetCommon::prepareForSnapshotChange(_ws);
}
+ child()->saveState();
} catch (const WriteConflictException& wce) {
std::terminate();
}
if (_params.returnDeleted) {
- // Save a copy of the document that is about to get deleted.
- BSONObj deletedDoc = member->obj.value();
- member->obj.setValue(deletedDoc.getOwned());
member->loc = RecordId();
member->transitionToOwnedObj();
}
@@ -185,6 +188,9 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
++_specificStats.docsDeleted;
} catch (const WriteConflictException& wce) {
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned because it may be
+ // freed when we yield.
+ member->makeObjOwned();
_idRetrying = id;
memberFreer.Dismiss(); // Keep this member around so we can retry deleting it.
*out = WorkingSet::INVALID_ID;
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index fb76a5f9f81..c22a30b5c7e 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -128,6 +128,9 @@ PlanStage::StageState FetchStage::work(WorkingSetID* out) {
return NEED_TIME;
}
} catch (const WriteConflictException& wce) {
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned because it may
+ // be freed when we yield.
+ member->makeObjOwned();
_idRetrying = id;
*out = WorkingSet::INVALID_ID;
_commonStats.needYield++;
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 8faf6cfdeb3..be218d266b5 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -87,10 +87,10 @@ PlanStage::StageState MergeSortStage::work(WorkingSetID* out) {
StageState code = child->work(&id);
if (PlanStage::ADVANCED == code) {
+ WorkingSetMember* member = _ws->get(id);
+
// If we're deduping...
if (_dedup) {
- WorkingSetMember* member = _ws->get(id);
-
if (!member->hasLoc()) {
// Can't dedup data unless there's a RecordId. We go ahead and use its
// result.
@@ -122,6 +122,8 @@ PlanStage::StageState MergeSortStage::work(WorkingSetID* out) {
StageWithValue value;
value.id = id;
value.stage = child;
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
+ member->makeObjOwned();
_mergingData.push_front(value);
// Insert the result (indirectly) into our priority queue.
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 3807da74997..7d3c636882c 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -379,6 +379,10 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic
if (PlanStage::ADVANCED == state) {
// Save result for later.
+ WorkingSetMember* member = candidate.ws->get(id);
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned in case we choose to
+ // return the results from the 'candidate' plan.
+ member->makeObjOwned();
candidate.results.push_back(id);
// Once a plan returns enough results, stop working.
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 760bf4f1247..96386326841 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -216,6 +216,8 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
// results.
double memberDistance = distanceStatus.getValue();
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
+ nextMember->makeObjOwned();
_resultBuffer.push(SearchResult(nextMemberID, memberDistance));
// Store the member's RecordId, if available, for quick invalidation
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index e45318bf3ce..6011c3acc0f 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -116,10 +116,6 @@ void PipelineProxyStage::doReattachToOperationContext() {
}
}
-void PipelineProxyStage::pushBack(const BSONObj& obj) {
- _stash.push_back(obj);
-}
-
unique_ptr<PlanStageStats> PipelineProxyStage::getStats() {
unique_ptr<PlanStageStats> ret =
make_unique<PlanStageStats>(CommonStats(kStageType), STAGE_PIPELINE_PROXY);
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index 6c85f117470..a82c5fe13fb 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -62,11 +62,6 @@ public:
void doReattachToOperationContext() final;
/**
- * Make obj the next object returned by getNext().
- */
- void pushBack(const BSONObj& obj);
-
- /**
* Return a shared pointer to the PlanExecutor that feeds the pipeline. The returned
* pointer may be NULL.
*/
@@ -91,7 +86,7 @@ public:
private:
boost::optional<BSONObj> getNextBson();
- // Things in the _stash sould be returned before pulling items from _pipeline.
+ // Things in the _stash should be returned before pulling items from _pipeline.
const boost::intrusive_ptr<Pipeline> _pipeline;
std::vector<BSONObj> _stash;
const bool _includeMetaData;
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index da8be66a28b..3295a6b82fe 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -271,13 +271,17 @@ void SortStage::addToBuffer(const SortableDataItem& item) {
// Holds ID of working set member to be freed at end of this function.
WorkingSetID wsidToFree = WorkingSet::INVALID_ID;
+ WorkingSetMember* member = _ws->get(item.wsid);
if (_limit == 0) {
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
+ member->makeObjOwned();
_data.push_back(item);
- _memUsage += _ws->get(item.wsid)->getMemUsage();
+ _memUsage += member->getMemUsage();
} else if (_limit == 1) {
if (_data.empty()) {
+ member->makeObjOwned();
_data.push_back(item);
- _memUsage = _ws->get(item.wsid)->getMemUsage();
+ _memUsage = member->getMemUsage();
return;
}
wsidToFree = item.wsid;
@@ -285,16 +289,18 @@ void SortStage::addToBuffer(const SortableDataItem& item) {
// Compare new item with existing item in vector.
if (cmp(item, _data[0])) {
wsidToFree = _data[0].wsid;
+ member->makeObjOwned();
_data[0] = item;
- _memUsage = _ws->get(item.wsid)->getMemUsage();
+ _memUsage = member->getMemUsage();
}
} else {
// Update data item set instead of vector
// Limit not reached - insert and return
vector<SortableDataItem>::size_type limit(_limit);
if (_dataSet->size() < limit) {
+ member->makeObjOwned();
_dataSet->insert(item);
- _memUsage += _ws->get(item.wsid)->getMemUsage();
+ _memUsage += member->getMemUsage();
return;
}
// Limit will be exceeded - compare with item with lowest key
@@ -306,13 +312,14 @@ void SortStage::addToBuffer(const SortableDataItem& item) {
const WorkingSetComparator& cmp = *_sortKeyComparator;
if (cmp(item, lastItem)) {
_memUsage -= _ws->get(lastItem.wsid)->getMemUsage();
- _memUsage += _ws->get(item.wsid)->getMemUsage();
+ _memUsage += member->getMemUsage();
wsidToFree = lastItem.wsid;
// According to std::set iterator validity rules,
// it does not matter which of erase()/insert() happens first.
// Here, we choose to erase first to release potential resources
// used by the last item and to keep the scope of the iterator to a minimum.
_dataSet->erase(lastItemIt);
+ member->makeObjOwned();
_dataSet->insert(item);
}
}
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index bf45ba45ec6..3f525214747 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -334,7 +334,7 @@ private:
WorkingSetMember* member = _ws->get(_id);
// Make it owned since we are buffering results.
- member->obj.setValue(member->obj.value().getOwned());
+ member->makeObjOwned();
return member->obj.value();
}
@@ -373,6 +373,9 @@ PlanStage::StageState TextOrStage::addTerm(WorkingSetID wsid, WorkingSetID* out)
_recordCursor);
shouldKeep = _filter->matches(&tdoc);
} catch (const WriteConflictException& wce) {
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned because it may
+ // be freed when we yield.
+ wsm->makeObjOwned();
_idRetrying = wsid;
*out = WorkingSet::INVALID_ID;
return NEED_YIELD;
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 62580187790..db693ea99ba 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -853,24 +853,22 @@ PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
}
}
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned because saveState()
+ // is allowed to free the memory.
+ member->makeObjOwned();
+
// Save state before making changes
try {
- child()->saveState();
if (supportsDocLocking()) {
- // Doc-locking engines require this after saveState() since they don't use
+ // Doc-locking engines require this before saveState() since they don't use
// invalidations.
WorkingSetCommon::prepareForSnapshotChange(_ws);
}
+ child()->saveState();
} catch (const WriteConflictException& wce) {
std::terminate();
}
- // If we care about the pre-updated version of the doc, save it out here.
- BSONObj oldObj;
- if (_params.request->shouldReturnOldDocs()) {
- oldObj = member->obj.value().getOwned();
- }
-
// Do the update, get us the new version of the doc.
BSONObj newObj = transformAndUpdate(member->obj, loc);
@@ -879,14 +877,14 @@ PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
if (_params.request->shouldReturnNewDocs()) {
member->obj = Snapshotted<BSONObj>(getOpCtx()->recoveryUnit()->getSnapshotId(),
newObj.getOwned());
- } else {
- invariant(_params.request->shouldReturnOldDocs());
- member->obj.setValue(oldObj);
}
member->loc = RecordId();
member->transitionToOwnedObj();
}
} catch (const WriteConflictException& wce) {
+ // Ensure that the BSONObj underlying the WorkingSetMember is owned because it may be
+ // freed when we yield.
+ member->makeObjOwned();
_idRetrying = id;
memberFreer.Dismiss(); // Keep this member around so we can retry updating it.
*out = WorkingSet::INVALID_ID;
diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp
index 0eae502e3cc..c55e512fc32 100644
--- a/src/mongo/db/exec/working_set.cpp
+++ b/src/mongo/db/exec/working_set.cpp
@@ -114,11 +114,6 @@ void WorkingSet::transitionToLocAndIdx(WorkingSetID id) {
void WorkingSet::transitionToLocAndObj(WorkingSetID id) {
WorkingSetMember* member = get(id);
member->_state = WorkingSetMember::LOC_AND_OBJ;
-
- // If 'obj' is owned, then it doesn't need to made owned in preparation for yield.
- if (!member->obj.value().isOwned()) {
- _yieldSensitiveIds.push_back(id);
- }
}
void WorkingSet::transitionToOwnedObj(WorkingSetID id) {
@@ -174,8 +169,7 @@ bool WorkingSetMember::hasOwnedObj() const {
}
void WorkingSetMember::makeObjOwned() {
- invariant(_state == LOC_AND_OBJ);
- if (!obj.value().isOwned()) {
+ if (_state == LOC_AND_OBJ && !obj.value().isOwned()) {
obj.setValue(obj.value().getOwned());
}
}
diff --git a/src/mongo/db/exec/working_set.h b/src/mongo/db/exec/working_set.h
index 845627d00df..f10efca6d3b 100644
--- a/src/mongo/db/exec/working_set.h
+++ b/src/mongo/db/exec/working_set.h
@@ -291,8 +291,8 @@ public:
bool hasOwnedObj() const;
/**
- * Ensures that 'obj' is owned BSON. Only valid to call on a working set member in LOC_AND_OBJ
- * state. No-op if 'obj' is already owned.
+ * Ensures that 'obj' of a WSM in the LOC_AND_OBJ state is owned BSON. It is a no-op if the WSM
+ * is in a different state or if 'obj' is already owned.
*/
void makeObjOwned();
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 0b79447c506..d376658b41f 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -74,10 +74,6 @@ void WorkingSetCommon::prepareForSnapshotChange(WorkingSet* workingSet) {
WorkingSetMember* member = workingSet->get(id);
if (member->getState() == WorkingSetMember::LOC_AND_IDX) {
member->isSuspicious = true;
- } else if (member->getState() == WorkingSetMember::LOC_AND_OBJ) {
- // Need to make sure that the data is owned, as underlying storage can change during a
- // yield.
- member->makeObjOwned();
}
}
}
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index 6c8efbbb66f..2b19cbffb20 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -54,12 +54,10 @@ public:
* state.
*
* Iterates over WorkingSetIDs in 'workingSet' which are "sensitive to yield". These are ids
- * that have transitioned into the LOC_AND_IDX or LOC_AND_OBJ state since the previous yield.
+ * that have transitioned into the LOC_AND_IDX state since the previous yield.
*
* The LOC_AND_IDX members are tagged as suspicious so that they can be handled properly in case
- * the document keyed by the index key is deleted or updated during the yield. LOC_AND_OBJ
- * working set members with unowned BSON documents have their 'obj' field made owned in order to
- * ensure that they don't point into storage which may be modified during yield.
+ * the document keyed by the index key is deleted or updated during the yield.
*/
static void prepareForSnapshotChange(WorkingSet* workingSet);
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 64878802385..78f9dad20ce 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -253,19 +253,18 @@ OperationContext* PlanExecutor::getOpCtx() const {
void PlanExecutor::saveState() {
invariant(_currentState == kUsable || _currentState == kSaved);
- if (!killed()) {
- _root->saveState();
- }
// Doc-locking storage engines drop their transactional context after saving state.
// The query stages inside this stage tree might buffer record ids (e.g. text, geoNear,
// mergeSort, sort) which are no longer protected by the storage engine's transactional
- // boundaries. Force-fetch the documents for any such record ids so that we have our
- // own copy in the working set.
+ // boundaries.
if (supportsDocLocking()) {
WorkingSetCommon::prepareForSnapshotChange(_workingSet.get());
}
+ if (!killed()) {
+ _root->saveState();
+ }
_currentState = kSaved;
}
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index b1e53cdd0a6..fda61a7f65d 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -92,7 +92,7 @@ void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int le
BSONObj obj;
PlanExecutor::ExecState state;
while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- src.push_back(obj);
+ src.push_back(obj.getOwned());
}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index a24676bcb52..6a8778e25bf 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -154,11 +154,9 @@ public:
WT_ITEM value;
invariantWTOK(c->get_value(c, &value));
- auto data = RecordData(static_cast<const char*>(value.data), value.size);
- data.makeOwned(); // TODO delete this line once safe.
_lastReturnedId = id;
- return {{id, std::move(data)}};
+ return {{id, {static_cast<const char*>(value.data), static_cast<int>(value.size)}}};
}
boost::optional<Record> seekExact(const RecordId& id) final {
@@ -174,12 +172,10 @@ public:
WT_ITEM value;
invariantWTOK(c->get_value(c, &value));
- auto data = RecordData(static_cast<const char*>(value.data), value.size);
- data.makeOwned(); // TODO delete this line once safe.
_lastReturnedId = id;
_eof = false;
- return {{id, std::move(data)}};
+ return {{id, {static_cast<const char*>(value.data), static_cast<int>(value.size)}}};
}
void savePositioned() final {
@@ -328,10 +324,8 @@ public:
WT_ITEM value;
invariantWTOK(_cursor->get_value(_cursor, &value));
- auto data = RecordData(static_cast<const char*>(value.data), value.size);
- data.makeOwned(); // TODO delete this line once safe.
- return {{id, std::move(data)}};
+ return {{id, {static_cast<const char*>(value.data), static_cast<int>(value.size)}}};
}
boost::optional<Record> seekExact(const RecordId& id) final {
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 3ceb78b7870..b553ecf1269 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -167,6 +167,7 @@ public:
for (int i = 0; i < N; ++i) {
BSONObj first, second;
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ first = first.getOwned();
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(i, first["c"].numberInt());
@@ -236,6 +237,7 @@ public:
for (int i = 0; i < N; ++i) {
BSONObj first, second;
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ first = first.getOwned();
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(i, first["c"].numberInt());
@@ -306,6 +308,7 @@ public:
BSONObj first, second;
// We inserted N objects but we get 2 * N from the runner because of dups.
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ first = first.getOwned();
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(i, first["c"].numberInt());
@@ -377,6 +380,7 @@ public:
for (int i = 0; i < N; ++i) {
BSONObj first, second;
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ first = first.getOwned();
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(N - i - 1, first["c"].numberInt());
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 6f8d7a41e20..351473104bb 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -170,6 +170,7 @@ public:
// totally) correct.
BSONObj last;
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&last, NULL));
+ ASSERT(last.isOwned());
// Count 'last'.
int count = 1;
@@ -182,6 +183,7 @@ public:
ASSERT(cmp == 0 || cmp == 1);
++count;
last = current;
+ ASSERT(last.isOwned());
}
checkCount(count);
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index ecff448d363..96b6806fa49 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -124,7 +124,7 @@ public:
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
verify(member->hasObj());
- out->push_back(member->obj.value());
+ out->push_back(member->obj.value().getOwned());
}
}
}