author      Martin Neupauer <martin.neupauer@mongodb.com>  2019-08-29 13:22:09 +0000
committer   evergreen <evergreen@mongodb.com>               2019-08-29 13:22:09 +0000
commit      3cdbdedce431fbc71a5eb89b689268e783d73bd4 (patch)
tree        327109bb734c91384f13f5acee6b71c917ea1fd3 /src
parent      ffd486c3ff049abc9f8a2c76b3e2b9dea970c19b (diff)
download    mongo-3cdbdedce431fbc71a5eb89b689268e783d73bd4.tar.gz
SERVER-42181 Make WorkingSetMember hold Document instead of BSONObj
SERVER-42157 Unowned mode for Document/Value
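
The change these two tickets describe shows up in nearly every hunk below: stages that previously populated member->obj with a Snapshotted<BSONObj> now populate member->doc (a Snapshotted<Document>), usually via the new resetDocument() helper. A minimal before/after sketch, not part of the patch itself; 'member' and 'bsonObj' are hypothetical stage-local names:

    // Before this commit: WorkingSetMember carried a Snapshotted<BSONObj>.
    member->obj = Snapshotted<BSONObj>(SnapshotId(), bsonObj.getOwned());
    member->transitionToOwnedObj();

    // After this commit: it carries a Snapshotted<Document>. resetDocument()
    // swaps the backing BSON into the member's existing DocumentStorage rather
    // than allocating a fresh Document each time.
    member->resetDocument(SnapshotId(), bsonObj.getOwned());
    member->transitionToOwnedObj();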
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 2
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp | 2
-rw-r--r--  src/mongo/db/exec/and_common.h | 2
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp | 3
-rw-r--r--  src/mongo/db/exec/change_stream_proxy.cpp | 11
-rw-r--r--  src/mongo/db/exec/change_stream_proxy.h | 6
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp | 3
-rw-r--r--  src/mongo/db/exec/delete.cpp | 5
-rw-r--r--  src/mongo/db/exec/filter.h | 9
-rw-r--r--  src/mongo/db/exec/geo_near.cpp | 2
-rw-r--r--  src/mongo/db/exec/idhack.cpp | 2
-rw-r--r--  src/mongo/db/exec/multi_iterator.cpp | 3
-rw-r--r--  src/mongo/db/exec/pipeline_proxy.cpp | 28
-rw-r--r--  src/mongo/db/exec/pipeline_proxy.h | 4
-rw-r--r--  src/mongo/db/exec/projection.cpp | 12
-rw-r--r--  src/mongo/db/exec/shard_filter.cpp | 5
-rw-r--r--  src/mongo/db/exec/sort_test.cpp | 4
-rw-r--r--  src/mongo/db/exec/text_match.cpp | 2
-rw-r--r--  src/mongo/db/exec/trial_stage.cpp | 4
-rw-r--r--  src/mongo/db/exec/update_stage.cpp | 15
-rw-r--r--  src/mongo/db/exec/working_set.cpp | 45
-rw-r--r--  src/mongo/db/exec/working_set.h | 208
-rw-r--r--  src/mongo/db/exec/working_set_common.cpp | 37
-rw-r--r--  src/mongo/db/exec/working_set_common.h | 9
-rw-r--r--  src/mongo/db/exec/working_set_test.cpp | 14
-rw-r--r--  src/mongo/db/exec/write_stage_common.cpp | 4
-rw-r--r--  src/mongo/db/index/sort_key_generator.cpp | 2
-rw-r--r--  src/mongo/db/index/sort_key_generator_test.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/document.cpp | 12
-rw-r--r--  src/mongo/db/pipeline/document.h | 34
-rw-r--r--  src/mongo/db/pipeline/document_internal.h | 21
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp | 7
-rw-r--r--  src/mongo/db/pipeline/value.cpp | 5
-rw-r--r--  src/mongo/db/pipeline/value.h | 2
-rw-r--r--  src/mongo/db/query/plan_executor_impl.cpp | 10
-rw-r--r--  src/mongo/db/storage/snapshot.h | 5
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp | 12
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp | 4
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp | 8
-rw-r--r--  src/mongo/dbtests/query_stage_delete.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_distinct.cpp | 2
-rw-r--r--  src/mongo/dbtests/query_stage_ensure_sorted.cpp | 4
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp | 7
-rw-r--r--  src/mongo/dbtests/query_stage_limit_skip.cpp | 2
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp | 8
-rw-r--r--  src/mongo/dbtests/query_stage_multiplan.cpp | 4
-rw-r--r--  src/mongo/dbtests/query_stage_near.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp | 16
-rw-r--r--  src/mongo/dbtests/query_stage_sort_key_generator.cpp | 2
-rw-r--r--  src/mongo/dbtests/query_stage_trial.cpp | 4
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp | 14
51 files changed, 345 insertions, 297 deletions
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index fc05460ac46..c6ce95ebf55 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -131,7 +131,7 @@ void _addWorkingSetMember(OperationContext* opCtx,
WorkingSetMember* member = ws->get(id);
member->keyData.clear();
member->recordId = RecordId();
- member->obj = Snapshotted<BSONObj>(SnapshotId(), maybe);
+ member->resetDocument(SnapshotId(), maybe);
member->transitionToOwnedObj();
root->pushBack(id);
}
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index ea037bd117b..68d2465e48a 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -190,7 +190,7 @@ public:
WorkingSetMember* member = ws->get(id);
member->keyData.clear();
member->recordId = RecordId();
- member->obj = Snapshotted<BSONObj>(SnapshotId(), indexSpec.getOwned());
+ member->resetDocument(SnapshotId(), indexSpec.getOwned());
member->transitionToOwnedObj();
root->pushBack(id);
}
diff --git a/src/mongo/db/exec/and_common.h b/src/mongo/db/exec/and_common.h
index fe7a0573694..ca063852aa6 100644
--- a/src/mongo/db/exec/and_common.h
+++ b/src/mongo/db/exec/and_common.h
@@ -64,7 +64,7 @@ public:
invariant(src.getState() == WorkingSetMember::RID_AND_OBJ);
// 'src' has the full document but 'dest' doesn't so we need to copy it over.
- dest->obj = src.obj;
+ dest->doc = src.doc;
dest->makeObjOwnedIfNeeded();
// We have an object so we don't need key data.
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index a3e6c4d3935..0c46d328c8a 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -139,8 +139,7 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
} else if (PlanStage::FAILURE == state) {
// On failure, fall back to replanning the whole query. We neither evict the
// existing cache entry nor cache the result of replanning.
- BSONObj statusObj;
- WorkingSetCommon::getStatusMemberObject(*_ws, id, &statusObj);
+ BSONObj statusObj = WorkingSetCommon::getStatusMemberDocument(*_ws, id)->toBson();
LOG(1) << "Execution of cached plan failed, falling back to replan."
<< " query: " << redact(_canonicalQuery->toStringShort())
diff --git a/src/mongo/db/exec/change_stream_proxy.cpp b/src/mongo/db/exec/change_stream_proxy.cpp
index 3d6da255e53..7f5da7c214e 100644
--- a/src/mongo/db/exec/change_stream_proxy.cpp
+++ b/src/mongo/db/exec/change_stream_proxy.cpp
@@ -50,15 +50,15 @@ ChangeStreamProxyStage::ChangeStreamProxyStage(OperationContext* opCtx,
_latestOplogTimestamp = ResumeToken::parse(_postBatchResumeToken).getData().clusterTime;
}
-boost::optional<BSONObj> ChangeStreamProxyStage::getNextBson() {
+boost::optional<Document> ChangeStreamProxyStage::getNext() {
if (auto next = _pipeline->getNext()) {
// While we have more results to return, we track both the timestamp and the resume token of
// the latest event observed in the oplog, the latter via its sort key metadata field.
- auto nextBSON = _validateAndConvertToBSON(*next);
+ _validateResumeToken(*next);
_latestOplogTimestamp = PipelineD::getLatestOplogTimestamp(_pipeline.get());
_postBatchResumeToken = next->metadata().getSortKey();
_setSpeculativeReadTimestamp();
- return nextBSON;
+ return next;
}
// We ran out of results to return. Check whether the oplog cursor has moved forward since the
@@ -76,10 +76,10 @@ boost::optional<BSONObj> ChangeStreamProxyStage::getNextBson() {
return boost::none;
}
-BSONObj ChangeStreamProxyStage::_validateAndConvertToBSON(const Document& event) const {
+void ChangeStreamProxyStage::_validateResumeToken(const Document& event) const {
// If we are producing output to be merged on mongoS, then no stages can have modified the _id.
if (_includeMetaData) {
- return event.toBsonWithMetaData();
+ return;
}
// Confirm that the document _id field matches the original resume token in the sort key field.
auto eventBSON = event.toBson();
@@ -95,7 +95,6 @@ BSONObj ChangeStreamProxyStage::_validateAndConvertToBSON(const Document& event)
<< BSON("_id" << resumeToken) << " but found: "
<< (eventBSON["_id"] ? BSON("_id" << eventBSON["_id"]) : BSONObj()),
idField.binaryEqual(resumeToken));
- return eventBSON;
}
void ChangeStreamProxyStage::_setSpeculativeReadTimestamp() {
diff --git a/src/mongo/db/exec/change_stream_proxy.h b/src/mongo/db/exec/change_stream_proxy.h
index 3659dff3cbc..0cfc9d8d825 100644
--- a/src/mongo/db/exec/change_stream_proxy.h
+++ b/src/mongo/db/exec/change_stream_proxy.h
@@ -77,13 +77,13 @@ public:
}
protected:
- boost::optional<BSONObj> getNextBson() final;
+ boost::optional<Document> getNext() final;
private:
/**
- * Verifies that the docs's resume token has not been modified, then converts the doc to BSON.
+ * Verifies that the doc's resume token has not been modified.
*/
- BSONObj _validateAndConvertToBSON(const Document& event) const;
+ void _validateResumeToken(const Document& event) const;
/**
* Set the speculative majority read timestamp if we have scanned up to a certain oplog
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 9ebd656512d..48ace4d7261 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -186,7 +186,8 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->recordId = record->id;
- member->obj = {getOpCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
+ member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(),
+ record->data.releaseToBson());
_workingSet->transitionToRecordIdAndObj(id);
return returnIfMatches(member, id, out);
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 5bfc34510d7..c31ce92b72d 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -183,12 +183,11 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
if (_params->returnDeleted) {
// Save a copy of the document that is about to get deleted, but keep it in the RID_AND_OBJ
// state in case we need to retry deleting it.
- BSONObj deletedDoc = member->obj.value();
- member->obj.setValue(deletedDoc.getOwned());
+ member->makeObjOwnedIfNeeded();
}
if (_params->removeSaver) {
- uassertStatusOK(_params->removeSaver->goingToDelete(member->obj.value()));
+ uassertStatusOK(_params->removeSaver->goingToDelete(member->doc.value().toBson()));
}
// TODO: Do we want to buffer docs and delete them in a group rather than saving/restoring state
diff --git a/src/mongo/db/exec/filter.h b/src/mongo/db/exec/filter.h
index dcc079c9136..fff46d553dd 100644
--- a/src/mongo/db/exec/filter.h
+++ b/src/mongo/db/exec/filter.h
@@ -42,20 +42,20 @@ namespace mongo {
*/
class WorkingSetMatchableDocument : public MatchableDocument {
public:
- WorkingSetMatchableDocument(WorkingSetMember* wsm) : _wsm(wsm) {}
+ WorkingSetMatchableDocument(WorkingSetMember* wsm)
+ : _wsm(wsm), _obj(_wsm->doc.value().toBson()) {}
// This is only called by a $where query. The query system must be smart enough to realize
// that it should do a fetch beforehand.
BSONObj toBSON() const {
- invariant(_wsm->hasObj());
- return _wsm->obj.value();
+ return _obj;
}
ElementIterator* allocateIterator(const ElementPath* path) const final {
// BSONElementIterator does some interesting things with arrays that I don't think
// SimpleArrayElementIterator does.
if (_wsm->hasObj()) {
- return new BSONElementIterator(path, _wsm->obj.value());
+ return new BSONElementIterator(path, _obj);
}
// NOTE: This (kind of) duplicates code in WorkingSetMember::getFieldDotted.
@@ -95,6 +95,7 @@ public:
private:
WorkingSetMember* _wsm;
+ BSONObj _obj;
};
class IndexKeyMatchableDocument : public MatchableDocument {
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index ef1fb4e2aa9..d9e7749e5fc 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -151,7 +151,7 @@ static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams
// Extract all the geometries out of this document for the near query
std::vector<std::unique_ptr<StoredGeometry>> geometries;
- extractGeometries(member->obj.value(), nearParams.nearQuery->field, &geometries);
+ extractGeometries(member->doc.value().toBson(), nearParams.nearQuery->field, &geometries);
// Compute the minimum distance of all the geometries in the document
double minDistance = -1;
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index 13a5837b53d..4ff011d024e 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -131,7 +131,7 @@ PlanStage::StageState IDHackStage::advance(WorkingSetID id,
invariant(member->hasObj());
if (_addKeyMetadata) {
- BSONObj ownedKeyObj = member->obj.value()["_id"].wrap().getOwned();
+ BSONObj ownedKeyObj = member->doc.value().toBson()["_id"].wrap().getOwned();
member->metadata().setIndexKey(IndexKeyEntry::rehydrateKey(_key, ownedKeyObj));
}
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index dba74089ddc..761d6b21ed0 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -74,7 +74,8 @@ PlanStage::StageState MultiIteratorStage::doWork(WorkingSetID* out) {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
member->recordId = record->id;
- member->obj = {getOpCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
+ member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(),
+ record->data.releaseToBson());
_ws->transitionToRecordIdAndObj(*out);
return PlanStage::ADVANCED;
}
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index cf144ec98e6..c0c1f2c23d2 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -72,16 +72,22 @@ PlanStage::StageState PipelineProxyStage::doWork(WorkingSetID* out) {
if (!_stash.empty()) {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), _stash.back());
+ if (_includeMetaData && _stash.back().metadata()) {
+ member->metadata() = _stash.back().metadata();
+ }
+ member->doc = {SnapshotId(), std::move(_stash.back())};
_stash.pop_back();
member->transitionToOwnedObj();
return PlanStage::ADVANCED;
}
- if (boost::optional<BSONObj> next = getNextBson()) {
+ if (auto next = getNext()) {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), *next);
+ if (_includeMetaData && next->metadata()) {
+ member->metadata() = next->metadata();
+ }
+ member->doc = {SnapshotId(), std::move(*next)};
member->transitionToOwnedObj();
return PlanStage::ADVANCED;
}
@@ -93,8 +99,8 @@ bool PipelineProxyStage::isEOF() {
if (!_stash.empty())
return false;
- if (boost::optional<BSONObj> next = getNextBson()) {
- _stash.push_back(*next);
+ if (auto next = getNext()) {
+ _stash.emplace_back(*next);
return false;
}
@@ -120,16 +126,8 @@ unique_ptr<PlanStageStats> PipelineProxyStage::getStats() {
return ret;
}
-boost::optional<BSONObj> PipelineProxyStage::getNextBson() {
- if (auto next = _pipeline->getNext()) {
- if (_includeMetaData) {
- return next->toBsonWithMetaData();
- } else {
- return next->toBson();
- }
- }
-
- return boost::none;
+boost::optional<Document> PipelineProxyStage::getNext() {
+ return _pipeline->getNext();
}
std::string PipelineProxyStage::getPlanSummaryStr() const {
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index bba37c6b489..0bdbbd69a9f 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -91,7 +91,7 @@ protected:
WorkingSet* ws,
const char* stageTypeName);
- virtual boost::optional<BSONObj> getNextBson();
+ virtual boost::optional<Document> getNext();
void doDispose() final;
// Items in the _stash should be returned before pulling items from _pipeline.
@@ -99,7 +99,7 @@ protected:
const bool _includeMetaData;
private:
- std::vector<BSONObj> _stash;
+ std::vector<Document> _stash;
WorkingSet* _ws;
};
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index f48a09ffb02..cc7ba51c095 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
+#include "mongo/db/pipeline/document.h"
#include "mongo/db/record_id.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -79,7 +80,7 @@ double textScore(const WorkingSetMember& member) {
void transitionMemberToOwnedObj(const BSONObj& bo, WorkingSetMember* member) {
member->keyData.clear();
member->recordId = RecordId();
- member->obj = Snapshotted<BSONObj>(SnapshotId(), bo);
+ member->resetDocument(SnapshotId(), bo);
member->transitionToOwnedObj();
}
@@ -92,7 +93,7 @@ StatusWith<BSONObj> provideMetaFieldsAndPerformExec(const ProjectionExec& exec,
return Status(ErrorCodes::InternalError, "near loc proj requested but no data available");
return member.hasObj()
- ? exec.project(member.obj.value(),
+ ? exec.project(member.doc.value().toBson(),
exec.needsGeoNearDistance()
? boost::optional<const double>(geoDistance(member))
: boost::none,
@@ -277,7 +278,6 @@ Status ProjectionStageCovered::transform(WorkingSetMember* member) const {
}
++keyIndex;
}
-
transitionMemberToOwnedObj(bob.obj(), member);
return Status::OK();
}
@@ -300,12 +300,10 @@ Status ProjectionStageSimple::transform(WorkingSetMember* member) const {
// Apply the SIMPLE_DOC projection.
// Look at every field in the source document and see if we're including it.
- BSONObjIterator inputIt(member->obj.value());
- while (inputIt.more()) {
- BSONElement elt = inputIt.next();
+ auto objToProject = member->doc.value().toBson();
+ for (auto&& elt : objToProject) {
auto fieldIt = _includedFields.find(elt.fieldNameStringData());
if (_includedFields.end() != fieldIt) {
- // If so, add it to the builder.
bob.append(elt);
}
}
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 879e4eef27d..ed25ef0ad8e 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -91,8 +91,9 @@ PlanStage::StageState ShardFilterStage::doWork(WorkingSetID* out) {
// Skip this working set member with a warning - no shard key should not be
// possible unless manually inserting data into a shard
- warning() << "no shard key found in document " << redact(member->obj.value())
- << " for shard key pattern " << _shardFilterer.getKeyPattern() << ", "
+ warning() << "no shard key found in document "
+ << redact(member->doc.value().toBson()) << " for shard key pattern "
+ << _shardFilterer.getKeyPattern() << ", "
<< "document may have been inserted manually into shard";
} else {
invariant(res == ShardFilterer::DocumentBelongsResult::kDoesNotBelong);
diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp
index 7c8a1696399..65971d6ac15 100644
--- a/src/mongo/db/exec/sort_test.cpp
+++ b/src/mongo/db/exec/sort_test.cpp
@@ -92,7 +92,7 @@ public:
// Insert obj from input array into working set.
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), obj);
+ wsm->doc = {SnapshotId(), Document{obj}};
wsm->transitionToOwnedObj();
queuedDataStage->pushBack(id);
}
@@ -130,7 +130,7 @@ public:
BSONArrayBuilder arr(bob.subarrayStart("output"));
while (state == PlanStage::ADVANCED) {
WorkingSetMember* member = ws.get(id);
- const BSONObj& obj = member->obj.value();
+ BSONObj obj = member->doc.value().toBson();
arr.append(obj);
state = sort.work(&id);
}
diff --git a/src/mongo/db/exec/text_match.cpp b/src/mongo/db/exec/text_match.cpp
index a74a7149506..b405608c134 100644
--- a/src/mongo/db/exec/text_match.cpp
+++ b/src/mongo/db/exec/text_match.cpp
@@ -88,7 +88,7 @@ PlanStage::StageState TextMatchStage::doWork(WorkingSetID* out) {
WorkingSetMember* wsm = _ws->get(*out);
// Filter for phrases and negated terms.
- if (!_ftsMatcher.matches(wsm->obj.value())) {
+ if (!_ftsMatcher.matches(wsm->doc.value().toBson())) {
_ws->free(*out);
*out = WorkingSet::INVALID_ID;
++_specificStats.docsRejected;
diff --git a/src/mongo/db/exec/trial_stage.cpp b/src/mongo/db/exec/trial_stage.cpp
index a126d5c2bc7..c54c6e91dc9 100644
--- a/src/mongo/db/exec/trial_stage.cpp
+++ b/src/mongo/db/exec/trial_stage.cpp
@@ -143,8 +143,8 @@ PlanStage::StageState TrialStage::_workTrialPlan(WorkingSetID* out) {
return NEED_TIME;
case PlanStage::FAILURE:
// Either of these cause us to immediately end the trial phase and switch to the backup.
- BSONObj statusObj;
- WorkingSetCommon::getStatusMemberObject(*_ws, *out, &statusObj);
+ auto statusDoc = WorkingSetCommon::getStatusMemberDocument(*_ws, *out);
+ BSONObj statusObj = statusDoc ? statusDoc->toBson() : BSONObj();
LOG(1) << "Trial plan failed; switching to backup plan. Status: " << redact(statusObj);
_specificStats.trialCompleted = true;
_replaceCurrentPlan(_backupPlan);
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 6236e45a6a0..4da1d88b854 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -662,8 +662,8 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
BSONObj newObj = _specificStats.objInserted;
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
- member->obj = Snapshotted<BSONObj>(getOpCtx()->recoveryUnit()->getSnapshotId(),
- newObj.getOwned());
+ member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(),
+ newObj.getOwned());
member->transitionToOwnedObj();
return PlanStage::ADVANCED;
}
@@ -759,14 +759,15 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// If we care about the pre-updated version of the doc, save it out here.
BSONObj oldObj;
+ SnapshotId oldSnapshot = member->doc.snapshotId();
if (_params.request->shouldReturnOldDocs()) {
- oldObj = member->obj.value().getOwned();
+ oldObj = member->doc.value().toBson().getOwned();
}
BSONObj newObj;
try {
// Do the update, get us the new version of the doc.
- newObj = transformAndUpdate(member->obj, recordId);
+ newObj = transformAndUpdate({oldSnapshot, member->doc.value().toBson()}, recordId);
} catch (const WriteConflictException&) {
memberFreer.dismiss(); // Keep this member around so we can retry updating it.
return prepareToRetryWSM(id, out);
@@ -775,11 +776,11 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// Set member's obj to be the doc we want to return.
if (_params.request->shouldReturnAnyDocs()) {
if (_params.request->shouldReturnNewDocs()) {
- member->obj = Snapshotted<BSONObj>(getOpCtx()->recoveryUnit()->getSnapshotId(),
- newObj.getOwned());
+ member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(),
+ newObj.getOwned());
} else {
invariant(_params.request->shouldReturnOldDocs());
- member->obj.setValue(oldObj);
+ member->resetDocument(oldSnapshot, oldObj);
}
member->recordId = RecordId();
member->transitionToOwnedObj();
diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp
index 400be5acb83..1c565cca8cd 100644
--- a/src/mongo/db/exec/working_set.cpp
+++ b/src/mongo/db/exec/working_set.cpp
@@ -39,17 +39,8 @@ using std::string;
namespace dps = ::mongo::dotted_path_support;
-WorkingSet::MemberHolder::MemberHolder() : member(nullptr) {}
-WorkingSet::MemberHolder::~MemberHolder() {}
-
WorkingSet::WorkingSet() : _freeList(INVALID_ID) {}
-WorkingSet::~WorkingSet() {
- for (size_t i = 0; i < _data.size(); i++) {
- delete _data[i].member;
- }
-}
-
WorkingSetID WorkingSet::allocate() {
if (_freeList == INVALID_ID) {
// The free list is empty so we need to make a single new WSM to return. This relies on
@@ -58,7 +49,6 @@ WorkingSetID WorkingSet::allocate() {
WorkingSetID id = _data.size();
_data.resize(_data.size() + 1);
_data.back().nextFreeOrSelf = id;
- _data.back().member = new WorkingSetMember();
return id;
}
@@ -75,15 +65,12 @@ void WorkingSet::free(WorkingSetID i) {
verify(holder.nextFreeOrSelf == i); // ID currently in use.
// Free resources and push this WSM to the head of the freelist.
- holder.member->clear();
+ holder.member.clear();
holder.nextFreeOrSelf = _freeList;
_freeList = i;
}
void WorkingSet::clear() {
- for (size_t i = 0; i < _data.size(); i++) {
- delete _data[i].member;
- }
_data.clear();
// Since working set is now empty, the free list pointer should
@@ -120,14 +107,10 @@ std::vector<WorkingSetID> WorkingSet::getAndClearYieldSensitiveIds() {
// WorkingSetMember
//
-WorkingSetMember::WorkingSetMember() {}
-
-WorkingSetMember::~WorkingSetMember() {}
-
void WorkingSetMember::clear() {
_metadata = DocumentMetadataFields{};
keyData.clear();
- obj.reset();
+ doc = {SnapshotId(), Document()};
_state = WorkingSetMember::INVALID;
}
@@ -136,7 +119,7 @@ WorkingSetMember::MemberState WorkingSetMember::getState() const {
}
void WorkingSetMember::transitionToOwnedObj() {
- invariant(obj.value().isOwned());
+ invariant(doc.value().isOwned());
_state = OWNED_OBJ;
}
@@ -150,19 +133,23 @@ bool WorkingSetMember::hasObj() const {
}
bool WorkingSetMember::hasOwnedObj() const {
- return _state == OWNED_OBJ || (_state == RID_AND_OBJ && obj.value().isOwned());
+ return _state == OWNED_OBJ || _state == RID_AND_OBJ;
}
void WorkingSetMember::makeObjOwnedIfNeeded() {
- if (_state == RID_AND_OBJ && !obj.value().isOwned()) {
- obj.setValue(obj.value().getOwned());
+ if (_state == RID_AND_OBJ && !doc.value().isOwned()) {
+ doc.value() = doc.value().getOwned();
}
}
bool WorkingSetMember::getFieldDotted(const string& field, BSONElement* out) const {
// If our state is such that we have an object, use it.
if (hasObj()) {
- *out = dps::extractElementAtPath(obj.value(), field);
+ // The document must not be modified. Otherwise toBson() call would create a temporary BSON
+ // that would get destroyed at the end of this function. *out would then point to dangling
+ // memory.
+ invariant(!doc.value().isModified());
+ *out = dps::extractElementAtPath(doc.value().toBson(), field);
return true;
}
@@ -182,10 +169,8 @@ size_t WorkingSetMember::getMemUsage() const {
memUsage += sizeof(RecordId);
}
- // XXX: Unowned objects count towards current size.
- // See SERVER-12579
if (hasObj()) {
- memUsage += obj.value().objsize();
+ memUsage += doc.value().getApproximateSize();
}
for (size_t i = 0; i < keyData.size(); ++i) {
@@ -196,4 +181,10 @@ size_t WorkingSetMember::getMemUsage() const {
return memUsage;
}
+void WorkingSetMember::resetDocument(SnapshotId snapshot, const BSONObj& obj) {
+ doc.setSnapshotId(snapshot);
+ MutableDocument md(std::move(doc.value()));
+ md.reset(obj, false);
+ doc.value() = md.freeze();
+}
} // namespace mongo
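
For reference alongside the resetDocument() implementation above, a hedged sketch of how a scan stage is expected to combine it with the ownership helpers; 'ws', 'opCtx', and 'record' mirror the collection-scan hunk earlier and are not new API:

    // Point the member's Document at the (possibly unowned) record data under
    // the current storage snapshot, reusing the member's DocumentStorage.
    WorkingSetID id = ws->allocate();
    WorkingSetMember* member = ws->get(id);
    member->recordId = record->id;
    member->resetDocument(opCtx->recoveryUnit()->getSnapshotId(), record->data.releaseToBson());
    ws->transitionToRecordIdAndObj(id);

    // Before a yield or snapshot change the Document must stop referencing
    // storage-engine memory; this is a no-op if it is already owned.
    member->makeObjOwnedIfNeeded();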
diff --git a/src/mongo/db/exec/working_set.h b/src/mongo/db/exec/working_set.h
index 0f6089d978e..6398214ce87 100644
--- a/src/mongo/db/exec/working_set.h
+++ b/src/mongo/db/exec/working_set.h
@@ -33,6 +33,7 @@
#include <vector>
#include "mongo/db/jsobj.h"
+#include "mongo/db/pipeline/document.h"
#include "mongo/db/pipeline/document_metadata_fields.h"
#include "mongo/db/record_id.h"
#include "mongo/db/storage/snapshot.h"
@@ -46,101 +47,6 @@ class WorkingSetMember;
typedef size_t WorkingSetID;
/**
- * All data in use by a query. Data is passed through the stage tree by referencing the ID of
- * an element of the working set. Stages can add elements to the working set, delete elements
- * from the working set, or mutate elements in the working set.
- */
-class WorkingSet {
- WorkingSet(const WorkingSet&) = delete;
- WorkingSet& operator=(const WorkingSet&) = delete;
-
-public:
- static const WorkingSetID INVALID_ID = WorkingSetID(-1);
-
- WorkingSet();
- ~WorkingSet();
-
- /**
- * Allocate a new query result and return the ID used to get and free it.
- */
- WorkingSetID allocate();
-
- /**
- * Get the i-th mutable query result. The pointer will be valid for this id until freed.
- * Do not delete the returned pointer as the WorkingSet retains ownership. Call free() to
- * release it.
- */
- WorkingSetMember* get(WorkingSetID i) const {
- dassert(i < _data.size()); // ID has been allocated.
- dassert(_data[i].nextFreeOrSelf == i); // ID currently in use.
- return _data[i].member;
- }
-
- /**
- * Returns true if WorkingSetMember with id 'i' is free.
- */
- bool isFree(WorkingSetID i) const {
- return _data[i].nextFreeOrSelf != i;
- }
-
- /**
- * Deallocate the i-th query result and release its resources.
- */
- void free(WorkingSetID i);
-
- /**
- * Removes and deallocates all members of this working set.
- */
- void clear();
-
- //
- // WorkingSetMember state transitions
- //
-
- void transitionToRecordIdAndIdx(WorkingSetID id);
- void transitionToRecordIdAndObj(WorkingSetID id);
- void transitionToOwnedObj(WorkingSetID id);
-
- /**
- * Returns the list of working set ids that have transitioned into the RID_AND_IDX state since
- * the last yield. The members corresponding to these ids may have since transitioned to a
- * different state or been freed, so these cases must be handled by the caller. The list may
- * also contain duplicates.
- *
- * Execution stages are *not* responsible for managing this list, as working set ids are added
- * to the set automatically by WorkingSet::transitionToRecordIdAndIdx().
- *
- * As a side effect, calling this method clears the list of flagged yield sensitive ids kept by
- * the working set.
- */
- std::vector<WorkingSetID> getAndClearYieldSensitiveIds();
-
-private:
- struct MemberHolder {
- MemberHolder();
- ~MemberHolder();
-
- // Free list link if freed. Points to self if in use.
- WorkingSetID nextFreeOrSelf;
-
- // Owning pointer
- WorkingSetMember* member;
- };
-
- // All WorkingSetIDs are indexes into this, except for INVALID_ID.
- // Elements are added to _freeList rather than removed when freed.
- std::vector<MemberHolder> _data;
-
- // Index into _data, forming a linked-list using MemberHolder::nextFreeOrSelf as the next
- // link. INVALID_ID is the list terminator since 0 is a valid index.
- // If _freeList == INVALID_ID, the free list is empty and all elements in _data are in use.
- WorkingSetID _freeList;
-
- // Contains ids of WSMs that may need to be adjusted when we next yield.
- std::vector<WorkingSetID> _yieldSensitiveIds;
-};
-
-/**
* The key data extracted from an index. Keeps track of both the key (currently a BSONObj) and
* the index that provided the key. The index key pattern is required to correctly interpret
* the key.
@@ -191,13 +97,7 @@ struct IndexKeyDatum {
* A WorkingSetMember may have any of the data above.
*/
class WorkingSetMember {
- WorkingSetMember(const WorkingSetMember&) = delete;
- WorkingSetMember& operator=(const WorkingSetMember&) = delete;
-
public:
- WorkingSetMember();
- ~WorkingSetMember();
-
/**
* Reset to an "empty" state.
*/
@@ -233,7 +133,7 @@ public:
//
RecordId recordId;
- Snapshotted<BSONObj> obj;
+ Snapshotted<Document> doc;
std::vector<IndexKeyDatum> keyData;
// True if this WSM has survived a yield in RID_AND_IDX state.
@@ -302,6 +202,12 @@ public:
_metadata = std::move(metadata);
}
+ /**
+ * Resets the underlying BSONObj in the doc field. This avoids unnecessary allocation/
+ * deallocation of Document/DocumentStorage objects.
+ */
+ void resetDocument(SnapshotId snapshot, const BSONObj& obj);
+
private:
friend class WorkingSet;
@@ -310,4 +216,102 @@ private:
DocumentMetadataFields _metadata;
};
+/**
+ * All data in use by a query. Data is passed through the stage tree by referencing the ID of
+ * an element of the working set. Stages can add elements to the working set, delete elements
+ * from the working set, or mutate elements in the working set.
+ */
+class WorkingSet {
+ WorkingSet(const WorkingSet&) = delete;
+ WorkingSet& operator=(const WorkingSet&) = delete;
+
+public:
+ static const WorkingSetID INVALID_ID = WorkingSetID(-1);
+
+ WorkingSet();
+
+ ~WorkingSet() = default;
+
+ /**
+ * Allocate a new query result and return the ID used to get and free it.
+ */
+ WorkingSetID allocate();
+
+ /**
+ * Get the i-th mutable query result. The pointer will be valid for this id until freed.
+ * Do not delete the returned pointer as the WorkingSet retains ownership. Call free() to
+ * release it.
+ */
+ WorkingSetMember* get(WorkingSetID i) {
+ dassert(i < _data.size()); // ID has been allocated.
+ dassert(_data[i].nextFreeOrSelf == i); // ID currently in use.
+ return &_data[i].member;
+ }
+
+ const WorkingSetMember* get(WorkingSetID i) const {
+ dassert(i < _data.size()); // ID has been allocated.
+ dassert(_data[i].nextFreeOrSelf == i); // ID currently in use.
+ return &_data[i].member;
+ }
+
+ /**
+ * Returns true if WorkingSetMember with id 'i' is free.
+ */
+ bool isFree(WorkingSetID i) const {
+ return _data[i].nextFreeOrSelf != i;
+ }
+
+ /**
+ * Deallocate the i-th query result and release its resources.
+ */
+ void free(WorkingSetID i);
+
+ /**
+ * Removes and deallocates all members of this working set.
+ */
+ void clear();
+
+ //
+ // WorkingSetMember state transitions
+ //
+
+ void transitionToRecordIdAndIdx(WorkingSetID id);
+ void transitionToRecordIdAndObj(WorkingSetID id);
+ void transitionToOwnedObj(WorkingSetID id);
+
+ /**
+ * Returns the list of working set ids that have transitioned into the RID_AND_IDX state since
+ * the last yield. The members corresponding to these ids may have since transitioned to a
+ * different state or been freed, so these cases must be handled by the caller. The list may
+ * also contain duplicates.
+ *
+ * Execution stages are *not* responsible for managing this list, as working set ids are added
+ * to the set automatically by WorkingSet::transitionToRecordIdAndIdx().
+ *
+ * As a side effect, calling this method clears the list of flagged yield sensitive ids kept by
+ * the working set.
+ */
+ std::vector<WorkingSetID> getAndClearYieldSensitiveIds();
+
+private:
+ struct MemberHolder {
+ // Free list link if freed. Points to self if in use.
+ WorkingSetID nextFreeOrSelf;
+
+ WorkingSetMember member;
+ };
+
+ // All WorkingSetIDs are indexes into this, except for INVALID_ID.
+ // Elements are added to _freeList rather than removed when freed.
+ std::vector<MemberHolder> _data;
+
+ // Index into _data, forming a linked-list using MemberHolder::nextFreeOrSelf as the next
+ // link. INVALID_ID is the list terminator since 0 is a valid index.
+ // If _freeList == INVALID_ID, the free list is empty and all elements in _data are in use.
+ WorkingSetID _freeList;
+
+ // Contains ids of WSMs that may need to be adjusted when we next yield.
+ std::vector<WorkingSetID> _yieldSensitiveIds;
+};
+
} // namespace mongo
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 39f08c153bb..66f2da690d5 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -54,7 +54,6 @@ void WorkingSetCommon::prepareForSnapshotChange(WorkingSet* workingSet) {
}
}
-// static
bool WorkingSetCommon::fetch(OperationContext* opCtx,
WorkingSet* workingSet,
WorkingSetID id,
@@ -65,13 +64,12 @@ bool WorkingSetCommon::fetch(OperationContext* opCtx,
// state appropriately.
invariant(member->hasRecordId());
- member->obj.reset();
auto record = cursor->seekExact(member->recordId);
if (!record) {
return false;
}
- member->obj = {opCtx->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
+ member->resetDocument(opCtx->recoveryUnit()->getSnapshotId(), record->data.releaseToBson());
if (member->isSuspicious) {
// Make sure that all of the keyData is still valid for this copy of the document.
@@ -86,7 +84,7 @@ bool WorkingSetCommon::fetch(OperationContext* opCtx,
KeyStringSet* multikeyMetadataKeys = nullptr;
MultikeyPaths* multikeyPaths = nullptr;
auto* iam = member->keyData[i].index;
- iam->getKeys(member->obj.value(),
+ iam->getKeys(member->doc.value().toBson(),
IndexAccessMethod::GetKeysMode::kEnforceConstraints,
&keys,
multikeyMetadataKeys,
@@ -110,7 +108,6 @@ bool WorkingSetCommon::fetch(OperationContext* opCtx,
return true;
}
-// static
BSONObj WorkingSetCommon::buildMemberStatusObject(const Status& status) {
BSONObjBuilder bob;
bob.append("ok", status.isOK() ? 1.0 : 0.0);
@@ -123,45 +120,37 @@ BSONObj WorkingSetCommon::buildMemberStatusObject(const Status& status) {
return bob.obj();
}
-// static
WorkingSetID WorkingSetCommon::allocateStatusMember(WorkingSet* ws, const Status& status) {
invariant(ws);
WorkingSetID wsid = ws->allocate();
WorkingSetMember* member = ws->get(wsid);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), buildMemberStatusObject(status));
+ member->resetDocument(SnapshotId(), buildMemberStatusObject(status));
member->transitionToOwnedObj();
return wsid;
}
-// static
bool WorkingSetCommon::isValidStatusMemberObject(const BSONObj& obj) {
return obj.hasField("ok") && obj["code"].type() == NumberInt && obj["errmsg"].type() == String;
}
-// static
-void WorkingSetCommon::getStatusMemberObject(const WorkingSet& ws,
- WorkingSetID wsid,
- BSONObj* objOut) {
- invariant(objOut);
-
- // Validate ID and working set member.
+boost::optional<Document> WorkingSetCommon::getStatusMemberDocument(const WorkingSet& ws,
+ WorkingSetID wsid) {
if (WorkingSet::INVALID_ID == wsid) {
- return;
+ return boost::none;
}
- WorkingSetMember* member = ws.get(wsid);
+ auto member = ws.get(wsid);
if (!member->hasOwnedObj()) {
- return;
+ return boost::none;
}
- BSONObj obj = member->obj.value();
+ BSONObj obj = member->doc.value().toBson();
if (!isValidStatusMemberObject(obj)) {
- return;
+ return boost::none;
}
- *objOut = obj;
+ return member->doc.value();
}
-// static
Status WorkingSetCommon::getMemberObjectStatus(const BSONObj& memberObj) {
invariant(WorkingSetCommon::isValidStatusMemberObject(memberObj));
return Status(ErrorCodes::Error(memberObj["code"].numberInt()),
@@ -169,13 +158,11 @@ Status WorkingSetCommon::getMemberObjectStatus(const BSONObj& memberObj) {
memberObj);
}
-// static
Status WorkingSetCommon::getMemberStatus(const WorkingSetMember& member) {
invariant(member.hasObj());
- return getMemberObjectStatus(member.obj.value());
+ return getMemberObjectStatus(member.doc.value().toBson());
}
-// static
std::string WorkingSetCommon::toStatusString(const BSONObj& obj) {
if (!isValidStatusMemberObject(obj)) {
Status unknownStatus(ErrorCodes::UnknownError, "no details available");
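
The new getStatusMemberDocument() replaces the old out-parameter API; a short usage sketch matching how the callers in cached_plan.cpp and trial_stage.cpp consume it, assuming a WorkingSet* 'ws' and a WorkingSetID 'id' (the log line is illustrative):

    // Returns boost::none if 'id' is INVALID_ID or the member does not hold a status object.
    if (auto statusDoc = WorkingSetCommon::getStatusMemberDocument(*ws, id)) {
        BSONObj statusObj = statusDoc->toBson();
        LOG(1) << "plan failed: " << redact(statusObj);
    }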
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index 08770e3eb3e..034adb38fba 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -90,12 +90,11 @@ public:
static bool isValidStatusMemberObject(const BSONObj& obj);
/**
- * Returns object in working set member created with allocateStatusMember().
- * Does not assume isValidStatusMemberObject.
- * If the WSID is invalid or the working set member is created by
- * allocateStatusMember, objOut will not be updated.
+ * If the working set member represents an error status, returns it as a Document (which can
+ * subsequently be converted to Status). Otherwise returns boost::none.
*/
- static void getStatusMemberObject(const WorkingSet& ws, WorkingSetID wsid, BSONObj* objOut);
+ static boost::optional<Document> getStatusMemberDocument(const WorkingSet& ws,
+ WorkingSetID wsid);
/**
* Returns status from working set member object.
diff --git a/src/mongo/db/exec/working_set_test.cpp b/src/mongo/db/exec/working_set_test.cpp
index 16c187e1f06..13015f2fcf9 100644
--- a/src/mongo/db/exec/working_set_test.cpp
+++ b/src/mongo/db/exec/working_set_test.cpp
@@ -77,9 +77,9 @@ TEST_F(WorkingSetFixture, noFieldToGet) {
ASSERT_TRUE(member->getFieldDotted("foo", &elt));
WorkingSetMember* member = ws->get(id);
- member->obj = {SnapshotId(),
- BSON("fake"
- << "obj")};
+ member->doc = {SnapshotId(),
+ Document{BSON("fake"
+ << "obj")}};
ws->transitionToOwnedObj(id);
ASSERT_TRUE(member->getFieldDotted("foo", &elt));
}
@@ -90,9 +90,9 @@ TEST_F(WorkingSetFixture, getFieldUnowned) {
BSONObj obj = BSON(fieldName << 5);
// Not truthful since the RecordId is bogus, but the RecordId isn't accessed anyway...
ws->transitionToRecordIdAndObj(id);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), BSONObj(obj.objdata()));
+ member->doc = {SnapshotId(), Document{BSONObj(obj.objdata())}};
ASSERT_TRUE(obj.isOwned());
- ASSERT_FALSE(member->obj.value().isOwned());
+ ASSERT_FALSE(member->doc.value().isOwned());
// Get out the field we put in.
BSONElement elt;
@@ -104,8 +104,8 @@ TEST_F(WorkingSetFixture, getFieldOwned) {
string fieldName = "x";
BSONObj obj = BSON(fieldName << 5);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), obj);
- ASSERT_TRUE(member->obj.value().isOwned());
+ member->doc = {SnapshotId(), Document{obj}};
+ ASSERT_TRUE(member->doc.value().isOwned());
ws->transitionToOwnedObj(id);
BSONElement elt;
ASSERT_TRUE(member->getFieldDotted(fieldName, &elt));
diff --git a/src/mongo/db/exec/write_stage_common.cpp b/src/mongo/db/exec/write_stage_common.cpp
index 8ea418d6dfa..5d62016cf25 100644
--- a/src/mongo/db/exec/write_stage_common.cpp
+++ b/src/mongo/db/exec/write_stage_common.cpp
@@ -52,7 +52,7 @@ bool ensureStillMatches(const Collection* collection,
// document we are planning to delete may have already been deleted or updated during yield.
WorkingSetMember* member = ws->get(id);
if (!supportsDocLocking() ||
- opCtx->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
+ opCtx->recoveryUnit()->getSnapshotId() != member->doc.snapshotId()) {
std::unique_ptr<SeekableRecordCursor> cursor(collection->getCursor(opCtx));
if (!WorkingSetCommon::fetch(opCtx, ws, id, cursor)) {
@@ -61,7 +61,7 @@ bool ensureStillMatches(const Collection* collection,
}
// Make sure the re-fetched doc still matches the predicate.
- if (cq && !cq->root()->matchesBSON(member->obj.value(), nullptr)) {
+ if (cq && !cq->root()->matchesBSON(member->doc.value().toBson(), nullptr)) {
// No longer matches.
return false;
}
diff --git a/src/mongo/db/index/sort_key_generator.cpp b/src/mongo/db/index/sort_key_generator.cpp
index 44351d8d664..634e0ff2d69 100644
--- a/src/mongo/db/index/sort_key_generator.cpp
+++ b/src/mongo/db/index/sort_key_generator.cpp
@@ -86,7 +86,7 @@ StatusWith<BSONObj> SortKeyGenerator::computeSortKey(const WorkingSetMember& wsm
if (_sortHasMeta && wsm.metadata().hasTextScore()) {
metadata.textScore = wsm.metadata().getTextScore();
}
- return computeSortKeyFromDocument(wsm.obj.value(), &metadata);
+ return computeSortKeyFromDocument(wsm.doc.value().toBson(), &metadata);
}
return computeSortKeyFromIndexKey(wsm);
diff --git a/src/mongo/db/index/sort_key_generator_test.cpp b/src/mongo/db/index/sort_key_generator_test.cpp
index c2bc61e4eb4..169d0cb8369 100644
--- a/src/mongo/db/index/sort_key_generator_test.cpp
+++ b/src/mongo/db/index/sort_key_generator_test.cpp
@@ -269,12 +269,12 @@ public:
: _wsid(_workingSet.allocate()), _member(_workingSet.get(_wsid)) {}
void setRecordIdAndObj(BSONObj obj) {
- _member->obj = {SnapshotId(), std::move(obj)};
+ _member->doc = {SnapshotId(), Document{obj}};
_workingSet.transitionToRecordIdAndObj(_wsid);
}
void setOwnedObj(BSONObj obj) {
- _member->obj = {SnapshotId(), std::move(obj)};
+ _member->doc = {SnapshotId(), Document{obj}};
_workingSet.transitionToOwnedObj(_wsid);
}
diff --git a/src/mongo/db/pipeline/document.cpp b/src/mongo/db/pipeline/document.cpp
index e9b46c9f82a..adbc3692f5b 100644
--- a/src/mongo/db/pipeline/document.cpp
+++ b/src/mongo/db/pipeline/document.cpp
@@ -341,7 +341,7 @@ DocumentStorage::~DocumentStorage() {
}
void DocumentStorage::reset(const BSONObj& bson, bool stripMetadata) {
- _bson = bson.getOwned();
+ _bson = bson;
_bsonIt = BSONObjIterator(_bson);
_stripMetadata = stripMetadata;
_modified = false;
@@ -485,6 +485,16 @@ Document Document::fromBsonWithMetaData(const BSONObj& bson) {
return md.freeze();
}
+Document Document::getOwned() const {
+ if (isOwned()) {
+ return *this;
+ } else {
+ MutableDocument md(*this);
+ md.makeOwned();
+ return md.freeze();
+ }
+}
+
MutableDocument::MutableDocument(size_t expectedFields)
: _storageHolder(nullptr), _storage(_storageHolder) {
if (expectedFields) {
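
The getOwned()/isOwned() pair added above is what the unowned mode in SERVER-42157 hinges on. A small sketch of the intended semantics, using throwaway local names (the pattern follows the working_set_test.cpp hunk below):

    BSONObj owned = BSON("x" << 1);             // owns its buffer
    Document view{BSONObj{owned.objdata()}};    // Document over an unowned view of that buffer
    invariant(!view.isOwned());                 // storage still points into 'owned'

    Document copy = view.getOwned();            // deep copy via MutableDocument::makeOwned()
    invariant(copy.isOwned());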
diff --git a/src/mongo/db/pipeline/document.h b/src/mongo/db/pipeline/document.h
index 606db735c5c..fefbcbcdd67 100644
--- a/src/mongo/db/pipeline/document.h
+++ b/src/mongo/db/pipeline/document.h
@@ -271,8 +271,24 @@ public:
int memUsageForSorter() const {
return getApproximateSize();
}
- Document getOwned() const {
- return *this;
+
+ /**
+ * Returns a document that owns the underlying BSONObj.
+ */
+ Document getOwned() const;
+
+ /**
+ * Returns true if the underlying BSONObj is owned.
+ */
+ bool isOwned() const {
+ return _storage ? _storage->isOwned() : true;
+ }
+
+ /**
+ * Returns true if the document has been modified (i.e. it differs from the underlying BSONObj).
+ */
+ auto isModified() const {
+ return _storage ? _storage->isModified() : false;
}
/// only for testing
@@ -576,6 +592,20 @@ public:
return peek().getApproximateSize();
}
+ /**
+ * Returns true if the underlying BSONObj is owned.
+ */
+ bool isOwned() {
+ return storage().isOwned();
+ }
+
+ /**
+ * Takes the ownership of the underlying BSONObj if it is not owned.
+ */
+ void makeOwned() {
+ storage().makeOwned();
+ }
+
/** Create a new document storage with the BSON object.
*
* The optional parameter 'stripMetadata' controls whether we strip the metadata fields (the
diff --git a/src/mongo/db/pipeline/document_internal.h b/src/mongo/db/pipeline/document_internal.h
index 568274a7f6b..28417a6a46a 100644
--- a/src/mongo/db/pipeline/document_internal.h
+++ b/src/mongo/db/pipeline/document_internal.h
@@ -265,7 +265,10 @@ private:
/// Storage class used by both Document and MutableDocument
class DocumentStorage : public RefCountable {
public:
- DocumentStorage() : DocumentStorage(BSONObj(), false, false) {}
+ DocumentStorage() : DocumentStorage(BSONObj(), false, false) {
+ // Aggregation assumes ownership of underlying BSON even if it is empty.
+ makeOwned();
+ }
/**
* Construct a storage from the BSON. The BSON is lazily processed as fields are requested from
* the document. If we know that the BSON does not contain any metadata fields we can set the
@@ -277,7 +280,7 @@ public:
_usedBytes(0),
_numFields(0),
_hashTabMask(0),
- _bson(bson.getOwned()),
+ _bson(bson),
_bsonIt(_bson),
_stripMetadata(stripMetadata),
_modified(modified) {}
@@ -368,6 +371,15 @@ public:
return _bson.objsize();
}
+ bool isOwned() const {
+ return _bson.isOwned();
+ }
+
+ void makeOwned() {
+ _bson = _bson.getOwned();
+ _bsonIt = BSONObjIterator(_bson);
+ }
+
/**
* Compute the space allocated for the metadata fields. Will account for space allocated for
* unused metadata fields as well.
@@ -394,7 +406,9 @@ public:
* WorkingSetMember.
*/
const DocumentMetadataFields& metadata() const {
- loadLazyMetadata();
+ if (_stripMetadata) {
+ loadLazyMetadata();
+ }
return _metadataFields;
}
@@ -440,6 +454,7 @@ public:
auto isModified() const {
return _modified;
}
+
auto bsonObj() const {
return _bson;
}
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index fde3c279d33..277db77429c 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -74,9 +74,10 @@ DocumentSource::GetNextResult DocumentSourceCursor::doGetNext() {
}
Document DocumentSourceCursor::transformBSONObjToDocument(const BSONObj& obj) const {
- return _dependencies
- ? _dependencies->extractFields(obj)
- : (_inputHasMetadata ? Document::fromBsonWithMetaData(obj) : Document(obj));
+ // Aggregation assumes ownership of underlying BSON.
+ return _dependencies ? _dependencies->extractFields(obj)
+ : (_inputHasMetadata ? Document::fromBsonWithMetaData(obj.getOwned())
+ : Document(obj.getOwned()));
}
void DocumentSourceCursor::loadBatch() {
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
index b804adaf797..e9bd0d4ccc2 100644
--- a/src/mongo/db/pipeline/value.cpp
+++ b/src/mongo/db/pipeline/value.cpp
@@ -151,7 +151,8 @@ Document ValueStorage::getDocument() const {
}
// not in header because document is fwd declared
-Value::Value(const BSONObj& obj) : _storage(Object, Document(obj)) {}
+Value::Value(const BSONObj& obj) : _storage(Object, Document(obj.getOwned())) {}
+Value::Value(const Document& doc) : _storage(Object, doc.isOwned() ? doc : doc.getOwned()) {}
Value::Value(const BSONElement& elem) : _storage(elem.type()) {
switch (elem.type()) {
@@ -174,7 +175,7 @@ Value::Value(const BSONElement& elem) : _storage(elem.type()) {
break;
case Object: {
- _storage.putDocument(Document(elem.embeddedObject()));
+ _storage.putDocument(Document(elem.embeddedObject().getOwned()));
break;
}
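
The Value constructors above now force ownership of their inputs, while a Document may stay unowned; a sketch of the resulting behaviour with illustrative local names:

    BSONObj storage = BSON("a" << 1);
    BSONObj view{storage.objdata()};    // unowned view of 'storage'

    Value fromBson{view};               // Value(const BSONObj&) copies: Document(view.getOwned())
    Document doc{view};                 // the Document itself may remain unowned...
    Value fromDoc{doc};                 // ...but Value(const Document&) copies when !doc.isOwned()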
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
index 296d6d08480..08562f32c4b 100644
--- a/src/mongo/db/pipeline/value.h
+++ b/src/mongo/db/pipeline/value.h
@@ -101,7 +101,7 @@ public:
explicit Value(const OID& value) : _storage(jstOID, value) {}
explicit Value(StringData value) : _storage(String, value) {}
explicit Value(const std::string& value) : _storage(String, StringData(value)) {}
- explicit Value(const Document& doc) : _storage(Object, doc) {}
+ explicit Value(const Document& doc);
explicit Value(const BSONObj& obj);
explicit Value(const BSONArray& arr);
explicit Value(const std::vector<BSONObj>& vec);
diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp
index 0ff05e2bd27..b3f73cf26bc 100644
--- a/src/mongo/db/query/plan_executor_impl.cpp
+++ b/src/mongo/db/query/plan_executor_impl.cpp
@@ -546,7 +546,11 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* obj
*objOut = Snapshotted<BSONObj>(SnapshotId(), member->keyData[0].keyData);
}
} else if (member->hasObj()) {
- *objOut = member->obj;
+ *objOut =
+ Snapshotted<BSONObj>(member->doc.snapshotId(),
+ member->metadata() && member->doc.value().metadata()
+ ? member->doc.value().toBsonWithMetaData()
+ : member->doc.value().toBson());
} else {
_workingSet->free(id);
hasRequestedData = false;
@@ -603,9 +607,9 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* obj
invariant(PlanStage::FAILURE == code);
if (nullptr != objOut) {
- BSONObj statusObj;
invariant(WorkingSet::INVALID_ID != id);
- WorkingSetCommon::getStatusMemberObject(*_workingSet, id, &statusObj);
+ BSONObj statusObj =
+ WorkingSetCommon::getStatusMemberDocument(*_workingSet, id)->toBson();
*objOut = Snapshotted<BSONObj>(SnapshotId(), statusObj);
}
diff --git a/src/mongo/db/storage/snapshot.h b/src/mongo/db/storage/snapshot.h
index d169e4dada0..371e4cec29d 100644
--- a/src/mongo/db/storage/snapshot.h
+++ b/src/mongo/db/storage/snapshot.h
@@ -82,6 +82,11 @@ public:
SnapshotId snapshotId() const {
return _id;
}
+
+ void setSnapshotId(SnapshotId id) {
+ _id = id;
+ }
+
const T& value() const {
return _value;
}
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 1bad15c7280..d48063a2903 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -148,7 +148,7 @@ public:
WorkingSetMember* member = ws->get(id);
ASSERT(member->hasObj());
- return member->obj.value();
+ return member->doc.value().toBson();
}
// We failed to produce a result.
@@ -818,7 +818,7 @@ public:
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
wsm->recordId = RecordId(1);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), dataObj);
+ wsm->doc = {SnapshotId(), Document{dataObj}};
ws.transitionToRecordIdAndObj(id);
childStage1->pushBack(id);
}
@@ -852,7 +852,7 @@ public:
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
wsm->recordId = RecordId(1);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), dataObj);
+ wsm->doc = {SnapshotId(), Document{dataObj}};
ws.transitionToRecordIdAndObj(id);
childStage1->pushBack(id);
}
@@ -863,7 +863,7 @@ public:
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
wsm->recordId = RecordId(2);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), dataObj);
+ wsm->doc = {SnapshotId(), Document{dataObj}};
ws.transitionToRecordIdAndObj(id);
childStage2->pushBack(id);
}
@@ -892,7 +892,7 @@ public:
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
wsm->recordId = RecordId(1);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), dataObj);
+ wsm->doc = {SnapshotId(), Document{dataObj}};
ws.transitionToRecordIdAndObj(id);
childStage1->pushBack(id);
}
@@ -902,7 +902,7 @@ public:
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
wsm->recordId = RecordId(2);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), dataObj);
+ wsm->doc = {SnapshotId(), Document{dataObj}};
ws.transitionToRecordIdAndObj(id);
childStage2->pushBack(id);
}
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 22da16dd594..bfb99241e81 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -135,8 +135,8 @@ public:
ASSERT_NE(state, PlanStage::FAILURE);
if (state == PlanStage::ADVANCED) {
- WorkingSetMember* member = ws.get(id);
- ASSERT(cq->root()->matchesBSON(member->obj.value()));
+ auto member = ws.get(id);
+ ASSERT(cq->root()->matchesBSON(member->doc.value().toBson()));
numResults++;
}
}
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 8c2da93a18f..d067cdd9878 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -264,7 +264,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) {
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(),
- member->obj.value()["foo"].numberInt());
+ member->doc.value()["foo"].getInt());
++count;
}
}
@@ -284,7 +284,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) {
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(),
- member->obj.value()["foo"].numberInt());
+ member->doc.value()["foo"].getInt());
++count;
}
}
@@ -317,7 +317,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObjectBackw
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(),
- member->obj.value()["foo"].numberInt());
+ member->doc.value()["foo"].getInt());
++count;
}
}
@@ -337,7 +337,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObjectBackw
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(),
- member->obj.value()["foo"].numberInt());
+ member->doc.value()["foo"].getInt());
++count;
}
}
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index 708d68e068a..6b9659fc1c5 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -208,7 +208,7 @@ public:
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
const BSONObj oldDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), oldDoc);
+ member->doc = {SnapshotId(), Document{oldDoc}};
ws->transitionToRecordIdAndObj(id);
qds->pushBack(id);
@@ -236,10 +236,10 @@ public:
ASSERT_TRUE(resultMember->hasOwnedObj());
ASSERT_FALSE(resultMember->hasRecordId());
ASSERT_EQUALS(resultMember->getState(), WorkingSetMember::OWNED_OBJ);
- ASSERT_TRUE(resultMember->obj.value().isOwned());
+ ASSERT_TRUE(resultMember->doc.value().isOwned());
// Should be the old value.
- ASSERT_BSONOBJ_EQ(resultMember->obj.value(), oldDoc);
+ ASSERT_BSONOBJ_EQ(resultMember->doc.value().toBson(), oldDoc);
// Should have done the delete.
ASSERT_EQUALS(stats->docsDeleted, 1U);
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index 395dff2ba01..c82e4c5fe00 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -82,7 +82,7 @@ public:
WorkingSetID invalid = WorkingSet::INVALID_ID;
ASSERT_NOT_EQUALS(invalid, wsid);
- WorkingSetMember* member = ws.get(wsid);
+ auto member = ws.get(wsid);
// Distinct hack execution is always covered.
// Key value is retrieved from working set key data
diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
index 20d5750c958..f5d9c8b4c50 100644
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
@@ -73,7 +73,7 @@ public:
// Insert obj from input array into working set.
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), obj);
+ wsm->doc = {SnapshotId(), Document{obj}};
wsm->transitionToOwnedObj();
queuedDataStage->pushBack(id);
}
@@ -100,7 +100,7 @@ public:
ASSERT_NE(state, PlanStage::FAILURE);
if (state == PlanStage::ADVANCED) {
WorkingSetMember* member = ws.get(id);
- const BSONObj& obj = member->obj.value();
+ auto obj = member->doc.value().toBson();
arr.append(obj);
}
}
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index f8bf178e5c3..a03c7a64e8e 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -122,7 +122,8 @@ public:
WorkingSetID id = ws.allocate();
WorkingSetMember* mockMember = ws.get(id);
mockMember->recordId = *recordIds.begin();
- mockMember->obj = coll->docFor(&_opCtx, mockMember->recordId);
+ auto snapshotBson = coll->docFor(&_opCtx, mockMember->recordId);
+ mockMember->doc = {snapshotBson.snapshotId(), Document{snapshotBson.value()}};
ws.transitionToRecordIdAndObj(id);
// Points into our DB.
mockStage->pushBack(id);
@@ -131,9 +132,9 @@ public:
WorkingSetID id = ws.allocate();
WorkingSetMember* mockMember = ws.get(id);
mockMember->recordId = RecordId();
- mockMember->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("foo" << 6));
+ mockMember->doc = {SnapshotId(), Document{BSON("foo" << 6)}};
mockMember->transitionToOwnedObj();
- ASSERT_TRUE(mockMember->obj.value().isOwned());
+ ASSERT_TRUE(mockMember->doc.value().isOwned());
mockStage->pushBack(id);
}
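query_stage_fetch.cpp shows that the Snapshotted<BSONObj> returned by Collection::docFor() is no longer assigned to the member in one step; the snapshot id and the payload are carried over separately. A sketch of that conversion (the helper name is hypothetical):

    // Sketch: rebuild a Snapshotted<Document> from the Snapshotted<BSONObj> that
    // Collection::docFor() returns. 'toSnapshottedDocument' is not part of the patch.
    mongo::Snapshotted<mongo::Document> toSnapshottedDocument(
        const mongo::Snapshotted<mongo::BSONObj>& snapshotBson) {
        return {snapshotBson.snapshotId(), mongo::Document{snapshotBson.value()}};
    }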
diff --git a/src/mongo/dbtests/query_stage_limit_skip.cpp b/src/mongo/dbtests/query_stage_limit_skip.cpp
index 5fbd93b68d3..1ef07023e7c 100644
--- a/src/mongo/dbtests/query_stage_limit_skip.cpp
+++ b/src/mongo/dbtests/query_stage_limit_skip.cpp
@@ -65,7 +65,7 @@ QueuedDataStage* getMS(OperationContext* opCtx, WorkingSet* ws) {
WorkingSetID id = ws->allocate();
WorkingSetMember* wsm = ws->get(id);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << i));
+ wsm->doc = {SnapshotId(), Document{BSON("x" << i)}};
wsm->transitionToOwnedObj();
ms->pushBack(id);
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index e4e564aef78..445a6891ed9 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -688,7 +688,7 @@ public:
member = getNextResult(&ws, ms.get());
ASSERT_EQ(member->getState(), WorkingSetMember::RID_AND_OBJ);
ASSERT_EQ(member->recordId, *it);
- ASSERT_BSONOBJ_EQ(member->obj.value(), BSON("_id" << 4 << "a" << 4));
+ ASSERT_BSONOBJ_EQ(member->doc.value().toBson(), BSON("_id" << 4 << "a" << 4));
++it;
// Update doc {a: 5} such that the postimage will no longer match the query.
@@ -701,15 +701,15 @@ public:
member = getNextResult(&ws, ms.get());
ASSERT_EQ(member->getState(), WorkingSetMember::RID_AND_OBJ);
ASSERT(member->hasObj());
- ASSERT(member->obj.value().isOwned());
- ASSERT_BSONOBJ_EQ(member->obj.value(), BSON("_id" << 5 << "a" << 5));
+ // ASSERT(member->doc.value().isOwned());
+ ASSERT_BSONOBJ_EQ(member->doc.value().toBson(), BSON("_id" << 5 << "a" << 5));
++it;
// We correctly dedup the invalidated doc and return {a: 6} next.
member = getNextResult(&ws, ms.get());
ASSERT_EQ(member->getState(), WorkingSetMember::RID_AND_OBJ);
ASSERT_EQ(member->recordId, *it);
- ASSERT_BSONOBJ_EQ(member->obj.value(), BSON("_id" << 6 << "a" << 6));
+ ASSERT_BSONOBJ_EQ(member->doc.value().toBson(), BSON("_id" << 6 << "a" << 6));
}
private:
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 7c15a206988..20d82757c3e 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -435,7 +435,7 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
// Check the document returned by the query.
ASSERT(member->hasObj());
BSONObj expectedDoc = BSON("_id" << 1 << "a" << 1 << "b" << 1);
- ASSERT(expectedDoc.woCompare(member->obj.value()) == 0);
+ ASSERT_BSONOBJ_EQ(expectedDoc, member->doc.value().toBson());
// The blocking plan became unblocked, so we should no longer have a backup plan,
// and the winning plan should still be the index intersection one.
@@ -459,7 +459,7 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
void addMember(QueuedDataStage* qds, WorkingSet* ws, BSONObj dataObj) {
WorkingSetID id = ws->allocate();
WorkingSetMember* wsm = ws->get(id);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 1));
+ wsm->doc = {SnapshotId(), Document{BSON("x" << 1)}};
wsm->transitionToOwnedObj();
qds->pushBack(id);
}
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index 40f5ba3a5c5..77a9f0c81a1 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -122,7 +122,7 @@ public:
// Add all documents from the lastInterval into the QueuedDataStage.
const WorkingSetID id = workingSet->allocate();
WorkingSetMember* member = workingSet->get(id);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), interval.data[i]);
+ member->doc = {SnapshotId(), Document{interval.data[i]}};
workingSet->transitionToOwnedObj(id);
queuedStage->pushBack(id);
}
@@ -134,7 +134,7 @@ public:
StatusWith<double> computeDistance(WorkingSetMember* member) final {
ASSERT(member->hasObj());
- return StatusWith<double>(member->obj.value()["distance"].numberDouble());
+ return StatusWith<double>(member->doc.value()["distance"].getDouble());
}
virtual StageState initialize(OperationContext* opCtx,
@@ -156,7 +156,7 @@ static vector<BSONObj> advanceStage(PlanStage* stage, WorkingSet* workingSet) {
while (PlanStage::NEED_TIME == state) {
while (PlanStage::ADVANCED == (state = stage->work(&nextMemberID))) {
- results.push_back(workingSet->get(nextMemberID)->obj.value());
+ results.push_back(workingSet->get(nextMemberID)->doc.value().toBson());
}
}
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 0919fdea1ad..dcf7687ded7 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -98,7 +98,8 @@ public:
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = *it;
- member->obj = coll->docFor(&_opCtx, *it);
+ auto snapshotBson = coll->docFor(&_opCtx, *it);
+ member->doc = {snapshotBson.snapshotId(), Document{snapshotBson.value()}};
ws->transitionToRecordIdAndObj(id);
ms->pushBack(id);
}
@@ -403,10 +404,10 @@ public:
}
WorkingSetMember* member = exec->getWorkingSet()->get(id);
ASSERT(member->hasObj());
- if (member->obj.value().getField("_id").OID() == updatedId) {
- ASSERT(idBeforeUpdate == member->obj.snapshotId());
+ if (member->doc.value().getField("_id").getOid() == updatedId) {
+ ASSERT(idBeforeUpdate == member->doc.snapshotId());
}
- thisVal = member->obj.value().getField("foo").Int();
+ thisVal = member->doc.value().getField("foo").getInt();
ASSERT_LTE(lastVal, thisVal);
// Expect docs in range [0, limit)
ASSERT_LTE(0, thisVal);
@@ -541,15 +542,16 @@ public:
{
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
- member->obj = Snapshotted<BSONObj>(
- SnapshotId(), fromjson("{a: [1,2,3], b:[1,2,3], c:[1,2,3], d:[1,2,3,4]}"));
+ member->doc = {
+ SnapshotId(),
+ Document{fromjson("{a: [1,2,3], b:[1,2,3], c:[1,2,3], d:[1,2,3,4]}")}};
member->transitionToOwnedObj();
queuedDataStage->pushBack(id);
}
{
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), fromjson("{a:1, b:1, c:1}"));
+ member->doc = {SnapshotId(), Document{fromjson("{a:1, b:1, c:1}")}};
member->transitionToOwnedObj();
queuedDataStage->pushBack(id);
}
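The yield-and-update case in query_stage_sort.cpp keeps its original intent: if the sorter returns the document that was updated mid-scan, the member must still carry the snapshot id taken before the update. That check, isolated into a hypothetical helper written against the new doc member:

    // Sketch only; 'updatedId' and 'idBeforeUpdate' come from the surrounding test.
    void checkSnapshotUnchanged(mongo::WorkingSetMember* member,
                                const mongo::OID& updatedId,
                                const mongo::SnapshotId& idBeforeUpdate) {
        if (member->doc.value().getField("_id").getOid() == updatedId) {
            // Still the pre-update snapshot.
            ASSERT(idBeforeUpdate == member->doc.snapshotId());
        }
    }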
diff --git a/src/mongo/dbtests/query_stage_sort_key_generator.cpp b/src/mongo/dbtests/query_stage_sort_key_generator.cpp
index 8675a3e7a51..18185250ff9 100644
--- a/src/mongo/dbtests/query_stage_sort_key_generator.cpp
+++ b/src/mongo/dbtests/query_stage_sort_key_generator.cpp
@@ -72,7 +72,7 @@ BSONObj extractSortKey(const char* sortSpec, const char* doc, const CollatorInte
auto mockStage = std::make_unique<QueuedDataStage>(opCtx.get(), &workingSet);
auto wsid = workingSet.allocate();
auto wsm = workingSet.get(wsid);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), fromjson(doc));
+ wsm->doc = {SnapshotId(), Document{fromjson(doc)}};
wsm->transitionToOwnedObj();
mockStage->pushBack(wsid);
diff --git a/src/mongo/dbtests/query_stage_trial.cpp b/src/mongo/dbtests/query_stage_trial.cpp
index 84b27d1ae8c..9f3681f2c89 100644
--- a/src/mongo/dbtests/query_stage_trial.cpp
+++ b/src/mongo/dbtests/query_stage_trial.cpp
@@ -59,7 +59,7 @@ protected:
}
const auto id = _ws.allocate();
auto* member = _ws.get(id);
- member->obj.setValue(result.getOwned());
+ member->doc.setValue(Document{result});
_ws.transitionToOwnedObj(id);
queuedData->pushBack(id);
}
@@ -73,7 +73,7 @@ protected:
state = trialStage->work(&id);
if (state == PlanStage::ADVANCED) {
auto* member = _ws.get(id);
- return member->obj.value();
+ return member->doc.value().toBson();
}
} while (state == PlanStage::NEED_TIME);
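query_stage_trial.cpp exercises setValue() on the Snapshotted<Document> when queuing a result and toBson() when the caller still wants a BSONObj back. A compact sketch combining both, with a hypothetical helper name:

    // Sketch: queue a BSONObj as a Document and read it back out as BSON.
    mongo::BSONObj roundTrip(mongo::WorkingSet& ws, const mongo::BSONObj& result) {
        using namespace mongo;
        WorkingSetID id = ws.allocate();
        WorkingSetMember* member = ws.get(id);
        member->doc.setValue(Document{result});   // was member->obj.setValue(result.getOwned())
        ws.transitionToOwnedObj(id);
        return member->doc.value().toBson();
    }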
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index c6c67923ae5..0ce3c6cbe37 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -139,7 +139,7 @@ public:
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
verify(member->hasObj());
- out->push_back(member->obj.value().getOwned());
+ out->push_back(member->doc.value().toBson());
}
}
}
@@ -416,7 +416,7 @@ public:
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
const BSONObj oldDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), oldDoc);
+ member->doc = {SnapshotId(), Document{oldDoc}};
ws->transitionToRecordIdAndObj(id);
qds->pushBack(id);
@@ -441,10 +441,10 @@ public:
ASSERT_TRUE(resultMember->hasOwnedObj());
ASSERT_FALSE(resultMember->hasRecordId());
ASSERT_EQUALS(resultMember->getState(), WorkingSetMember::OWNED_OBJ);
- ASSERT_TRUE(resultMember->obj.value().isOwned());
+ ASSERT_TRUE(resultMember->doc.value().isOwned());
// Should be the old value.
- ASSERT_BSONOBJ_EQ(resultMember->obj.value(), oldDoc);
+ ASSERT_BSONOBJ_EQ(resultMember->doc.value().toBson(), oldDoc);
// Should have done the update.
BSONObj newDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex << "x" << 0);
@@ -508,7 +508,7 @@ public:
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
const BSONObj oldDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), oldDoc);
+ member->doc = {SnapshotId(), Document{oldDoc}};
ws->transitionToRecordIdAndObj(id);
qds->pushBack(id);
@@ -533,11 +533,11 @@ public:
ASSERT_TRUE(resultMember->hasOwnedObj());
ASSERT_FALSE(resultMember->hasRecordId());
ASSERT_EQUALS(resultMember->getState(), WorkingSetMember::OWNED_OBJ);
- ASSERT_TRUE(resultMember->obj.value().isOwned());
+ ASSERT_TRUE(resultMember->doc.value().isOwned());
// Should be the new value.
BSONObj newDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex << "x" << 0);
- ASSERT_BSONOBJ_EQ(resultMember->obj.value(), newDoc);
+ ASSERT_BSONOBJ_EQ(resultMember->doc.value().toBson(), newDoc);
// Should have done the update.
vector<BSONObj> objs;
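Taken together, the dbtests changes in this commit follow a single mapping from the old Snapshotted<BSONObj> member to the new Snapshotted<Document> one; a reference sketch (illustrative, not exhaustive):

    // Old: member->obj (Snapshotted<BSONObj>)           New: member->doc (Snapshotted<Document>)
    // member->obj = Snapshotted<BSONObj>(sid, bson);    member->doc = {sid, Document{bson}};
    // member->obj.value()                               member->doc.value().toBson()
    // member->obj.value()["f"].numberInt()              member->doc.value()["f"].getInt()
    // member->obj.value()["f"].numberDouble()           member->doc.value()["f"].getDouble()
    // member->obj.value()["_id"].OID()                  member->doc.value()["_id"].getOid()
    // member->obj.snapshotId()                          member->doc.snapshotId()
    // member->obj.setValue(bson.getOwned())             member->doc.setValue(Document{bson})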