summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEliot Horowitz <eliot@10gen.com>2015-02-02 15:39:22 -0500
committerEliot Horowitz <eliot@10gen.com>2015-02-02 15:40:16 -0500
commitd46a5c78c0d046f1975aca2eaa7f055a713c720f (patch)
treee694224fd2802cdbeecde7d87b5983d19bc49f31
parent87f1334585928a2b86407f68ecb6b35c62855b24 (diff)
downloadmongo-d46a5c78c0d046f1975aca2eaa7f055a713c720f.tar.gz
SERVER-17132: Added SnapshotId and Snapshotted and used them in query to make sure we use the correct versions of docs
-rw-r--r--src/mongo/db/catalog/collection.cpp66
-rw-r--r--src/mongo/db/catalog/collection.h17
-rw-r--r--src/mongo/db/commands/list_collections.cpp2
-rw-r--r--src/mongo/db/commands/list_indexes.cpp2
-rw-r--r--src/mongo/db/commands/rename_collection.cpp5
-rw-r--r--src/mongo/db/dbhelpers.cpp4
-rw-r--r--src/mongo/db/exec/collection_scan.cpp3
-rw-r--r--src/mongo/db/exec/delete.cpp3
-rw-r--r--src/mongo/db/exec/filter.h4
-rw-r--r--src/mongo/db/exec/geo_near.cpp2
-rw-r--r--src/mongo/db/exec/group.cpp4
-rw-r--r--src/mongo/db/exec/idhack.cpp2
-rw-r--r--src/mongo/db/exec/oplogstart.cpp4
-rw-r--r--src/mongo/db/exec/pipeline_proxy.cpp4
-rw-r--r--src/mongo/db/exec/projection.cpp4
-rw-r--r--src/mongo/db/exec/projection_exec.cpp8
-rw-r--r--src/mongo/db/exec/projection_exec_test.cpp4
-rw-r--r--src/mongo/db/exec/shard_filter.cpp3
-rw-r--r--src/mongo/db/exec/sort.cpp2
-rw-r--r--src/mongo/db/exec/sort_test.cpp4
-rw-r--r--src/mongo/db/exec/text.cpp10
-rw-r--r--src/mongo/db/exec/update.cpp89
-rw-r--r--src/mongo/db/exec/update.h2
-rw-r--r--src/mongo/db/exec/working_set.cpp6
-rw-r--r--src/mongo/db/exec/working_set.h3
-rw-r--r--src/mongo/db/exec/working_set_common.cpp18
-rw-r--r--src/mongo/db/exec/working_set_test.cpp12
-rw-r--r--src/mongo/db/index/haystack_access_method_internal.h4
-rw-r--r--src/mongo/db/query/plan_executor.cpp2
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_recovery_unit.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recovery_unit.h1
-rw-r--r--src/mongo/db/storage/mmap_v1/heap_record_store_btree.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/repair_database.cpp4
-rw-r--r--src/mongo/db/storage/recovery_unit.h3
-rw-r--r--src/mongo/db/storage/recovery_unit_noop.h2
-rw-r--r--src/mongo/db/storage/snapshot.h89
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp5
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h2
-rw-r--r--src/mongo/dbtests/dbhelper_tests.cpp2
-rw-r--r--src/mongo/dbtests/oplogstarttests.cpp2
-rw-r--r--src/mongo/dbtests/query_multi_plan_runner.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_and.cpp19
-rw-r--r--src/mongo/dbtests/query_stage_collscan.cpp20
-rw-r--r--src/mongo/dbtests/query_stage_count.cpp8
-rw-r--r--src/mongo/dbtests/query_stage_delete.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_fetch.cpp4
-rw-r--r--src/mongo/dbtests/query_stage_keep.cpp10
-rw-r--r--src/mongo/dbtests/query_stage_limit_skip.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_near.cpp6
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp6
-rw-r--r--src/mongo/dbtests/query_stage_update.cpp4
-rw-r--r--src/mongo/dbtests/querytests.cpp12
-rw-r--r--src/mongo/dbtests/repltests.cpp4
-rw-r--r--src/mongo/dbtests/rollbacktests.cpp2
-rw-r--r--src/mongo/s/d_migrate.cpp7
-rw-r--r--src/mongo/s/d_split.cpp2
57 files changed, 302 insertions, 217 deletions
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 1fceaad5300..3a721df56ec 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -152,32 +152,20 @@ namespace mongo {
return _recordStore->getManyIterators(txn);
}
- int64_t Collection::countTableScan( OperationContext* txn, const MatchExpression* expression ) {
- scoped_ptr<RecordIterator> iterator( getIterator( txn,
- RecordId(),
- CollectionScanParams::FORWARD ) );
- int64_t count = 0;
- while ( !iterator->isEOF() ) {
- RecordId loc = iterator->getNext();
- BSONObj obj = docFor( txn, loc );
- if ( expression->matchesBSON( obj ) )
- count++;
- }
-
- return count;
- }
-
- BSONObj Collection::docFor(OperationContext* txn, const RecordId& loc) const {
- return _recordStore->dataFor( txn, loc ).releaseToBson();
+ Snapshotted<BSONObj> Collection::docFor(OperationContext* txn, const RecordId& loc) const {
+ return Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(),
+ _recordStore->dataFor( txn, loc ).releaseToBson());
}
- bool Collection::findDoc(OperationContext* txn, const RecordId& loc, BSONObj* out) const {
+ bool Collection::findDoc(OperationContext* txn,
+ const RecordId& loc,
+ Snapshotted<BSONObj>* out) const {
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
RecordData rd;
if ( !_recordStore->findRecord( txn, loc, &rd ) )
return false;
- *out = rd.releaseToBson();
+ *out = Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(), rd.releaseToBson());
return true;
}
@@ -200,7 +188,7 @@ namespace mongo {
const BSONObj& docToInsert,
bool enforceQuota ) {
- uint64_t txnId = txn->recoveryUnit()->getMyTransactionCount();
+ const SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
if ( _indexCatalog.findIdIndex( txn ) ) {
if ( docToInsert["_id"].eoo() ) {
@@ -211,7 +199,7 @@ namespace mongo {
}
StatusWith<RecordId> res = _insertDocument( txn, docToInsert, enforceQuota );
- invariant( txnId == txn->recoveryUnit()->getMyTransactionCount() );
+ invariant( sid == txn->recoveryUnit()->getSnapshotId() );
return res;
}
@@ -294,11 +282,11 @@ namespace mongo {
return;
}
- BSONObj doc = docFor( txn, loc );
+ Snapshotted<BSONObj> doc = docFor(txn, loc);
- if ( deletedId ) {
- BSONElement e = doc["_id"];
- if ( e.type() ) {
+ if (deletedId) {
+ BSONElement e = doc.value()["_id"];
+ if (e.type()) {
*deletedId = e.wrap();
}
}
@@ -306,9 +294,9 @@ namespace mongo {
/* check if any cursors point to us. if so, advance them. */
_cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
- _indexCatalog.unindexRecord(txn, doc, loc, noWarn);
+ _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn);
- _recordStore->deleteRecord( txn, loc );
+ _recordStore->deleteRecord(txn, loc);
_infoCache.notifyOfWriteOp();
}
@@ -318,16 +306,17 @@ namespace mongo {
StatusWith<RecordId> Collection::updateDocument( OperationContext* txn,
const RecordId& oldLocation,
- const BSONObj& objOld,
+ const Snapshotted<BSONObj>& objOld,
const BSONObj& objNew,
bool enforceQuota,
bool indexesAffected,
OpDebug* debug ) {
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(objOld.snapshotId() == txn->recoveryUnit()->getSnapshotId());
- uint64_t txnId = txn->recoveryUnit()->getMyTransactionCount();
+ SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
- BSONElement oldId = objOld["_id"];
+ BSONElement oldId = objOld.value()["_id"];
if ( !oldId.eoo() && ( oldId != objNew["_id"] ) )
return StatusWith<RecordId>( ErrorCodes::InternalError,
"in Collection::updateDocument _id mismatch",
@@ -350,8 +339,12 @@ namespace mongo {
|| repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
- Status ret = iam->validateUpdate(
- txn, objOld, objNew, oldLocation, options, updateTicket );
+ Status ret = iam->validateUpdate(txn,
+ objOld.value(),
+ objNew,
+ oldLocation,
+ options,
+ updateTicket);
if ( !ret.isOK() ) {
return StatusWith<RecordId>( ret );
}
@@ -389,7 +382,7 @@ namespace mongo {
Status s = _indexCatalog.indexRecord(txn, objNew, newLocation.getValue());
if (!s.isOK())
return StatusWith<RecordId>(s);
- invariant( txnId == txn->recoveryUnit()->getMyTransactionCount() );
+ invariant( sid == txn->recoveryUnit()->getSnapshotId() );
return newLocation;
}
@@ -414,7 +407,7 @@ namespace mongo {
}
}
- invariant( txnId == txn->recoveryUnit()->getMyTransactionCount() );
+ invariant( sid == txn->recoveryUnit()->getSnapshotId() );
return newLocation;
}
@@ -438,15 +431,16 @@ namespace mongo {
Status Collection::updateDocumentWithDamages( OperationContext* txn,
const RecordId& loc,
- const RecordData& oldRec,
+ const Snapshotted<RecordData>& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) {
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(oldRec.snapshotId() == txn->recoveryUnit()->getSnapshotId());
// Broadcast the mutation so that query results stay correct.
_cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
- return _recordStore->updateWithDamages( txn, loc, oldRec, damageSource, damages );
+ return _recordStore->updateWithDamages(txn, loc, oldRec.value(), damageSource, damages);
}
bool Collection::_enforceQuota( bool userEnforeQuota ) const {
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 4d99eff98fc..d8ac4d63a39 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -42,6 +42,7 @@
#include "mongo/db/record_id.h"
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
+#include "mongo/db/storage/snapshot.h"
#include "mongo/platform/cstdint.h"
namespace mongo {
@@ -129,13 +130,13 @@ namespace mongo {
bool requiresIdIndex() const;
- BSONObj docFor(OperationContext* txn, const RecordId& loc) const;
+ Snapshotted<BSONObj> docFor(OperationContext* txn, const RecordId& loc) const;
/**
* @param out - contents set to the right docs if exists, or nothing.
* @return true iff loc exists
*/
- bool findDoc(OperationContext* txn, const RecordId& loc, BSONObj* out) const;
+ bool findDoc(OperationContext* txn, const RecordId& loc, Snapshotted<BSONObj>* out) const;
// ---- things that should move to a CollectionAccessMethod like thing
/**
@@ -152,14 +153,6 @@ namespace mongo {
*/
std::vector<RecordIterator*> getManyIterators( OperationContext* txn ) const;
-
- /**
- * does a table scan to do a count
- * this should only be used at a very low level
- * does no yielding, indexes, etc...
- */
- int64_t countTableScan( OperationContext* txn, const MatchExpression* expression );
-
void deleteDocument( OperationContext* txn,
const RecordId& loc,
bool cappedOK = false,
@@ -205,7 +198,7 @@ namespace mongo {
*/
StatusWith<RecordId> updateDocument( OperationContext* txn,
const RecordId& oldLocation,
- const BSONObj& oldDoc,
+ const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
bool enforceQuota,
bool indexesAffected,
@@ -216,7 +209,7 @@ namespace mongo {
*/
Status updateDocumentWithDamages( OperationContext* txn,
const RecordId& loc,
- const RecordData& oldRec,
+ const Snapshotted<RecordData>& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages );
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 8508390317b..e9da451096e 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -146,7 +146,7 @@ namespace mongo {
member->state = WorkingSetMember::OWNED_OBJ;
member->keyData.clear();
member->loc = RecordId();
- member->obj = maybe;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), maybe);
root->pushBack(*member);
}
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 16c9678229f..357afb0c372 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -142,7 +142,7 @@ namespace mongo {
member->state = WorkingSetMember::OWNED_OBJ;
member->keyData.clear();
member->loc = RecordId();
- member->obj = indexSpec;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), indexSpec);
root->pushBack(*member);
}
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index dcc1a1b68d6..eb9b87e2eb2 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -310,11 +310,12 @@ namespace mongo {
while (!sourceIt->isEOF()) {
txn->checkForInterrupt();
- const BSONObj obj = sourceColl->docFor(txn, sourceIt->getNext());
+ const Snapshotted<BSONObj> obj = sourceColl->docFor(txn, sourceIt->getNext());
WriteUnitOfWork wunit(txn);
// No logOp necessary because the entire renameCollection command is one logOp.
- Status status = targetColl->insertDocument(txn, obj, &indexer, true).getStatus();
+ Status status =
+ targetColl->insertDocument(txn, obj.value(), &indexer, true).getStatus();
if (!status.isOK())
return appendCommandStatus(result, status);
wunit.commit();
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 2d211d94eae..32fa31f3725 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -115,7 +115,7 @@ namespace mongo {
RecordId loc = findOne( txn, collection, query, requireIndex );
if ( loc.isNull() )
return false;
- result = collection->docFor(txn, loc);
+ result = collection->docFor(txn, loc).value();
return true;
}
@@ -188,7 +188,7 @@ namespace mongo {
RecordId loc = accessMethod->findSingle( txn, query["_id"].wrap() );
if ( loc.isNull() )
return false;
- result = collection->docFor( txn, loc );
+ result = collection->docFor(txn, loc).value();
return true;
}
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index ac708ca13b7..0d857506950 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -146,7 +146,8 @@ namespace mongo {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = curr;
- member->obj = _iter->dataFor(member->loc).releaseToBson();
+ member->obj = Snapshotted<BSONObj>(_txn->recoveryUnit()->getSnapshotId(),
+ _iter->dataFor(member->loc).releaseToBson());
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
// Advance the iterator.
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 12ae932edbb..acfbedc97c0 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -101,8 +101,7 @@ namespace mongo {
// If the working set member is in the owned obj with loc state, then the document may
// have already been deleted after-being force-fetched.
if (WorkingSetMember::LOC_AND_OWNED_OBJ == member->state) {
- BSONObj deletedDoc;
- if (!_collection->findDoc(_txn, rloc, &deletedDoc)) {
+ if (!_collection->findDoc(_txn, rloc, &member->obj)) {
// Doc is already deleted. Nothing more to do.
++_commonStats.needTime;
return PlanStage::NEED_TIME;
diff --git a/src/mongo/db/exec/filter.h b/src/mongo/db/exec/filter.h
index 23970976e35..802c68993e5 100644
--- a/src/mongo/db/exec/filter.h
+++ b/src/mongo/db/exec/filter.h
@@ -48,14 +48,14 @@ namespace mongo {
// that it should do a fetch beforehand.
BSONObj toBSON() const {
invariant(_wsm->hasObj());
- return _wsm->obj;
+ return _wsm->obj.value();
}
virtual ElementIterator* allocateIterator(const ElementPath* path) const {
// BSONElementIterator does some interesting things with arrays that I don't think
// SimpleArrayElementIterator does.
if (_wsm->hasObj()) {
- return new BSONElementIterator(path, _wsm->obj);
+ return new BSONElementIterator(path, _wsm->obj.value());
}
// NOTE: This (kind of) duplicates code in WorkingSetMember::getFieldDotted.
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index f45fa051d08..5056a2cc962 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -154,7 +154,7 @@ namespace mongo {
// Extract all the geometries out of this document for the near query
OwnedPointerVector<StoredGeometry> geometriesOwned;
vector<StoredGeometry*>& geometries = geometriesOwned.mutableVector();
- extractGeometries(member->obj, nearParams.nearQuery->field, &geometries);
+ extractGeometries(member->obj.value(), nearParams.nearQuery->field, &geometries);
// Compute the minimum distance of all the geometries in the document
double minDistance = -1;
diff --git a/src/mongo/db/exec/group.cpp b/src/mongo/db/exec/group.cpp
index 23e200a2bfb..9b669b1b83b 100644
--- a/src/mongo/db/exec/group.cpp
+++ b/src/mongo/db/exec/group.cpp
@@ -225,7 +225,7 @@ namespace mongo {
// add a fetch. We should always get fetched data, and never just key data.
invariant(member->hasObj());
- Status status = processObject(member->obj);
+ Status status = processObject(member->obj.value());
if (!status.isOK()) {
*out = WorkingSetCommon::allocateStatusMember(_ws, status);
return PlanStage::FAILURE;
@@ -247,7 +247,7 @@ namespace mongo {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
- member->obj = results;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), results);
member->state = WorkingSetMember::OWNED_OBJ;
++_commonStats.advanced;
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index 15071729a88..8931285e9e3 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -161,7 +161,7 @@ namespace mongo {
if (_addKeyMetadata) {
BSONObjBuilder bob;
- BSONObj ownedKeyObj = member->obj["_id"].wrap().getOwned();
+ BSONObj ownedKeyObj = member->obj.value()["_id"].wrap().getOwned();
bob.appendKeys(_key, ownedKeyObj);
member->addComputed(new IndexKeyComputedData(bob.obj()));
}
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index 5d4a3296f92..80f18ebea5d 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -89,7 +89,7 @@ namespace mongo {
_subIterators.popAndDeleteBack();
// TODO: should we ever try and return NEED_FETCH here?
- if (!loc.isNull() && !_filter->matchesBSON(_collection->docFor(_txn, loc))) {
+ if (!loc.isNull() && !_filter->matchesBSON(_collection->docFor(_txn, loc).value())) {
_done = true;
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
@@ -130,7 +130,7 @@ namespace mongo {
verify(member->hasObj());
verify(member->hasLoc());
- if (!_filter->matchesBSON(member->obj)) {
+ if (!_filter->matchesBSON(member->obj.value())) {
_done = true;
// RecordId is returned in *out.
return PlanStage::ADVANCED;
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index 5369429cd07..2bcad44d998 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -58,7 +58,7 @@ namespace mongo {
if (!_stash.empty()) {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
- member->obj = _stash.back();
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), _stash.back());
_stash.pop_back();
member->state = WorkingSetMember::OWNED_OBJ;
return PlanStage::ADVANCED;
@@ -67,7 +67,7 @@ namespace mongo {
if (boost::optional<BSONObj> next = getNextBson()) {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
- member->obj = *next;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), *next);
member->state = WorkingSetMember::OWNED_OBJ;
return PlanStage::ADVANCED;
}
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 8043c1546a0..0a8b09e07bf 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -170,7 +170,7 @@ namespace mongo {
invariant(member->hasObj());
// Apply the SIMPLE_DOC projection.
- transformSimpleInclusion(member->obj, _includedFields, bob);
+ transformSimpleInclusion(member->obj.value(), _includedFields, bob);
}
else {
invariant(ProjectionStageParams::COVERED_ONE_INDEX == _projImpl);
@@ -194,7 +194,7 @@ namespace mongo {
member->state = WorkingSetMember::OWNED_OBJ;
member->keyData.clear();
member->loc = RecordId();
- member->obj = bob.obj();
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), bob.obj());
return Status::OK();
}
diff --git a/src/mongo/db/exec/projection_exec.cpp b/src/mongo/db/exec/projection_exec.cpp
index 8ae16dc515d..62ea3fffe33 100644
--- a/src/mongo/db/exec/projection_exec.cpp
+++ b/src/mongo/db/exec/projection_exec.cpp
@@ -248,7 +248,7 @@ namespace mongo {
}
member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = keyObj;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), keyObj);
member->keyData.clear();
member->loc = RecordId();
return Status::OK();
@@ -262,10 +262,10 @@ namespace mongo {
if (transformRequiresDetails()) {
matchDetails.requestElemMatchKey();
verify(NULL != _queryExpression);
- verify(_queryExpression->matchesBSON(member->obj, &matchDetails));
+ verify(_queryExpression->matchesBSON(member->obj.value(), &matchDetails));
}
- Status projStatus = transform(member->obj, &bob, &matchDetails);
+ Status projStatus = transform(member->obj.value(), &bob, &matchDetails);
if (!projStatus.isOK()) {
return projStatus;
}
@@ -349,7 +349,7 @@ namespace mongo {
BSONObj newObj = bob.obj();
member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = newObj;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), newObj);
member->keyData.clear();
member->loc = RecordId();
diff --git a/src/mongo/db/exec/projection_exec_test.cpp b/src/mongo/db/exec/projection_exec_test.cpp
index 59af37dd6d4..2ad9fd3b4e0 100644
--- a/src/mongo/db/exec/projection_exec_test.cpp
+++ b/src/mongo/db/exec/projection_exec_test.cpp
@@ -83,7 +83,7 @@ namespace {
// Create working set member.
WorkingSetMember wsm;
wsm.state = WorkingSetMember::OWNED_OBJ;
- wsm.obj = fromjson(objStr);
+ wsm.obj = Snapshotted<BSONObj>(SnapshotId(), fromjson(objStr));
if (data) {
wsm.addComputed(data);
}
@@ -117,7 +117,7 @@ namespace {
}
// Finally, we compare the projected object.
- const BSONObj& obj = wsm.obj;
+ const BSONObj& obj = wsm.obj.value();
BSONObj expectedObj = fromjson(expectedObjStr);
if (obj != expectedObj) {
mongoutils::str::stream ss;
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 60d31990be0..2e5388f3807 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -97,7 +97,8 @@ namespace mongo {
// Skip this document with a warning - no shard key should not be possible
// unless manually inserting data into a shard
- warning() << "no shard key found in document " << member->obj.toString() << " "
+ warning() << "no shard key found in document "
+ << member->obj.value().toString() << " "
<< "for shard key pattern " << _metadata->getKeyPattern() << ", "
<< "document may have been inserted manually into shard";
}
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index eee9a44eb45..611ae612a0b 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -126,7 +126,7 @@ namespace mongo {
BSONObj* objOut) const {
BSONObj btreeKeyToUse;
- Status btreeStatus = getBtreeKey(member.obj, &btreeKeyToUse);
+ Status btreeStatus = getBtreeKey(member.obj.value(), &btreeKeyToUse);
if (!btreeStatus.isOK()) {
return btreeStatus;
}
diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp
index 3f627346f11..8f6bf64ca79 100644
--- a/src/mongo/db/exec/sort_test.cpp
+++ b/src/mongo/db/exec/sort_test.cpp
@@ -98,7 +98,7 @@ namespace {
// Insert obj from input array into working set.
WorkingSetMember wsm;
wsm.state = WorkingSetMember::OWNED_OBJ;
- wsm.obj = obj;
+ wsm.obj = Snapshotted<BSONObj>(SnapshotId(), obj);
ms->pushBack(wsm);
}
@@ -129,7 +129,7 @@ namespace {
BSONArrayBuilder arr(bob.subarrayStart("output"));
while (state == PlanStage::ADVANCED) {
WorkingSetMember* member = ws.get(id);
- const BSONObj& obj = member->obj;
+ const BSONObj& obj = member->obj.value();
arr.append(obj);
state = sort.work(&id);
}
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index ee1c733f955..08fc4055480 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -287,11 +287,11 @@ namespace mongo {
// a yield. If not, then we fetch the document here.
BSONObj doc;
if (wsm->hasObj()) {
- doc = wsm->obj;
+ doc = wsm->obj.value();
}
else {
- doc = _params.index->getCollection()->docFor(_txn, wsm->loc);
- wsm->obj = doc;
+ wsm->obj = _params.index->getCollection()->docFor(_txn, wsm->loc);
+ doc = wsm->obj.value();
wsm->keyData.clear();
wsm->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
}
@@ -327,7 +327,7 @@ namespace mongo {
BSONObj toBSON() const {
*_fetched = true;
- return _collection->docFor(_txn, _loc);
+ return _collection->docFor(_txn, _loc).value();
}
virtual ElementIterator* allocateIterator(const ElementPath* path) const {
@@ -352,7 +352,7 @@ namespace mongo {
// All else fails, fetch.
*_fetched = true;
- return new BSONElementIterator(path, _collection->docFor(_txn, _loc));
+ return new BSONElementIterator(path, _collection->docFor(_txn, _loc).value());
}
virtual void releaseIterator( ElementIterator* iterator ) const {
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index ff092d0deb3..8e875f0cac1 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -451,7 +451,7 @@ namespace mongo {
_specificStats.isDocReplacement = params.driver->isDocReplacement();
}
- void UpdateStage::transformAndUpdate(BSONObj& oldObj, RecordId& loc) {
+ void UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& loc) {
const UpdateRequest* request = _params.request;
UpdateDriver* driver = _params.driver;
CanonicalQuery* cq = _params.canonicalQuery;
@@ -463,11 +463,10 @@ namespace mongo {
// is needed to accommodate the new bson layout of the resulting document. In any event,
// only enable in-place mutations if the underlying storage engine offers support for
// writing damage events.
- _doc.reset(
- oldObj,
- (_collection->getRecordStore()->updateWithDamagesSupported() ?
- mutablebson::Document::kInPlaceEnabled :
- mutablebson::Document::kInPlaceDisabled));
+ _doc.reset(oldObj.value(),
+ (_collection->getRecordStore()->updateWithDamagesSupported() ?
+ mutablebson::Document::kInPlaceEnabled :
+ mutablebson::Document::kInPlaceDisabled));
BSONObj logObj;
@@ -485,7 +484,7 @@ namespace mongo {
matchDetails.requestElemMatchKey();
dassert(cq);
- verify(cq->root()->matchesBSON(oldObj, &matchDetails));
+ verify(cq->root()->matchesBSON(oldObj.value(), &matchDetails));
string matchedField;
if (matchDetails.hasElemMatchKey())
@@ -529,7 +528,7 @@ namespace mongo {
if (lifecycle)
immutableFields = lifecycle->getImmutableFields();
- uassertStatusOK(validate(oldObj,
+ uassertStatusOK(validate(oldObj.value(),
updatedFields,
_doc,
immutableFields,
@@ -546,11 +545,14 @@ namespace mongo {
// Don't actually do the write if this is an explain.
if (!request->isExplain()) {
invariant(_collection);
- const RecordData oldRec(oldObj.objdata(), oldObj.objsize());
- _collection->updateDocumentWithDamages(_txn, loc, oldRec, source, _damages);
+ const RecordData oldRec(oldObj.value().objdata(), oldObj.value().objsize());
+ _collection->updateDocumentWithDamages(_txn, loc,
+ Snapshotted<RecordData>(oldObj.snapshotId(),
+ oldRec),
+ source, _damages);
}
- newObj = oldObj;
+ newObj = oldObj.value();
_specificStats.fastmod = true;
}
@@ -598,6 +600,7 @@ namespace mongo {
request->isFromMigration());
}
+ invariant(oldObj.snapshotId() == _txn->recoveryUnit()->getSnapshotId());
wunit.commit();
}
@@ -755,7 +758,6 @@ namespace mongo {
if (PlanStage::ADVANCED == status) {
// Need to get these things from the result returned by the child.
RecordId loc;
- BSONObj oldObj;
WorkingSetMember* member = _ws->get(id);
@@ -774,37 +776,14 @@ namespace mongo {
// Updates can't have projections. This means that covering analysis will always add
// a fetch. We should always get fetched data, and never just key data.
invariant(member->hasObj());
- oldObj = member->obj;
-
- // If the working set member is in the owned obj with loc state, then 'oldObj' may not
- // be the latest version in the database. In this case, we must refetch the doc from the
- // collection. We also must be tolerant of the possibility that the doc at the wsm's
- // RecordId was deleted or updated after being force-fetched.
- if (WorkingSetMember::LOC_AND_OWNED_OBJ == member->state) {
- if (!_collection->findDoc(_txn, loc, &oldObj)) {
- // The doc was deleted after the force-fetch, so we just move on.
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- // We need to make sure that the doc still matches the predicate, as it may have
- // been updated since being force-fetched.
- //
- // 'cq' may be NULL in the case of idhack updates. In this case, doc-level locking
- // storage engines will look up the key in the _id index and fetch the keyed
- // document in a single work() cyle. Since yielding cannot happen between these
- // two events, the OperationContext protects from the doc changing under our feet.
- CanonicalQuery* cq = _params.canonicalQuery;
- if (cq && !cq->root()->matchesBSON(oldObj, NULL)) {
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- }
+ Snapshotted<BSONObj> oldObj = member->obj;
// If we're here, then we have retrieved both a RecordId and the corresponding
// object from the child stage. Since we have the object and the diskloc,
// we can free the WSM.
_ws->free(id);
+ member = NULL;
// We fill this with the new locs of moved doc so we don't double-update.
if (_updatedLocs && _updatedLocs->count(loc) > 0) {
@@ -824,12 +803,29 @@ namespace mongo {
}
// Do the update and return.
- BSONObj reFetched;
uint64_t attempt = 1;
while ( attempt++ ) {
try {
- transformAndUpdate(reFetched.isEmpty() ? oldObj : reFetched , loc);
+ if (_txn->recoveryUnit()->getSnapshotId() != oldObj.snapshotId()) {
+ // our snapshot has changed, refetch
+ if ( !_collection->findDoc( _txn, loc, &oldObj ) ) {
+ // document was deleted, we're done here
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ }
+
+ // we have to re-match the doc as it might not match anymore
+ if ( _params.canonicalQuery &&
+ _params.canonicalQuery->root() &&
+ !_params.canonicalQuery->root()->matchesBSON(oldObj.value(), NULL)) {
+ // doesn't match predicates anymore!
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ }
+
+ }
+ transformAndUpdate(oldObj, loc);
break;
}
catch ( const WriteConflictException& de ) {
@@ -840,6 +836,8 @@ namespace mongo {
_params.opDebug->writeConflicts++;
+ // This is ok because we re-check all docs and predicates if the snapshot
+ // changes out from under us in the retry loop above.
_txn->recoveryUnit()->commitAndRestart();
_txn->checkForInterrupt();
@@ -857,19 +855,6 @@ namespace mongo {
SwitchToThread();
#endif
}
-
- if ( !_collection->findDoc( _txn, loc, &reFetched ) ) {
- // document was deleted, we're done here
- break;
- }
- // we have to re-match the doc as it might not match anymore
- if ( _params.canonicalQuery &&
- _params.canonicalQuery->root() &&
- !_params.canonicalQuery->root()->matchesBSON( reFetched, NULL ) ) {
- // doesn't match!
- break;
- }
- // now we try again!
}
}
diff --git a/src/mongo/db/exec/update.h b/src/mongo/db/exec/update.h
index 08bf949b958..23505d06bed 100644
--- a/src/mongo/db/exec/update.h
+++ b/src/mongo/db/exec/update.h
@@ -120,7 +120,7 @@ namespace mongo {
* Computes the result of applying mods to the document 'oldObj' at RecordId 'loc' in
* memory, then commits these changes to the database.
*/
- void transformAndUpdate(BSONObj& oldObj, RecordId& loc);
+ void transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& loc);
/**
* Computes the document to insert and inserts it into the collection. Used if the
diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp
index c35b39d7c94..cbe7217b1b6 100644
--- a/src/mongo/db/exec/working_set.cpp
+++ b/src/mongo/db/exec/working_set.cpp
@@ -183,7 +183,7 @@ namespace mongo {
}
keyData.clear();
- obj = BSONObj();
+ obj.reset();
state = WorkingSetMember::INVALID;
}
@@ -232,7 +232,7 @@ namespace mongo {
bool WorkingSetMember::getFieldDotted(const string& field, BSONElement* out) const {
// If our state is such that we have an object, use it.
if (hasObj()) {
- *out = obj.getFieldDotted(field);
+ *out = obj.value().getFieldDotted(field);
return true;
}
@@ -266,7 +266,7 @@ namespace mongo {
// XXX: Unowned objects count towards current size.
// See SERVER-12579
if (hasObj()) {
- memUsage += obj.objsize();
+ memUsage += obj.value().objsize();
}
for (size_t i = 0; i < keyData.size(); ++i) {
diff --git a/src/mongo/db/exec/working_set.h b/src/mongo/db/exec/working_set.h
index d8c8c6f3524..e725529188a 100644
--- a/src/mongo/db/exec/working_set.h
+++ b/src/mongo/db/exec/working_set.h
@@ -34,6 +34,7 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/record_id.h"
+#include "mongo/db/storage/snapshot.h"
#include "mongo/platform/unordered_set.h"
namespace mongo {
@@ -286,7 +287,7 @@ namespace mongo {
//
RecordId loc;
- BSONObj obj;
+ Snapshotted<BSONObj> obj;
std::vector<IndexKeyDatum> keyData;
MemberState state;
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 5ad9a650baf..972896b0cdb 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -43,7 +43,8 @@ namespace mongo {
if (!member->hasLoc()) { return false; }
// Do the fetch, invalidate the DL.
- member->obj = collection->docFor(txn, member->loc).getOwned();
+ member->obj = collection->docFor(txn, member->loc);
+ member->obj.setValue(member->obj.value().getOwned() );
member->state = WorkingSetMember::OWNED_OBJ;
member->loc = RecordId();
@@ -55,6 +56,7 @@ namespace mongo {
WorkingSet* workingSet,
const Collection* collection) {
invariant(collection);
+ dassert(supportsDocLocking());
for (WorkingSet::iterator it = workingSet->begin(); it != workingSet->end(); ++it) {
if (WorkingSetMember::LOC_AND_OWNED_OBJ == it->state) {
@@ -74,14 +76,14 @@ namespace mongo {
// and starts to delete the matching documents, including D. The working set members for
// D created by the two rejected are still present, but their RecordIds no longer refer
// to a valid document.
- BSONObj fetchedDoc;
- if (!collection->findDoc(txn, it->loc, &fetchedDoc)) {
+ it->obj.reset();
+ if (!collection->findDoc(txn, it->loc, &it->obj)) {
// Leftover working set members pointing to old docs can be safely freed.
it.free();
continue;
}
- it->obj = fetchedDoc.getOwned();
+ it->obj.setValue(it->obj.value().getOwned() );
it->state = WorkingSetMember::LOC_AND_OWNED_OBJ;
}
}
@@ -140,7 +142,7 @@ namespace mongo {
WorkingSetID wsid = ws->allocate();
WorkingSetMember* member = ws->get(wsid);
member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = buildMemberStatusObject(status);
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), buildMemberStatusObject(status));
return wsid;
}
@@ -166,11 +168,11 @@ namespace mongo {
if (!member->hasOwnedObj()) {
return;
}
- BSONObj obj = member->obj;
+ BSONObj obj = member->obj.value();
if (!isValidStatusMemberObject(obj)) {
return;
}
- *objOut = member->obj;
+ *objOut = obj;
}
// static
@@ -183,7 +185,7 @@ namespace mongo {
// static
Status WorkingSetCommon::getMemberStatus(const WorkingSetMember& member) {
invariant(member.hasObj());
- return getMemberObjectStatus(member.obj);
+ return getMemberObjectStatus(member.obj.value());
}
// static
diff --git a/src/mongo/db/exec/working_set_test.cpp b/src/mongo/db/exec/working_set_test.cpp
index c2c7f88a474..bc0c7b04230 100644
--- a/src/mongo/db/exec/working_set_test.cpp
+++ b/src/mongo/db/exec/working_set_test.cpp
@@ -88,9 +88,9 @@ namespace {
BSONObj obj = BSON(fieldName << 5);
// Not truthful since the loc is bogus, but the loc isn't accessed anyway...
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- member->obj = BSONObj(obj.objdata());
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), BSONObj(obj.objdata()));
ASSERT_TRUE(obj.isOwned());
- ASSERT_FALSE(member->obj.isOwned());
+ ASSERT_FALSE(member->obj.value().isOwned());
// Get out the field we put in.
BSONElement elt;
@@ -102,8 +102,8 @@ namespace {
string fieldName = "x";
BSONObj obj = BSON(fieldName << 5);
- member->obj = obj;
- ASSERT_TRUE(member->obj.isOwned());
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), obj);
+ ASSERT_TRUE(member->obj.value().isOwned());
member->state = WorkingSetMember::OWNED_OBJ;
BSONElement elt;
ASSERT_TRUE(member->getFieldDotted(fieldName, &elt));
@@ -164,7 +164,7 @@ namespace {
WorkingSetID id2 = ws.allocate();
WorkingSetMember* member2 = ws.get(id2);
member2->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- member2->obj = BSON("a" << 3);
+ member2->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("a" << 3));
int counter = 0;
for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
@@ -207,7 +207,7 @@ namespace {
WorkingSetMember* member = ws.get(id2);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- member->obj = BSON("a" << 3);
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("a" << 3));
ws.free(id1);
ws.free(id3);
diff --git a/src/mongo/db/index/haystack_access_method_internal.h b/src/mongo/db/index/haystack_access_method_internal.h
index 14a5bf9f05e..2f068bfc15f 100644
--- a/src/mongo/db/index/haystack_access_method_internal.h
+++ b/src/mongo/db/index/haystack_access_method_internal.h
@@ -62,7 +62,7 @@ namespace mongo {
// it)
void consider(const RecordId& loc) {
if (limitReached()) return;
- Point p(_collection->docFor(_txn, loc).getFieldDotted(_geoField));
+ Point p(_collection->docFor(_txn, loc).value().getFieldDotted(_geoField));
if (distance(_near, p) > _maxDistance)
return;
_locs.push_back(loc);
@@ -70,7 +70,7 @@ namespace mongo {
int appendResultsTo(BSONArrayBuilder* b) {
for (unsigned i = 0; i <_locs.size(); i++)
- b->append(_collection->docFor(_txn, _locs[i]));
+ b->append(_collection->docFor(_txn, _locs[i]).value());
return _locs.size();
}
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 3b01c4fcd5b..c0c387a7e46 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -334,7 +334,7 @@ namespace mongo {
}
}
else if (member->hasObj()) {
- *objOut = member->obj;
+ *objOut = member->obj.value();
}
else {
_workingSet->free(id);
diff --git a/src/mongo/db/storage/in_memory/in_memory_recovery_unit.h b/src/mongo/db/storage/in_memory/in_memory_recovery_unit.h
index 78de9b71d68..5b0f5b72940 100644
--- a/src/mongo/db/storage/in_memory/in_memory_recovery_unit.h
+++ b/src/mongo/db/storage/in_memory/in_memory_recovery_unit.h
@@ -65,6 +65,8 @@ namespace mongo {
virtual void setRollbackWritesDisabled() {}
+ virtual SnapshotId getSnapshotId() const { return SnapshotId(); }
+
private:
typedef boost::shared_ptr<Change> ChangePtr;
typedef std::vector<ChangePtr> Changes;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
index 078338c99eb..9ca7c0b104e 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
@@ -230,7 +230,7 @@ namespace mongo {
bool includeBackgroundInProgress) const {
IndexIterator i = ii(includeBackgroundInProgress);
while( i.more() ) {
- const BSONObj obj = coll->docFor(txn, i.next().info.toRecordId());
+ const BSONObj obj = coll->docFor(txn, i.next().info.toRecordId()).value();
if ( name == obj.getStringField("name") )
return i.pos()-1;
}
diff --git a/src/mongo/db/storage/mmap_v1/dur_recovery_unit.h b/src/mongo/db/storage/mmap_v1/dur_recovery_unit.h
index 322dd170dcd..9eb76f652cf 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recovery_unit.h
+++ b/src/mongo/db/storage/mmap_v1/dur_recovery_unit.h
@@ -60,6 +60,7 @@ namespace mongo {
virtual void setRollbackWritesDisabled();
+ virtual SnapshotId getSnapshotId() const { return SnapshotId(); }
private:
void commitChanges();
void pushChangesToDurSubSystem();
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
index a3596d88851..c857baf5d00 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
@@ -206,6 +206,8 @@ namespace mongo {
virtual void setRollbackWritesDisabled() {}
+ virtual SnapshotId getSnapshotId() const { return SnapshotId(); }
+
// -----------------------
void notifyInsert( HeapRecordStoreBtree* rs, const RecordId& loc );
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index 33992f4ccbc..aff3379e6a6 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -341,7 +341,7 @@ namespace mongo {
scoped_ptr<RecordIterator> it( coll->getIterator(txn) );
while ( !it->isEOF() ) {
RecordId loc = it->getNext();
- BSONObj obj = coll->docFor( txn, loc );
+ BSONObj obj = coll->docFor(txn, loc).value();
string ns = obj["name"].String();
@@ -407,7 +407,7 @@ namespace mongo {
RecordId loc = iterator->getNext();
invariant( !loc.isNull() );
- BSONObj doc = originalCollection->docFor( txn, loc );
+ BSONObj doc = originalCollection->docFor(txn, loc).value();
WriteUnitOfWork wunit(txn);
StatusWith<RecordId> result = tempCollection->insertDocument(txn,
diff --git a/src/mongo/db/storage/recovery_unit.h b/src/mongo/db/storage/recovery_unit.h
index 15d2bae4466..c5481225c17 100644
--- a/src/mongo/db/storage/recovery_unit.h
+++ b/src/mongo/db/storage/recovery_unit.h
@@ -32,6 +32,7 @@
#include <string>
#include "mongo/base/disallow_copying.h"
+#include "mongo/db/storage/snapshot.h"
#include "mongo/platform/cstdint.h"
namespace mongo {
@@ -99,7 +100,7 @@ namespace mongo {
*/
virtual void commitAndRestart() = 0;
- virtual uint64_t getMyTransactionCount() const { return 0; }
+ virtual SnapshotId getSnapshotId() const = 0;
/**
* A Change is an action that is registerChange()'d while a WriteUnitOfWork exists. The
diff --git a/src/mongo/db/storage/recovery_unit_noop.h b/src/mongo/db/storage/recovery_unit_noop.h
index 58c341fb76b..7008f68298c 100644
--- a/src/mongo/db/storage/recovery_unit_noop.h
+++ b/src/mongo/db/storage/recovery_unit_noop.h
@@ -56,6 +56,8 @@ namespace mongo {
return data;
}
virtual void setRollbackWritesDisabled() {}
+
+ virtual SnapshotId getSnapshotId() const { return SnapshotId(); }
};
} // namespace mongo
diff --git a/src/mongo/db/storage/snapshot.h b/src/mongo/db/storage/snapshot.h
new file mode 100644
index 00000000000..5d432ba3cbc
--- /dev/null
+++ b/src/mongo/db/storage/snapshot.h
@@ -0,0 +1,101 @@
+// snapshot.h
+
+/**
+* Copyright (C) 2015 MongoDB Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*
+* As a special exception, the copyright holders give permission to link the
+* code of portions of this program with the OpenSSL library under certain
+* conditions as described in each individual source file and distribute
+* linked combinations including the program with the OpenSSL library. You
+* must comply with the GNU Affero General Public License in all respects for
+* all of the code used other than as permitted herein. If you modify file(s)
+* with this exception, you may extend this exception to your version of the
+* file(s), but you are not obligated to do so. If you do not wish to do so,
+* delete this exception statement from your version. If you delete this
+* exception statement from all source files in the program, then also delete
+* it in the license file.
+*/
+
+#pragma once
+
+#include "mongo/platform/cstdint"
+#include "mongo/util/assert_util.h"
+
+namespace mongo {
+
+    /**
+     * Identifies the storage engine snapshot that a piece of data was read
+     * from. The default-constructed id (0) is the "null" id, used when no
+     * snapshot information is available.
+     */
+    class SnapshotId {
+        static const uint64_t kNullId = 0;
+    public:
+        SnapshotId()
+            : _id(kNullId) {
+        }
+
+        // 0 is NULL
+        explicit SnapshotId(uint64_t id)
+            : _id(id) {
+            invariant(id != kNullId);
+        }
+
+        bool isNull() const { return _id == kNullId; }
+
+        bool operator==(const SnapshotId& other) const {
+            return _id == other._id;
+        }
+
+        bool operator!=(const SnapshotId& other) const {
+            return _id != other._id;
+        }
+
+    private:
+        uint64_t _id;
+    };
+
+    /**
+     * Pairs a value (e.g. a BSONObj read from a collection) with the
+     * SnapshotId it was read under, so that consumers can detect when the
+     * value is stale relative to the current storage snapshot.
+     */
+    template<typename T>
+    class Snapshotted {
+    public:
+        Snapshotted()
+            : _id(), _value() {
+        }
+
+        Snapshotted(SnapshotId id, const T& value) :
+            _id(id), _value(value) {
+        }
+
+        // Restore the default state: null snapshot id, default-constructed value.
+        void reset() {
+            *this = Snapshotted();
+        }
+
+        void setValue(const T& t) { _value = t; }
+
+        SnapshotId snapshotId() const { return _id; }
+        const T& value() const { return _value; }
+        T& value() { return _value; }
+
+    private:
+        SnapshotId _id;
+        T _value;
+    };
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index a248f84368a..5547f1a452b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -300,8 +300,9 @@ namespace mongo {
_ticket.reset(NULL);
}
- uint64_t WiredTigerRecoveryUnit::getMyTransactionCount() const {
- return _myTransactionCount;
+ SnapshotId WiredTigerRecoveryUnit::getSnapshotId() const {
+ // TODO: use actual wiredtiger txn id
+ return SnapshotId(_myTransactionCount);
}
void WiredTigerRecoveryUnit::markNoTicketRequired() {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
index e1247457420..f0142b46f36 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
@@ -78,7 +78,7 @@ namespace mongo {
virtual void setRollbackWritesDisabled() {}
- virtual uint64_t getMyTransactionCount() const;
+ virtual SnapshotId getSnapshotId() const;
// ---- WT STUFF
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 4593fd26e70..913618d7b29 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -167,7 +167,7 @@ namespace mongo {
// Make sure all the disklocs actually correspond to the right info
for ( set<RecordId>::const_iterator it = locs.begin(); it != locs.end(); ++it ) {
- const BSONObj obj = collection->docFor(&txn, *it);
+ const BSONObj obj = collection->docFor(&txn, *it).value();
ASSERT_EQUALS(obj["tag"].OID(), tag);
}
}
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index 2248391a5c5..ff907f964e9 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -92,7 +92,7 @@ namespace OplogStartTests {
void assertWorkingSetMemberHasId(WorkingSetID id, int expectedId) {
WorkingSetMember* member = _oplogws->get(id);
- BSONElement idEl = member->obj["_id"];
+ BSONElement idEl = member->obj.value()["_id"];
ASSERT(!idEl.eoo());
ASSERT(idEl.isNumber());
ASSERT_EQUALS(idEl.numberInt(), expectedId);
diff --git a/src/mongo/dbtests/query_multi_plan_runner.cpp b/src/mongo/dbtests/query_multi_plan_runner.cpp
index 0e2b9084d44..2b9d88bad83 100644
--- a/src/mongo/dbtests/query_multi_plan_runner.cpp
+++ b/src/mongo/dbtests/query_multi_plan_runner.cpp
@@ -263,7 +263,7 @@ namespace QueryMultiPlanRunner {
// Check the document returned by the query.
ASSERT(member->hasObj());
BSONObj expectedDoc = BSON("_id" << 1 << "a" << 1 << "b" << 1);
- ASSERT(expectedDoc.woCompare(member->obj) == 0);
+ ASSERT(expectedDoc.woCompare(member->obj.value()) == 0);
// The blocking plan became unblocked, so we should no longer have a backup plan,
// and the winning plan should still be the index intersection one.
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 670c3e4ef72..44eca7ed214 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -134,7 +134,7 @@ namespace QueryStageAnd {
WorkingSetMember* member = ws->get(id);
ASSERT(member->hasObj());
- return member->obj;
+ return member->obj.value();
}
// We failed to produce a result.
@@ -215,9 +215,9 @@ namespace QueryStageAnd {
getLocs(&data, coll);
size_t memUsageBefore = ah->getMemUsage();
for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
- if (coll->docFor(&_txn, *it)["foo"].numberInt() == 15) {
+ if (coll->docFor(&_txn, *it).value()["foo"].numberInt() == 15) {
ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *it));
+ remove(coll->docFor(&_txn, *it).value());
break;
}
}
@@ -318,7 +318,7 @@ namespace QueryStageAnd {
size_t memUsageBefore = ah->getMemUsage();
for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
- if (0 == deletedObj.woCompare(coll->docFor(&_txn, *it))) {
+ if (0 == deletedObj.woCompare(coll->docFor(&_txn, *it).value())) {
ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
break;
}
@@ -340,7 +340,8 @@ namespace QueryStageAnd {
PlanStage::StageState status = ah->work(&id);
if (PlanStage::ADVANCED != status) { continue; }
WorkingSetMember* wsm = ws.get(id);
- ASSERT_NOT_EQUALS(0, deletedObj.woCompare(coll->docFor(&_txn, wsm->loc)));
+ ASSERT_NOT_EQUALS(0,
+ deletedObj.woCompare(coll->docFor(&_txn, wsm->loc).value()));
++count;
}
@@ -964,7 +965,7 @@ namespace QueryStageAnd {
// and make sure it shows up in the flagged results.
ah->saveState();
ah->invalidate(&_txn, *data.begin(), INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *data.begin()));
+ remove(coll->docFor(&_txn, *data.begin()).value());
ah->restoreState(&_txn);
// Make sure the nuked obj is actually in the flagged data.
@@ -1003,7 +1004,7 @@ namespace QueryStageAnd {
// not flagged.
ah->saveState();
ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *it));
+ remove(coll->docFor(&_txn, *it).value());
ah->restoreState(&_txn);
// Get all results aside from the two we killed.
@@ -1268,11 +1269,11 @@ namespace QueryStageAnd {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState status = ah->work(&id);
if (PlanStage::ADVANCED != status) { continue; }
- BSONObj thisObj = coll->docFor(&_txn, ws.get(id)->loc);
+ BSONObj thisObj = coll->docFor(&_txn, ws.get(id)->loc).value();
ASSERT_EQUALS(7 + count, thisObj["bar"].numberInt());
++count;
if (WorkingSet::INVALID_ID != lastId) {
- BSONObj lastObj = coll->docFor(&_txn, ws.get(lastId)->loc);
+ BSONObj lastObj = coll->docFor(&_txn, ws.get(lastId)->loc).value();
ASSERT_LESS_THAN(lastObj["bar"].woCompare(thisObj["bar"]), 0);
}
lastId = id;
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 17f761b13d8..80ac05acd0c 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -286,8 +286,8 @@ namespace QueryStageCollectionScan {
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count])["foo"].numberInt(),
- member->obj["foo"].numberInt());
+ ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ member->obj.value()["foo"].numberInt());
++count;
}
}
@@ -295,7 +295,7 @@ namespace QueryStageCollectionScan {
// Remove locs[count].
scan->saveState();
scan->invalidate(&_txn, locs[count], INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, locs[count]));
+ remove(coll->docFor(&_txn, locs[count]).value());
scan->restoreState(&_txn);
// Skip over locs[count].
@@ -307,8 +307,8 @@ namespace QueryStageCollectionScan {
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count])["foo"].numberInt(),
- member->obj["foo"].numberInt());
+ ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ member->obj.value()["foo"].numberInt());
++count;
}
}
@@ -347,8 +347,8 @@ namespace QueryStageCollectionScan {
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count])["foo"].numberInt(),
- member->obj["foo"].numberInt());
+ ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ member->obj.value()["foo"].numberInt());
++count;
}
}
@@ -356,7 +356,7 @@ namespace QueryStageCollectionScan {
// Remove locs[count].
scan->saveState();
scan->invalidate(&_txn, locs[count], INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, locs[count]));
+ remove(coll->docFor(&_txn, locs[count]).value());
scan->restoreState(&_txn);
// Skip over locs[count].
@@ -368,8 +368,8 @@ namespace QueryStageCollectionScan {
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count])["foo"].numberInt(),
- member->obj["foo"].numberInt());
+ ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ member->obj.value()["foo"].numberInt());
++count;
}
}
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 63a4649ae83..f100a8901f1 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -123,7 +123,9 @@ namespace QueryStageCount {
void update(const RecordId& oldLoc, const BSONObj& newDoc) {
WriteUnitOfWork wunit(&_txn);
BSONObj oldDoc = _coll->getRecordStore()->dataFor( &_txn, oldLoc ).releaseToBson();
- _coll->updateDocument(&_txn, oldLoc, oldDoc, newDoc, false, true, NULL);
+ _coll->updateDocument(&_txn, oldLoc,
+ Snapshotted<BSONObj>(_txn.recoveryUnit()->getSnapshotId(), oldDoc),
+ newDoc, false, true, NULL);
wunit.commit();
}
@@ -314,11 +316,11 @@ namespace QueryStageCount {
void interject(CountStage& count_stage, int interjection) {
if (interjection == 0) {
count_stage.invalidate(&_txn, _locs[0], INVALIDATION_MUTATION);
- OID id1 = _coll->docFor(&_txn, _locs[0]).getField("_id").OID();
+ OID id1 = _coll->docFor(&_txn, _locs[0]).value().getField("_id").OID();
update(_locs[0], BSON("_id" << id1 << "x" << 100));
count_stage.invalidate(&_txn, _locs[1], INVALIDATION_MUTATION);
- OID id2 = _coll->docFor(&_txn, _locs[1]).getField("_id").OID();
+ OID id2 = _coll->docFor(&_txn, _locs[1]).value().getField("_id").OID();
update(_locs[1], BSON("_id" << id2 << "x" << 100));
}
}
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index 1c39f39795f..5a2385887ce 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -148,7 +148,7 @@ namespace QueryStageDelete {
// Remove locs[targetDocIndex];
deleteStage.saveState();
deleteStage.invalidate(&_txn, locs[targetDocIndex], INVALIDATION_DELETION);
- BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]);
+ BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]).value();
ASSERT(!targetDoc.isEmpty());
remove(targetDoc);
deleteStage.restoreState(&_txn);
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index c7605f3b6a3..b58c9572125 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -122,8 +122,8 @@ namespace QueryStageFetch {
mockMember.state = WorkingSetMember::OWNED_OBJ;
mockMember.loc = RecordId();
- mockMember.obj = BSON("foo" << 6);
- ASSERT_TRUE(mockMember.obj.isOwned());
+ mockMember.obj = Snapshotted<BSONObj>(SnapshotId(), BSON("foo" << 6));
+ ASSERT_TRUE(mockMember.obj.value().isOwned());
mockStage->pushBack(mockMember);
}
diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp
index 1113fc42f7e..5a6fb4e5f34 100644
--- a/src/mongo/dbtests/query_stage_keep.cpp
+++ b/src/mongo/dbtests/query_stage_keep.cpp
@@ -129,7 +129,7 @@ namespace QueryStageKeep {
WorkingSetID id = ws.allocate();
WorkingSetMember* member = ws.get(id);
member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = BSON("x" << 2);
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 2));
ws.flagForReview(id);
}
@@ -150,7 +150,7 @@ namespace QueryStageKeep {
WorkingSetID id = getNextResult(keep.get());
WorkingSetMember* member = ws.get(id);
ASSERT_FALSE(ws.isFlagged(id));
- ASSERT_EQUALS(member->obj["x"].numberInt(), 1);
+ ASSERT_EQUALS(member->obj.value()["x"].numberInt(), 1);
}
ASSERT(cs->isEOF());
@@ -160,7 +160,7 @@ namespace QueryStageKeep {
WorkingSetID id = getNextResult(keep.get());
WorkingSetMember* member = ws.get(id);
ASSERT(ws.isFlagged(id));
- ASSERT_EQUALS(member->obj["x"].numberInt(), 2);
+ ASSERT_EQUALS(member->obj.value()["x"].numberInt(), 2);
}
}
};
@@ -195,7 +195,7 @@ namespace QueryStageKeep {
WorkingSetID id = ws.allocate();
WorkingSetMember* member = ws.get(id);
member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = BSON("x" << 1);
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 1));
ws.flagForReview(id);
expectedResultIds.insert(id);
}
@@ -220,7 +220,7 @@ namespace QueryStageKeep {
WorkingSetID id = ws.allocate();
WorkingSetMember* member = ws.get(id);
member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = BSON("x" << 1);
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 1));
ws.flagForReview(id);
}
while ((id = getNextResult(keep.get())) != WorkingSet::INVALID_ID) {
diff --git a/src/mongo/dbtests/query_stage_limit_skip.cpp b/src/mongo/dbtests/query_stage_limit_skip.cpp
index 46967226a10..bc4697327de 100644
--- a/src/mongo/dbtests/query_stage_limit_skip.cpp
+++ b/src/mongo/dbtests/query_stage_limit_skip.cpp
@@ -61,7 +61,7 @@ namespace {
ms->pushBack(PlanStage::NEED_TIME);
WorkingSetMember wsm;
wsm.state = WorkingSetMember::OWNED_OBJ;
- wsm.obj = BSON("x" << i);
+ wsm.obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << i));
ms->pushBack(wsm);
ms->pushBack(PlanStage::NEED_TIME);
}
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index 0ee881904ef..bbaa0c5510f 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -74,7 +74,7 @@ namespace {
*out = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(*out);
member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = next;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), next);
return PlanStage::ADVANCED;
}
@@ -173,7 +173,7 @@ namespace {
virtual StatusWith<double> computeDistance(WorkingSetMember* member) {
ASSERT(member->hasObj());
- return StatusWith<double>(member->obj["distance"].numberDouble());
+ return StatusWith<double>(member->obj.value()["distance"].numberDouble());
}
private:
@@ -191,7 +191,7 @@ namespace {
while (PlanStage::NEED_TIME == state) {
while (PlanStage::ADVANCED == (state = stage->work(&nextMemberID))) {
- results.push_back(workingSet->get(nextMemberID)->obj);
+ results.push_back(workingSet->get(nextMemberID)->obj.value());
}
}
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index e4c42945f6f..eca4c36d4d8 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -368,10 +368,12 @@ namespace QueryStageSortTests {
WorkingSetMember member;
member.state = WorkingSetMember::OWNED_OBJ;
- member.obj = fromjson("{a: [1,2,3], b:[1,2,3], c:[1,2,3], d:[1,2,3,4]}");
+ member.obj = Snapshotted<BSONObj>(SnapshotId(),
+ fromjson("{a: [1,2,3], b:[1,2,3], c:[1,2,3], d:[1,2,3,4]}"));
ms->pushBack(member);
- member.obj = fromjson("{a:1, b:1, c:1}");
+ member.obj = Snapshotted<BSONObj>(SnapshotId(),
+ fromjson("{a:1, b:1, c:1}"));
ms->pushBack(member);
}
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index eed2e5911ac..305cd1a1e71 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -123,7 +123,7 @@ namespace QueryStageUpdate {
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
verify(member->hasObj());
- out->push_back(member->obj);
+ out->push_back(member->obj.value());
}
}
}
@@ -314,7 +314,7 @@ namespace QueryStageUpdate {
// Remove locs[targetDocIndex];
updateStage->saveState();
updateStage->invalidate(&_txn, locs[targetDocIndex], INVALIDATION_DELETION);
- BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]);
+ BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]).value();
ASSERT(!targetDoc.isEmpty());
remove(targetDoc);
updateStage->restoreState(&_txn);
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 369a694240c..728424e56b4 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -144,7 +144,8 @@ namespace QueryTests {
ASSERT( Helpers::findOne( &_txn, _collection, query, ret, true ) );
ASSERT_EQUALS( string( "b" ), ret.firstElement().fieldName() );
// Cross check with findOne() returning location.
- ASSERT_EQUALS(ret, _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)));
+ ASSERT_EQUALS(ret,
+ _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value());
}
};
@@ -158,7 +159,8 @@ namespace QueryTests {
// Check findOne() returning object, allowing unindexed scan.
ASSERT( Helpers::findOne( &_txn, _collection, query, ret, false ) );
// Check findOne() returning location, allowing unindexed scan.
- ASSERT_EQUALS(ret, _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)));
+ ASSERT_EQUALS(ret,
+ _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value());
// Check findOne() returning object, requiring indexed scan without index.
ASSERT_THROWS( Helpers::findOne( &_txn, _collection, query, ret, true ), MsgAssertionException );
@@ -169,7 +171,8 @@ namespace QueryTests {
// Check findOne() returning object, requiring indexed scan with index.
ASSERT( Helpers::findOne( &_txn, _collection, query, ret, true ) );
// Check findOne() returning location, requiring indexed scan with index.
- ASSERT_EQUALS(ret, _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)));
+ ASSERT_EQUALS(ret,
+ _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value());
}
};
@@ -204,7 +207,8 @@ namespace QueryTests {
BSONObj ret;
ASSERT( Helpers::findOne( &_txn, _collection, query, ret, false ) );
ASSERT( ret.isEmpty() );
- ASSERT_EQUALS(ret, _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)));
+ ASSERT_EQUALS(ret,
+ _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value());
}
};
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index dc94738218a..420b3975bd6 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -189,7 +189,7 @@ namespace ReplTests {
RecordIterator* it = coll->getIterator(&_txn);
while ( !it->isEOF() ) {
RecordId currLoc = it->getNext();
- ops.push_back(coll->docFor(&_txn, currLoc));
+ ops.push_back(coll->docFor(&_txn, currLoc).value());
}
delete it;
}
@@ -224,7 +224,7 @@ namespace ReplTests {
::mongo::log() << "all for " << ns << endl;
while ( !it->isEOF() ) {
RecordId currLoc = it->getNext();
- ::mongo::log() << coll->docFor(&_txn, currLoc).toString() << endl;
+ ::mongo::log() << coll->docFor(&_txn, currLoc).value().toString() << endl;
}
delete it;
}
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 92710eff8ee..b28ce38755b 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -102,7 +102,7 @@ namespace {
ASSERT( !iter->isEOF() );
RecordId loc = iter->getNext();
ASSERT( iter->isEOF() );
- ASSERT_EQ( data, coll->docFor( txn, loc ) );
+ ASSERT_EQ( data, coll->docFor( txn, loc ).value() );
}
void assertEmpty( OperationContext* txn, const NamespaceString& nss ) {
Collection* coll = dbHolder().get( txn, nss.db() )->getCollection(nss.ns() );
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 9b387e0bcf7..e416e726dd6 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -645,7 +645,7 @@ namespace mongo {
break;
RecordId dl = *cloneLocsIter;
- BSONObj doc;
+ Snapshotted<BSONObj> doc;
if (!collection->findDoc(txn, dl, &doc)) {
// doc was deleted
continue;
@@ -655,12 +655,13 @@ namespace mongo {
// into consideration the overhead of BSONArray indices, and *always*
// append one doc.
if (clonedDocsArrayBuilder.arrSize() != 0 &&
- clonedDocsArrayBuilder.len() + doc.objsize() + 1024 > BSONObjMaxUserSize) {
+ (clonedDocsArrayBuilder.len() + doc.value().objsize() + 1024)
+ > BSONObjMaxUserSize) {
isBufferFilled = true; // break out of outer while loop
break;
}
- clonedDocsArrayBuilder.append(doc);
+ clonedDocsArrayBuilder.append(doc.value());
}
_cloneLocs.erase(_cloneLocs.begin(), cloneLocsIter);
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index eaaf9926997..fb6a842c566 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -191,7 +191,7 @@ namespace mongo {
// This is a fetch, but it's OK. The underlying code won't throw a page fault
// exception.
- BSONObj obj = collection->docFor(txn, loc);
+ BSONObj obj = collection->docFor(txn, loc).value();
BSONObjIterator j( keyPattern );
BSONElement real;
for ( int x=0; x <= k; x++ )