author      James Wahlin <james@mongodb.com>    2018-03-12 15:52:10 -0400
committer   James Wahlin <james@mongodb.com>    2018-03-14 12:04:30 -0400
commit      8727cdfc81358fa829cb86153606b84efca4edfd
tree        de4e06f772684611663f0923db5761fda9468ae9
parent      ebbd763514514d3ed77c25477c61d71df0d91420
download    mongo-8727cdfc81358fa829cb86153606b84efca4edfd.tar.gz
SERVER-33722 readConcern snapshot should return an error for metadata changes
-rw-r--r-- | jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js | 16
-rw-r--r-- | jstests/noPassthrough/readConcern_snapshot.js | 8
-rw-r--r-- | jstests/noPassthrough/read_concern_snapshot_aggregation.js | 8
-rw-r--r-- | src/mongo/db/catalog/index_catalog_impl.cpp | 2
-rw-r--r-- | src/mongo/db/commands/run_aggregate.cpp | 1
-rw-r--r-- | src/mongo/db/db_raii.cpp | 14
-rw-r--r-- | src/mongo/db/pipeline/document_source_out.cpp | 6
-rw-r--r-- | src/mongo/db/read_concern.cpp | 3
-rw-r--r-- | src/mongo/db/storage/kv/kv_engine_test_timestamps.cpp | 15
-rw-r--r-- | src/mongo/db/storage/recovery_unit.h | 26
-rw-r--r-- | src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp | 21
-rw-r--r-- | src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h | 16
-rw-r--r-- | src/mongo/dbtests/storage_timestamp_tests.cpp | 49
13 files changed, 116 insertions, 69 deletions
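The client-visible effect of this change shows up in the jstest updates below: with readConcern level "snapshot", a read at a timestamp older than the collection's minimum visible catalog timestamp now fails with SnapshotUnavailable (the same case previously expected SnapshotTooOld). A minimal shell sketch of that behavior, assuming a single-node replica set started with ReplSetTest; the database name, collection name, and document are illustrative and not taken from this commit:

```javascript
// Sketch only: demonstrates the SnapshotUnavailable error added by this commit.
// Assumes the mongo shell test framework (ReplSetTest); names and data are illustrative.
(function() {
    "use strict";

    const rst = new ReplSetTest({nodes: 1});
    rst.startSet();
    rst.initiate();

    const session =
        rst.getPrimary().getDB("test").getMongo().startSession({causalConsistency: false});
    const sessionDb = session.getDatabase("test");
    const collName = "coll";
    let txnNumber = 0;

    // A majority write creates the collection, so its catalog (minimum visible snapshot)
    // timestamp is well after Timestamp(1, 1).
    assert.commandWorked(sessionDb.runCommand(
        {insert: collName, documents: [{_id: 0}], writeConcern: {w: "majority"}}));

    // A snapshot read at a timestamp older than the collection's catalog minimum is now
    // rejected with SnapshotUnavailable rather than SnapshotTooOld.
    assert.commandFailedWithCode(sessionDb.runCommand({
        find: collName,
        readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)},
        txnNumber: NumberLong(txnNumber++)
    }),
                                 ErrorCodes.SnapshotUnavailable);

    rst.stopSet();
}());
```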
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js b/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js index a94fb0f4737..1c377720ca8 100644 --- a/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js +++ b/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js @@ -129,13 +129,25 @@ })); assert.eq(res.cursor.firstBatch.length, 2, printjson(res)); - // A read at a time that is too old fails. + // A read at a timestamp that is no longer held by the storage engine fails. + // TODO SERVER-31767: Once mongod supports a snapshot window, performing a majority write will + // not be sufficient to make a previous majority commit point stale. + assert.commandWorked( + primaryDB.runCommand({insert: collName, documents: [{}], writeConcern: {w: "majority"}})); assert.commandFailedWithCode(primaryDB.runCommand({ find: collName, - readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)}, + readConcern: {level: "snapshot", atClusterTime: clusterTimeAfter}, txnNumber: NumberLong(primaryTxnNumber++) }), ErrorCodes.SnapshotTooOld); + // A read at a timestamp that is older than our collection catalog min time fails. + assert.commandFailedWithCode(primaryDB.runCommand({ + find: collName, + readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)}, + txnNumber: NumberLong(primaryTxnNumber++) + }), + ErrorCodes.SnapshotUnavailable); + rst.stopSet(); }()); diff --git a/jstests/noPassthrough/readConcern_snapshot.js b/jstests/noPassthrough/readConcern_snapshot.js index 67aee67ae5b..429488b6cd9 100644 --- a/jstests/noPassthrough/readConcern_snapshot.js +++ b/jstests/noPassthrough/readConcern_snapshot.js @@ -45,7 +45,7 @@ session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false}); sessionDb = session.getDatabase(dbName); let txnNumber = 0; - assert.commandWorked(sessionDb.coll.insert({}, {w: 2})); + assert.commandWorked(sessionDb.coll.insert({}, {writeConcern: {w: "majority"}})); assert.commandWorked(sessionDb.runCommand( {find: collName, readConcern: {level: "snapshot"}, txnNumber: NumberLong(txnNumber++)})); @@ -95,7 +95,11 @@ let testDB = rst.getPrimary().getDB(dbName); let coll = testDB.coll; assert.commandWorked(coll.createIndex({geo: "2d"})); - assert.commandWorked(coll.createIndex({haystack: "geoHaystack", a: 1}, {bucketSize: 1})); + assert.commandWorked(testDB.runCommand({ + createIndexes: collName, + indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}], + writeConcern: {w: "majority"} + })); session = testDB.getMongo().startSession({causalConsistency: false}); sessionDb = session.getDatabase(dbName); diff --git a/jstests/noPassthrough/read_concern_snapshot_aggregation.js b/jstests/noPassthrough/read_concern_snapshot_aggregation.js index 9dfa9e3d816..b69e9deff39 100644 --- a/jstests/noPassthrough/read_concern_snapshot_aggregation.js +++ b/jstests/noPassthrough/read_concern_snapshot_aggregation.js @@ -25,7 +25,7 @@ } let txnNumber = NumberLong(0); - assert.commandWorked(sessionDB.runCommand({create: kCollName})); + assert.commandWorked(sessionDB.runCommand({create: kCollName, writeConcern: {w: "majority"}})); function testSnapshotAggFailsWithCode(coll, pipeline, code) { let cmd = {aggregate: coll, pipeline: pipeline, cursor: {}}; @@ -47,7 +47,11 @@ testSnapshotAggFailsWithCode(kCollName, [{$collStats: {}}], kIllegalStageForSnapshotReadCode); // Test that $geoNear is disallowed with snapshot reads. 
- assert.commandWorked(sessionDB.getCollection(kCollName).createIndex({a: "2dsphere"})); + assert.commandWorked(sessionDB.runCommand({ + createIndexes: kCollName, + indexes: [{key: {a: "2dsphere"}, name: "a_2dsphere"}], + writeConcern: {w: "majority"} + })); testSnapshotAggFailsWithCode(kCollName, [{ $geoNear: { diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp index c79a0802fa9..e6d5dfc1523 100644 --- a/src/mongo/db/catalog/index_catalog_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_impl.cpp @@ -1161,7 +1161,7 @@ void IndexCatalogImpl::IndexIteratorImpl::_advance() { if (!_includeUnfinishedIndexes) { if (auto minSnapshot = entry->getMinimumVisibleSnapshot()) { - if (auto mySnapshot = _opCtx->recoveryUnit()->getMajorityCommittedSnapshot()) { + if (auto mySnapshot = _opCtx->recoveryUnit()->getPointInTimeReadTimestamp()) { if (mySnapshot < minSnapshot) { // This index isn't finished in my snapshot. continue; diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp index d33a1a061ab..553f956cda6 100644 --- a/src/mongo/db/commands/run_aggregate.cpp +++ b/src/mongo/db/commands/run_aggregate.cpp @@ -542,6 +542,7 @@ Status runAggregate(OperationContext* opCtx, const bool keepCursor = handleCursorCommand(opCtx, origNss, pin.getCursor(), request, result); if (keepCursor) { + opCtx->setStashedCursor(); cursorFreer.Dismiss(); } } diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp index f42857afb76..00b6a18352d 100644 --- a/src/mongo/db/db_raii.cpp +++ b/src/mongo/db/db_raii.cpp @@ -91,7 +91,7 @@ AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx, if (!minSnapshot) { return; } - auto mySnapshot = opCtx->recoveryUnit()->getMajorityCommittedSnapshot(); + auto mySnapshot = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(); if (!mySnapshot) { return; } @@ -99,6 +99,18 @@ AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx, return; } + auto readConcernLevel = opCtx->recoveryUnit()->getReadConcernLevel(); + if (readConcernLevel == repl::ReadConcernLevel::kSnapshotReadConcern) { + uasserted(ErrorCodes::SnapshotUnavailable, + str::stream() + << "Unable to read from a snapshot due to pending collection catalog " + "changes; please retry the operation. Snapshot timestamp is " + << mySnapshot->toString() + << ". 
Collection minimum is " + << minSnapshot->toString()); + } + invariant(readConcernLevel == repl::ReadConcernLevel::kMajorityReadConcern); + // Yield locks in order to do the blocking call below _autoColl = boost::none; diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp index 50678f82143..27f33f5268c 100644 --- a/src/mongo/db/pipeline/document_source_out.cpp +++ b/src/mongo/db/pipeline/document_source_out.cpp @@ -223,9 +223,11 @@ intrusive_ptr<DocumentSource> DocumentSourceOut::createFromBson( str::stream() << "$out only supports a string argument, not " << typeName(elem.type()), elem.type() == String); + auto readConcernLevel = pExpCtx->opCtx->recoveryUnit()->getReadConcernLevel(); uassert(ErrorCodes::InvalidOptions, - "$out can only be used with the 'local' read concern level", - !pExpCtx->opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()); + "$out can not be used with either a 'majority' or 'snapshot' read concern level", + readConcernLevel != repl::ReadConcernLevel::kMajorityReadConcern && + readConcernLevel != repl::ReadConcernLevel::kSnapshotReadConcern); NamespaceString outputNs(pExpCtx->ns.db().toString() + '.' + elem.str()); uassert(17385, "Can't $out to special collection: " + elem.str(), !outputNs.isSpecial()); diff --git a/src/mongo/db/read_concern.cpp b/src/mongo/db/read_concern.cpp index 4e8c04440f8..132680d37d1 100644 --- a/src/mongo/db/read_concern.cpp +++ b/src/mongo/db/read_concern.cpp @@ -284,7 +284,8 @@ Status waitForReadConcern(OperationContext* opCtx, if (atClusterTime) { - fassert(39345, opCtx->recoveryUnit()->selectSnapshot(atClusterTime->asTimestamp())); + fassert(39345, + opCtx->recoveryUnit()->setPointInTimeReadTimestamp(atClusterTime->asTimestamp())); return Status::OK(); } diff --git a/src/mongo/db/storage/kv/kv_engine_test_timestamps.cpp b/src/mongo/db/storage/kv/kv_engine_test_timestamps.cpp index a99e632a456..dc829ce1e09 100644 --- a/src/mongo/db/storage/kv/kv_engine_test_timestamps.cpp +++ b/src/mongo/db/storage/kv/kv_engine_test_timestamps.cpp @@ -198,17 +198,18 @@ private: TEST_F(SnapshotManagerTests, ConsistentIfNotSupported) { if (snapshotManager) - return; // This test is only for engines that DON'T support SnapshotMangers. + return; // This test is only for engines that DON'T support SnapshotManagers. auto op = makeOperation(); auto ru = op->recoveryUnit(); - ASSERT(!ru->isReadingFromMajorityCommittedSnapshot()); - ASSERT(!ru->getMajorityCommittedSnapshot()); + auto readConcernLevel = ru->getReadConcernLevel(); + ASSERT(readConcernLevel != repl::ReadConcernLevel::kMajorityReadConcern && + readConcernLevel != repl::ReadConcernLevel::kSnapshotReadConcern); } TEST_F(SnapshotManagerTests, FailsWithNoCommittedSnapshot) { if (!snapshotManager) - return; // This test is only for engines that DO support SnapshotMangers. + return; // This test is only for engines that DO support SnapshotManagers. auto op = makeOperation(); auto ru = op->recoveryUnit(); @@ -236,7 +237,7 @@ TEST_F(SnapshotManagerTests, FailsWithNoCommittedSnapshot) { TEST_F(SnapshotManagerTests, FailsAfterDropAllSnapshotsWhileYielded) { if (!snapshotManager) - return; // This test is only for engines that DO support SnapshotMangers. + return; // This test is only for engines that DO support SnapshotManagers. 
auto op = makeOperation(); op->recoveryUnit()->setReadConcernLevelAndReplicationMode( @@ -260,7 +261,7 @@ TEST_F(SnapshotManagerTests, FailsAfterDropAllSnapshotsWhileYielded) { TEST_F(SnapshotManagerTests, BasicFunctionality) { if (!snapshotManager) - return; // This test is only for engines that DO support SnapshotMangers. + return; // This test is only for engines that DO support SnapshotManagers. auto snap0 = fetchAndIncrementTimestamp(); snapshotManager->setCommittedSnapshot(snap0); @@ -315,7 +316,7 @@ TEST_F(SnapshotManagerTests, BasicFunctionality) { TEST_F(SnapshotManagerTests, UpdateAndDelete) { if (!snapshotManager) - return; // This test is only for engines that DO support SnapshotMangers. + return; // This test is only for engines that DO support SnapshotManagers. auto snapBeforeInsert = fetchAndIncrementTimestamp(); diff --git a/src/mongo/db/storage/recovery_unit.h b/src/mongo/db/storage/recovery_unit.h index 846e7015eb9..857885fbf18 100644 --- a/src/mongo/db/storage/recovery_unit.h +++ b/src/mongo/db/storage/recovery_unit.h @@ -114,13 +114,6 @@ public: } /** - * Returns true if we are reading from a majority committed snapshot. - */ - virtual bool isReadingFromMajorityCommittedSnapshot() const { - return false; - } - - /** * Set this operation's readConcern level and replication mode on the recovery unit. */ void setReadConcernLevelAndReplicationMode(repl::ReadConcernLevel readConcernLevel, @@ -138,14 +131,15 @@ public: /** * Returns the Timestamp being used by this recovery unit or boost::none if not reading from - * a majority committed snapshot. - * - * It is possible for reads to occur from later snapshots, but they may not occur from earlier - * snapshots. + * a point in time. Any point in time returned will reflect either: + * - A timestamp set via call to setPointInTimeReadTimestamp() + * - A majority committed snapshot timestamp (chosen by the storage engine when read-majority + * has been enabled via call to obtainMajorityCommittedSnapshot()) */ - virtual boost::optional<Timestamp> getMajorityCommittedSnapshot() const { - dassert(!isReadingFromMajorityCommittedSnapshot()); - return {}; + virtual boost::optional<Timestamp> getPointInTimeReadTimestamp() const { + invariant(_readConcernLevel != repl::ReadConcernLevel::kMajorityReadConcern && + _readConcernLevel != repl::ReadConcernLevel::kSnapshotReadConcern); + return boost::none; } /** @@ -186,9 +180,9 @@ public: } /** - * Chooses which timestamp to use for read transactions. + * Sets which timestamp to use for read transactions. 
*/ - virtual Status selectSnapshot(Timestamp timestamp) { + virtual Status setPointInTimeReadTimestamp(Timestamp timestamp) { return Status(ErrorCodes::CommandNotSupported, "point-in-time reads are not implemented for this storage engine"); } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp index 00f4d8f9dab..5fe19158f9e 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp @@ -223,7 +223,7 @@ SnapshotId WiredTigerRecoveryUnit::getSnapshotId() const { } Status WiredTigerRecoveryUnit::obtainMajorityCommittedSnapshot() { - invariant(isReadingFromMajorityCommittedSnapshot()); + invariant(_isReadingFromPointInTime()); auto snapshotName = _sessionCache->snapshotManager().getMinSnapshotForNextCommittedRead(); if (!snapshotName) { return {ErrorCodes::ReadConcernMajorityNotAvailableYet, @@ -233,9 +233,16 @@ Status WiredTigerRecoveryUnit::obtainMajorityCommittedSnapshot() { return Status::OK(); } -boost::optional<Timestamp> WiredTigerRecoveryUnit::getMajorityCommittedSnapshot() const { - if (!isReadingFromMajorityCommittedSnapshot()) - return {}; +boost::optional<Timestamp> WiredTigerRecoveryUnit::getPointInTimeReadTimestamp() const { + if (!_isReadingFromPointInTime()) + return boost::none; + + if (getReadConcernLevel() == repl::ReadConcernLevel::kSnapshotReadConcern && + !_readAtTimestamp.isNull()) { + return _readAtTimestamp; + } + + invariant(!_majorityCommittedSnapshot.isNull()); return _majorityCommittedSnapshot; } @@ -249,6 +256,8 @@ void WiredTigerRecoveryUnit::_txnOpen() { } WT_SESSION* session = _session->getSession(); + // '_readAtTimestamp' is available outside of a check for readConcern level 'snapshot' to + // accommodate unit testing. if (_readAtTimestamp != Timestamp::min()) { auto status = _sessionCache->snapshotManager().beginTransactionAtTimestamp(_readAtTimestamp, session); @@ -258,7 +267,7 @@ void WiredTigerRecoveryUnit::_txnOpen() { << " is older than the oldest available timestamp."); } uassertStatusOK(status); - } else if (isReadingFromMajorityCommittedSnapshot()) { + } else if (_isReadingFromPointInTime()) { // We reset _majorityCommittedSnapshot to the actual read timestamp used when the // transaction was started. 
_majorityCommittedSnapshot = @@ -319,7 +328,7 @@ void WiredTigerRecoveryUnit::clearCommitTimestamp() { _commitTimestamp = Timestamp(); } -Status WiredTigerRecoveryUnit::selectSnapshot(Timestamp timestamp) { +Status WiredTigerRecoveryUnit::setPointInTimeReadTimestamp(Timestamp timestamp) { _readAtTimestamp = timestamp; return Status::OK(); } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h index 99b11f060a8..5207cab95ab 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h @@ -75,13 +75,7 @@ public: Status obtainMajorityCommittedSnapshot() override; - bool isReadingFromMajorityCommittedSnapshot() const override { - return _replicationMode == repl::ReplicationCoordinator::modeReplSet && - (_readConcernLevel == repl::ReadConcernLevel::kMajorityReadConcern || - _readConcernLevel == repl::ReadConcernLevel::kSnapshotReadConcern); - } - - boost::optional<Timestamp> getMajorityCommittedSnapshot() const override; + boost::optional<Timestamp> getPointInTimeReadTimestamp() const override; SnapshotId getSnapshotId() const override; @@ -93,7 +87,7 @@ public: Timestamp getCommitTimestamp() override; - Status selectSnapshot(Timestamp timestamp) override; + Status setPointInTimeReadTimestamp(Timestamp timestamp) override; void* writingPtr(void* data, size_t len) override; @@ -132,6 +126,12 @@ public: static void appendGlobalStats(BSONObjBuilder& b); private: + bool _isReadingFromPointInTime() const { + return _replicationMode == repl::ReplicationCoordinator::modeReplSet && + (_readConcernLevel == repl::ReadConcernLevel::kMajorityReadConcern || + _readConcernLevel == repl::ReadConcernLevel::kSnapshotReadConcern); + } + void _abort(); void _commit(); diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp index 4fcaf174c3e..74df3f43ebd 100644 --- a/src/mongo/dbtests/storage_timestamp_tests.cpp +++ b/src/mongo/dbtests/storage_timestamp_tests.cpp @@ -155,7 +155,7 @@ public: */ void reset(NamespaceString nss) const { ::mongo::writeConflictRetry(_opCtx, "deleteAll", nss.ns(), [&] { - invariant(_opCtx->recoveryUnit()->selectSnapshot(Timestamp::min()).isOK()); + invariant(_opCtx->recoveryUnit()->setPointInTimeReadTimestamp(Timestamp::min()).isOK()); AutoGetCollection collRaii(_opCtx, nss, LockMode::MODE_X); if (collRaii.getCollection()) { @@ -237,7 +237,7 @@ public: const repl::MinValidDocument& expectedDoc) { auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(ts)); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp(ts)); auto doc = repl::MinValidDocument::parse(IDLParserErrorContext("MinValidDocument"), findOne(coll)); ASSERT_EQ(expectedDoc.getMinValidTimestamp(), doc.getMinValidTimestamp()) @@ -263,7 +263,7 @@ public: const repl::CheckpointTimestampDocument& expectedDoc) { auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(ts)); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp(ts)); auto doc = repl::CheckpointTimestampDocument::parse( IDLParserErrorContext("CheckpointTimestampDocument"), findOne(coll)); ASSERT_EQ(expectedDoc.getCheckpointTimestamp(), doc.getCheckpointTimestamp()) @@ -277,7 +277,7 @@ public: void assertEmptyCollectionAtTimestamp(Collection* coll, const Timestamp& ts) { auto recoveryUnit = _opCtx->recoveryUnit(); 
recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(ts)); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp(ts)); ASSERT_EQ(0, itCount(coll)) << "collection " << coll->ns() << " isn't empty at " << ts.toString() << ". One document is " << findOne(coll); } @@ -288,7 +288,7 @@ public: auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(ts)); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp(ts)); if (expectedDoc.isEmpty()) { ASSERT_EQ(0, itCount(coll)) << "Should not find any documents in " << coll->ns() << " at ts: " << ts; @@ -320,7 +320,7 @@ public: auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(ts)); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp(ts)); // getCollectionIdent() returns the ident for the given namespace in the KVCatalog. // getAllIdents() actually looks in the RecordStore for a list of all idents, and is thus @@ -388,7 +388,7 @@ public: Timestamp timestamp) { auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(_opCtx->recoveryUnit()->selectSnapshot(timestamp)); + ASSERT_OK(_opCtx->recoveryUnit()->setPointInTimeReadTimestamp(timestamp)); auto allIdents = kvCatalog->getAllIdents(_opCtx); if (collIdent.size() > 0) { // Index build test does not pass in a collection ident. @@ -407,7 +407,7 @@ public: Timestamp timestamp) { auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(_opCtx->recoveryUnit()->selectSnapshot(timestamp)); + ASSERT_OK(_opCtx->recoveryUnit()->setPointInTimeReadTimestamp(timestamp)); auto allIdents = kvCatalog->getAllIdents(_opCtx); if (collIdent.size() > 0) { // Index build test does not pass in a collection ident. @@ -444,7 +444,7 @@ public: auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(ts)); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp(ts)); MultikeyPaths actualMultikeyPaths; if (!shouldBeMultikey) { @@ -517,7 +517,8 @@ public: for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) { auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(firstInsertTime.addTicks(idx).asTimestamp())); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp( + firstInsertTime.addTicks(idx).asTimestamp())); BSONObj result; ASSERT(Helpers::getLast(_opCtx, nss.ns().c_str(), result)) << " idx is " << idx; ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(result, BSON("_id" << idx))) @@ -599,7 +600,8 @@ public: for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) { auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(firstInsertTime.addTicks(idx).asTimestamp())); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp( + firstInsertTime.addTicks(idx).asTimestamp())); BSONObj result; ASSERT(Helpers::getLast(_opCtx, nss.ns().c_str(), result)) << " idx is " << idx; ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(result, BSON("_id" << idx))) @@ -665,7 +667,8 @@ public: // at each successive tick counts one less document. 
auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(lastInsertTime.addTicks(num).asTimestamp())); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp( + lastInsertTime.addTicks(num).asTimestamp())); ASSERT_EQ(docsToInsert - num, itCount(autoColl.getCollection())); } } @@ -741,7 +744,8 @@ public: // the series. auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(insertTime.addTicks(idx + 1).asTimestamp())); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp( + insertTime.addTicks(idx + 1).asTimestamp())); auto doc = findOne(autoColl.getCollection()); ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, updates[idx].second)) @@ -799,7 +803,7 @@ public: // Reading at `insertTime` should show the original document, `{_id: 0, field: 0}`. auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(insertTime.asTimestamp())); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp(insertTime.asTimestamp())); auto doc = findOne(autoColl.getCollection()); ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, BSON("_id" << 0 << "field" << 0))) @@ -808,7 +812,7 @@ public: // Reading at `insertTime + 1` should show the second insert that got converted to an // upsert, `{_id: 0}`. recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(insertTime.addTicks(1).asTimestamp())); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp(insertTime.addTicks(1).asTimestamp())); doc = findOne(autoColl.getCollection()); ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, BSON("_id" << 0))) << "Doc: " << doc.toString() << " Expected: {_id: 0}"; @@ -857,7 +861,7 @@ public: // Reading at `preInsertTimestamp` should not find anything. auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(preInsertTimestamp.asTimestamp())); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp(preInsertTimestamp.asTimestamp())); ASSERT_EQ(0, itCount(autoColl.getCollection())) << "Should not observe a write at `preInsertTimestamp`. TS: " << preInsertTimestamp.asTimestamp(); @@ -865,7 +869,8 @@ public: // Reading at `preInsertTimestamp + 1` should observe both inserts. recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(preInsertTimestamp.addTicks(1).asTimestamp())); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp( + preInsertTimestamp.addTicks(1).asTimestamp())); ASSERT_EQ(2, itCount(autoColl.getCollection())) << "Should observe both writes at `preInsertTimestamp + 1`. TS: " << preInsertTimestamp.addTicks(1).asTimestamp(); @@ -919,14 +924,15 @@ public: // Reading at `insertTime` should not see any documents. auto recoveryUnit = _opCtx->recoveryUnit(); recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(preInsertTimestamp.asTimestamp())); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp(preInsertTimestamp.asTimestamp())); ASSERT_EQ(0, itCount(autoColl.getCollection())) << "Should not find any documents at `preInsertTimestamp`. TS: " << preInsertTimestamp.asTimestamp(); // Reading at `preInsertTimestamp + 1` should show the final state of the document. 
recoveryUnit->abandonSnapshot(); - ASSERT_OK(recoveryUnit->selectSnapshot(preInsertTimestamp.addTicks(1).asTimestamp())); + ASSERT_OK(recoveryUnit->setPointInTimeReadTimestamp( + preInsertTimestamp.addTicks(1).asTimestamp())); auto doc = findOne(autoColl.getCollection()); ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, BSON("_id" << 0))) << "Doc: " << doc.toString() << " Expected: {_id: 0}"; @@ -1832,7 +1838,8 @@ public: assertIdentsExistAtTimestamp(kvCatalog, "", indexIdent, afterIndexInit.asTimestamp()); { _opCtx->recoveryUnit()->abandonSnapshot(); - ASSERT_OK(_opCtx->recoveryUnit()->selectSnapshot(afterIndexInit.asTimestamp())); + ASSERT_OK( + _opCtx->recoveryUnit()->setPointInTimeReadTimestamp(afterIndexInit.asTimestamp())); auto collMetaData = kvCatalog->getMetaData(_opCtx, nss.ns()); auto indexMetaData = collMetaData.indexes[collMetaData.findIndexOffset("a_1")]; ASSERT_FALSE(indexMetaData.ready); @@ -1842,7 +1849,7 @@ public: assertIdentsExistAtTimestamp(kvCatalog, "", indexIdent, afterIndexBuild); { _opCtx->recoveryUnit()->abandonSnapshot(); - ASSERT_OK(_opCtx->recoveryUnit()->selectSnapshot(afterIndexBuild)); + ASSERT_OK(_opCtx->recoveryUnit()->setPointInTimeReadTimestamp(afterIndexBuild)); auto collMetaData = kvCatalog->getMetaData(_opCtx, nss.ns()); auto indexMetaData = collMetaData.indexes[collMetaData.findIndexOffset("a_1")]; ASSERT(indexMetaData.ready); |
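The new error message in db_raii.cpp asks the caller to retry, and the test updates above switch catalog-changing setup commands (create, createIndexes) to writeConcern "majority" so the catalog change is already reflected in a committed snapshot before the snapshot read runs. A rough sketch of the retry loop a test or driver could use when it cannot guarantee that ordering; the helper name, arguments, and backoff are illustrative and not part of this patch:

```javascript
// Illustrative helper (not from the patch): retries a readConcern "snapshot" find while
// the collection still has catalog changes that are not visible at the read timestamp.
function findWithSnapshotRetry(conn, dbName, collName, maxAttempts) {
    const session = conn.startSession({causalConsistency: false});
    const sessionDb = session.getDatabase(dbName);
    let txnNumber = 0;
    for (let attempt = 0; attempt < maxAttempts; attempt++) {
        const res = sessionDb.runCommand({
            find: collName,
            readConcern: {level: "snapshot"},
            txnNumber: NumberLong(txnNumber++)
        });
        if (res.ok === 1) {
            return res;  // Snapshot read succeeded.
        }
        // SnapshotUnavailable is the error introduced by this commit for pending
        // collection catalog changes; treat anything else as a hard failure.
        assert.eq(res.code, ErrorCodes.SnapshotUnavailable, tojson(res));
        sleep(100);  // Brief backoff; a newer majority-committed snapshot should appear.
    }
    throw new Error("snapshot read still failing after " + maxAttempts + " attempts");
}
```

A caller might invoke something like findWithSnapshotRetry(db.getMongo(), "test", "coll", 10) after a createIndexes that was not run with writeConcern "majority"; once the index's catalog entry is reflected in a majority-committed snapshot, the read stops returning SnapshotUnavailable.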