summary | refs | log | tree | commit | diff
path: root/src/mongo/dbtests
diff options
context:
space:
mode:
author: Pierlauro Sciarelli <pierlauro.sciarelli@mongodb.com> 2020-07-22 10:27:34 +0200
committer: Evergreen Agent <no-reply@evergreen.mongodb.com> 2020-07-22 11:48:25 +0000
commit: 86d94765b9030c9aa9adb3899277456e64bbd348 (patch)
tree: 44c3d16af4417684998a1ca7672522d95fae5fee /src/mongo/dbtests
parent: 3cab87121764327271221b205c481707c97c1c71 (diff)
download: mongo-86d94765b9030c9aa9adb3899277456e64bbd348.tar.gz
SERVER-49321 Make VectorClock::Component protected
Diffstat (limited to 'src/mongo/dbtests')
-rw-r--r-- src/mongo/dbtests/storage_timestamp_tests.cpp | 165
1 file changed, 88 insertions(+), 77 deletions(-)
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index cde7dea323d..45a3a5f5400 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -163,10 +163,9 @@ public:
VectorClockMutable* _clock = VectorClockMutable::get(_opCtx);
// Set up Timestamps in the past, present, and future.
- static constexpr auto ClusterTime = VectorClock::Component::ClusterTime;
- const LogicalTime pastLt = _clock->tick(ClusterTime, 1);
+ const LogicalTime pastLt = _clock->tickClusterTime(1);
const Timestamp pastTs = pastLt.asTimestamp();
- const LogicalTime presentLt = _clock->tick(ClusterTime, 1);
+ const LogicalTime presentLt = _clock->tickClusterTime(1);
const Timestamp presentTs = presentLt.asTimestamp();
const LogicalTime futureLt = presentLt.addTicks(1);
const Timestamp futureTs = futureLt.asTimestamp();
@@ -213,7 +212,7 @@ public:
repl::setOplogCollectionName(_opCtx->getServiceContext());
repl::createOplog(_opCtx);
- _clock->tickTo(ClusterTime, LogicalTime(Timestamp(1, 0)));
+ _clock->tickClusterTimeTo(LogicalTime(Timestamp(1, 0)));
ASSERT_EQUALS(presentTs, pastLt.addTicks(1).asTimestamp());
setReplCoordAppliedOpTime(repl::OpTime(presentTs, presentTerm));
@@ -718,7 +717,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
const std::int32_t docsToInsert = 10;
- const LogicalTime firstInsertTime = _clock->tick(ClusterTime, docsToInsert);
+ const LogicalTime firstInsertTime = _clock->tickClusterTime(docsToInsert);
for (std::int32_t idx = 0; idx < docsToInsert; ++idx) {
BSONObjBuilder result;
ASSERT_OK(applyOps(
@@ -767,7 +766,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
const std::int32_t docsToInsert = 10;
- const LogicalTime firstInsertTime = _clock->tick(ClusterTime, docsToInsert);
+ const LogicalTime firstInsertTime = _clock->tickClusterTime(docsToInsert);
BSONObjBuilder oplogCommonBuilder;
oplogCommonBuilder << "v" << 2 << "op"
@@ -826,7 +825,7 @@ public:
// Insert some documents.
const std::int32_t docsToInsert = 10;
- const LogicalTime firstInsertTime = _clock->tick(ClusterTime, docsToInsert);
+ const LogicalTime firstInsertTime = _clock->tickClusterTime(docsToInsert);
const LogicalTime lastInsertTime = firstInsertTime.addTicks(docsToInsert - 1);
WriteUnitOfWork wunit(_opCtx);
for (std::int32_t num = 0; num < docsToInsert; ++num) {
@@ -839,7 +838,7 @@ public:
ASSERT_EQ(docsToInsert, itCount(autoColl.getCollection()));
// Delete all documents one at a time.
- const LogicalTime startDeleteTime = _clock->tick(ClusterTime, docsToInsert);
+ const LogicalTime startDeleteTime = _clock->tickClusterTime(docsToInsert);
for (std::int32_t num = 0; num < docsToInsert; ++num) {
ASSERT_OK(doNonAtomicApplyOps(
nss.db().toString(),
@@ -873,7 +872,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
// Insert one document that will go through a series of updates.
- const LogicalTime insertTime = _clock->tick(ClusterTime, 1);
+ const LogicalTime insertTime = _clock->tickClusterTime(1);
WriteUnitOfWork wunit(_opCtx);
insertDocument(autoColl.getCollection(),
InsertStatement(BSON("_id" << 0), insertTime.asTimestamp(), 0LL));
@@ -898,7 +897,7 @@ public:
<< "theOtherSet")),
BSON("_id" << 0 << "theMap" << BSON("val" << 1) << "theOtherSet" << BSONArray())}};
- const LogicalTime firstUpdateTime = _clock->tick(ClusterTime, updates.size());
+ const LogicalTime firstUpdateTime = _clock->tickClusterTime(updates.size());
for (std::size_t idx = 0; idx < updates.size(); ++idx) {
ASSERT_OK(doNonAtomicApplyOps(
nss.db().toString(),
@@ -935,7 +934,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- const LogicalTime insertTime = _clock->tick(ClusterTime, 2);
+ const LogicalTime insertTime = _clock->tickClusterTime(2);
// This applyOps runs into an insert of `{_id: 0, field: 0}` followed by a second insert
// on the same collection with `{_id: 0}`. It's expected for this second insert to be
@@ -987,7 +986,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
// Reserve a timestamp before the inserts should happen.
- const LogicalTime preInsertTimestamp = _clock->tick(ClusterTime, 1);
+ const LogicalTime preInsertTimestamp = _clock->tickClusterTime(1);
auto swResult =
doAtomicApplyOps(nss.db().toString(),
{BSON("op"
@@ -1039,7 +1038,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- const LogicalTime preInsertTimestamp = _clock->tick(ClusterTime, 1);
+ const LogicalTime preInsertTimestamp = _clock->tickClusterTime(1);
auto swResult =
doAtomicApplyOps(nss.db().toString(),
{BSON("op"
@@ -1295,10 +1294,10 @@ public:
_coordinatorMock->alwaysAllowWrites(false);
- const LogicalTime pastTime = _clock->tick(ClusterTime, 1);
- const LogicalTime insertTime0 = _clock->tick(ClusterTime, 1);
- const LogicalTime insertTime1 = _clock->tick(ClusterTime, 1);
- const LogicalTime insertTime2 = _clock->tick(ClusterTime, 1);
+ const LogicalTime pastTime = _clock->tickClusterTime(1);
+ const LogicalTime insertTime0 = _clock->tickClusterTime(1);
+ const LogicalTime insertTime1 = _clock->tickClusterTime(1);
+ const LogicalTime insertTime2 = _clock->tickClusterTime(1);
BSONObj doc0 = BSON("_id" << 0 << "a" << 3);
BSONObj doc1 = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2));
@@ -1366,11 +1365,11 @@ public:
_coordinatorMock->alwaysAllowWrites(false);
ASSERT_OK(_coordinatorMock->setFollowerMode({repl::MemberState::MS::RS_STARTUP2}));
- const LogicalTime pastTime = _clock->tick(ClusterTime, 1);
- const LogicalTime insertTime0 = _clock->tick(ClusterTime, 1);
- const LogicalTime indexBuildTime = _clock->tick(ClusterTime, 1);
- const LogicalTime insertTime1 = _clock->tick(ClusterTime, 1);
- const LogicalTime insertTime2 = _clock->tick(ClusterTime, 1);
+ const LogicalTime pastTime = _clock->tickClusterTime(1);
+ const LogicalTime insertTime0 = _clock->tickClusterTime(1);
+ const LogicalTime indexBuildTime = _clock->tickClusterTime(1);
+ const LogicalTime insertTime1 = _clock->tickClusterTime(1);
+ const LogicalTime insertTime2 = _clock->tickClusterTime(1);
BSONObj doc0 = BSON("_id" << 0 << "a" << 3);
BSONObj doc1 = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2));
@@ -1457,7 +1456,7 @@ public:
<< static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
- const LogicalTime pastTime = _clock->tick(ClusterTime, 1);
+ const LogicalTime pastTime = _clock->tickClusterTime(1);
const LogicalTime insertTime = pastTime.addTicks(1);
BSONObj doc = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2));
@@ -1486,7 +1485,7 @@ public:
<< static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
- const LogicalTime pastTime = _clock->tick(ClusterTime, 1);
+ const LogicalTime pastTime = _clock->tickClusterTime(1);
const LogicalTime insertTime = pastTime.addTicks(1);
BSONObj doc = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2));
@@ -1523,14 +1522,15 @@ public:
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
}
- auto presentTs = _clock->getTime()[ClusterTime].asTimestamp();
+ const auto currentTime = _clock->getTime();
+ const auto presentTs = currentTime.clusterTime().asTimestamp();
// This test does not run a real ReplicationCoordinator, so must advance the snapshot
// manager manually.
auto storageEngine = cc().getServiceContext()->getStorageEngine();
storageEngine->getSnapshotManager()->setLastApplied(presentTs);
- const auto beforeTxnTime = _clock->tick(ClusterTime, 1);
+ const auto beforeTxnTime = _clock->tickClusterTime(1);
auto beforeTxnTs = beforeTxnTime.asTimestamp();
const auto multikeyNoopTime = beforeTxnTime.addTicks(1);
auto multikeyNoopTs = multikeyNoopTime.asTimestamp();
@@ -1750,7 +1750,7 @@ public:
auto durableCatalog = storageEngine->getCatalog();
// Declare the database to be in a "synced" state, i.e: in steady-state replication.
- Timestamp syncTime = _clock->tick(ClusterTime, 1).asTimestamp();
+ Timestamp syncTime = _clock->tickClusterTime(1).asTimestamp();
invariant(!syncTime.isNull());
storageEngine->setInitialDataTimestamp(syncTime);
@@ -1779,7 +1779,7 @@ public:
// side-effect of not timestamping the collection creation.
repl::UnreplicatedWritesBlock notReplicated(_opCtx);
if (nss.isReplicated()) {
- TimestampBlock tsBlock(_opCtx, _clock->tick(ClusterTime, 1).asTimestamp());
+ TimestampBlock tsBlock(_opCtx, _clock->tickClusterTime(1).asTimestamp());
reset(nss);
} else {
reset(nss);
@@ -1803,7 +1803,7 @@ public:
// Reserve a tick, this represents a time after the rename in which the `kvDropDatabase`
// ident for `kvDropDatabase` still exists.
- const Timestamp postRenameTime = _clock->tick(ClusterTime, 1).asTimestamp();
+ const Timestamp postRenameTime = _clock->tickClusterTime(1).asTimestamp();
// If the storage engine is managing drops internally, the ident should not be visible after
// a drop.
@@ -1814,7 +1814,7 @@ public:
assertIdentsExistAtTimestamp(durableCatalog, collIdent, indexIdent, postRenameTime);
}
- const Timestamp dropTime = _clock->tick(ClusterTime, 1).asTimestamp();
+ const Timestamp dropTime = _clock->tickClusterTime(1).asTimestamp();
if (SimulatePrimary) {
ASSERT_OK(dropDatabaseForApplyOps(_opCtx, nss.db().toString()));
} else {
@@ -1871,7 +1871,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
RecordId catalogId = autoColl.getCollection()->getCatalogId();
- const LogicalTime insertTimestamp = _clock->tick(ClusterTime, 1);
+ const LogicalTime insertTimestamp = _clock->tickClusterTime(1);
{
WriteUnitOfWork wuow(_opCtx);
insertDocument(autoColl.getCollection(),
@@ -1892,7 +1892,7 @@ public:
indexer.abortIndexBuild(
_opCtx, autoColl.getCollection(), MultiIndexBlock::kNoopOnCleanUpFn);
});
- const LogicalTime beforeIndexBuild = _clock->tick(ClusterTime, 2);
+ const LogicalTime beforeIndexBuild = _clock->tickClusterTime(2);
BSONObj indexInfoObj;
{
// Primaries do not have a wrapping `TimestampBlock`; secondaries do.
@@ -1918,7 +1918,7 @@ public:
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
}
- const LogicalTime afterIndexInit = _clock->tick(ClusterTime, 2);
+ const LogicalTime afterIndexInit = _clock->tickClusterTime(2);
// Inserting all the documents has the side-effect of setting internal state on the index
// builder that the index is multikey.
@@ -1939,8 +1939,9 @@ public:
_opCtx->getServiceContext()->getOpObserver()->onCreateIndex(
_opCtx, nss, autoColl.getCollection()->uuid(), indexSpec, false);
} else {
+ const auto currentTime = _clock->getTime();
ASSERT_OK(_opCtx->recoveryUnit()->setTimestamp(
- _clock->getTime()[ClusterTime].asTimestamp()));
+ currentTime.clusterTime().asTimestamp()));
}
},
MultiIndexBlock::kNoopOnCommitFn));
@@ -1948,7 +1949,7 @@ public:
}
abortOnExit.dismiss();
- const Timestamp afterIndexBuild = _clock->tick(ClusterTime, 1).asTimestamp();
+ const Timestamp afterIndexBuild = _clock->tickClusterTime(1).asTimestamp();
const std::string indexIdent =
getNewIndexIdentAtTime(durableCatalog, origIdents, Timestamp::min());
@@ -2005,7 +2006,7 @@ public:
indexer.abortIndexBuild(
_opCtx, autoColl.getCollection(), MultiIndexBlock::kNoopOnCleanUpFn);
});
- const LogicalTime beforeIndexBuild = _clock->tick(ClusterTime, 2);
+ const LogicalTime beforeIndexBuild = _clock->tickClusterTime(2);
BSONObj indexInfoObj;
{
// Primaries do not have a wrapping `TimestampBlock`; secondaries do.
@@ -2031,11 +2032,11 @@ public:
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
}
- const LogicalTime afterIndexInit = _clock->tick(ClusterTime, 1);
+ const LogicalTime afterIndexInit = _clock->tickClusterTime(1);
// Insert a document that will be intercepted and need to be drained. This timestamp will
// become the lastApplied time.
- const LogicalTime firstInsert = _clock->tick(ClusterTime, 1);
+ const LogicalTime firstInsert = _clock->tickClusterTime(1);
{
WriteUnitOfWork wuow(_opCtx);
insertDocument(autoColl.getCollection(),
@@ -2081,7 +2082,7 @@ public:
}
// Insert a second document that will be intercepted and need to be drained.
- const LogicalTime secondInsert = _clock->tick(ClusterTime, 1);
+ const LogicalTime secondInsert = _clock->tickClusterTime(1);
{
WriteUnitOfWork wuow(_opCtx);
insertDocument(autoColl.getCollection(),
@@ -2094,7 +2095,7 @@ public:
// Advance the lastApplied optime to observe a point before the drain where there are
// un-drained writes.
- const LogicalTime afterSecondInsert = _clock->tick(ClusterTime, 1);
+ const LogicalTime afterSecondInsert = _clock->tickClusterTime(1);
setReplCoordAppliedOpTime(repl::OpTime(afterSecondInsert.asTimestamp(), presentTerm));
ASSERT_OK(indexer.drainBackgroundWrites(_opCtx,
@@ -2126,8 +2127,9 @@ public:
_opCtx->getServiceContext()->getOpObserver()->onCreateIndex(
_opCtx, nss, autoColl.getCollection()->uuid(), indexSpec, false);
} else {
+ const auto currentTime = _clock->getTime();
ASSERT_OK(_opCtx->recoveryUnit()->setTimestamp(
- _clock->getTime()[ClusterTime].asTimestamp()));
+ currentTime.clusterTime().asTimestamp()));
}
},
MultiIndexBlock::kNoopOnCommitFn));
@@ -2166,7 +2168,7 @@ public:
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
- const LogicalTime insertTimestamp = _clock->tick(ClusterTime, 1);
+ const LogicalTime insertTimestamp = _clock->tickClusterTime(1);
WriteUnitOfWork wuow(_opCtx);
insertDocument(autoColl.getCollection(),
@@ -2273,7 +2275,7 @@ public:
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
- const LogicalTime insertTimestamp = _clock->tick(ClusterTime, 1);
+ const LogicalTime insertTimestamp = _clock->tickClusterTime(1);
WriteUnitOfWork wuow(_opCtx);
insertDocument(autoColl.getCollection(),
@@ -2391,8 +2393,8 @@ public:
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
- auto insertTimestamp1 = _clock->tick(ClusterTime, 1);
- auto insertTimestamp2 = _clock->tick(ClusterTime, 1);
+ auto insertTimestamp1 = _clock->tickClusterTime(1);
+ auto insertTimestamp2 = _clock->tickClusterTime(1);
// Insert two documents with the same value for field 'a' so that
// we will fail to create a unique index.
@@ -2490,7 +2492,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
- const LogicalTime insertTimestamp = _clock->tick(ClusterTime, 1);
+ const LogicalTime insertTimestamp = _clock->tickClusterTime(1);
{
WriteUnitOfWork wuow(_opCtx);
insertDocument(autoColl.getCollection(),
@@ -2502,7 +2504,7 @@ public:
}
- const Timestamp beforeIndexBuild = _clock->tick(ClusterTime, 1).asTimestamp();
+ const Timestamp beforeIndexBuild = _clock->tickClusterTime(1).asTimestamp();
// Save the pre-state idents so we can capture the specific ident related to index
// creation.
@@ -2515,7 +2517,7 @@ public:
createIndex(autoColl.getCollection(), str::stream() << key << "_1", BSON(key << 1));
// Timestamps at the completion of each index build.
- afterCreateTimestamps.push_back(_clock->tick(ClusterTime, 1).asTimestamp());
+ afterCreateTimestamps.push_back(_clock->tickClusterTime(1).asTimestamp());
// Add the new ident to the vector and reset the current idents.
indexIdents.push_back(
@@ -2531,7 +2533,8 @@ public:
durableCatalog, "", indexIdents[i], afterCreateTimestamps[i]);
}
- const LogicalTime beforeDropTs = _clock->getTime()[ClusterTime];
+ const auto currentTime = _clock->getTime();
+ const LogicalTime beforeDropTs = currentTime.clusterTime();
// Drop all of the indexes.
BSONObjBuilder result;
@@ -2568,7 +2571,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
- const LogicalTime insertTimestamp = _clock->tick(ClusterTime, 1);
+ const LogicalTime insertTimestamp = _clock->tickClusterTime(1);
{
WriteUnitOfWork wuow(_opCtx);
insertDocument(autoColl.getCollection(),
@@ -2580,7 +2583,7 @@ public:
}
- const Timestamp beforeIndexBuild = _clock->tick(ClusterTime, 1).asTimestamp();
+ const Timestamp beforeIndexBuild = _clock->tickClusterTime(1).asTimestamp();
// Save the pre-state idents so we can capture the specific ident related to index
// creation.
@@ -2593,7 +2596,7 @@ public:
createIndex(autoColl.getCollection(), str::stream() << key << "_1", BSON(key << 1));
// Timestamps at the completion of each index build.
- afterCreateTimestamps.push_back(_clock->tick(ClusterTime, 1).asTimestamp());
+ afterCreateTimestamps.push_back(_clock->tickClusterTime(1).asTimestamp());
// Add the new ident to the vector and reset the current idents.
indexIdents.push_back(
@@ -2609,7 +2612,8 @@ public:
durableCatalog, "", indexIdents[i], afterCreateTimestamps[i]);
}
- const LogicalTime beforeDropTs = _clock->getTime()[ClusterTime];
+ const auto currentTime = _clock->getTime();
+ const LogicalTime beforeDropTs = currentTime.clusterTime();
// Drop all of the indexes.
BSONObjBuilder result;
@@ -2725,7 +2729,7 @@ public:
BSON("_id" << 2 << "a" << BSON_ARRAY(4 << 5) << "b" << BSON_ARRAY(4 << 5));
// NOTE: This test does not test any timestamp reads.
- const LogicalTime insert1 = _clock->tick(ClusterTime, 1);
+ const LogicalTime insert1 = _clock->tickClusterTime(1);
{
LOGV2(22505, "inserting {badDoc1}", "badDoc1"_attr = badDoc1);
WriteUnitOfWork wuow(_opCtx);
@@ -2734,7 +2738,7 @@ public:
wuow.commit();
}
- const LogicalTime insert2 = _clock->tick(ClusterTime, 1);
+ const LogicalTime insert2 = _clock->tickClusterTime(1);
{
LOGV2(22506, "inserting {badDoc2}", "badDoc2"_attr = badDoc2);
WriteUnitOfWork wuow(_opCtx);
@@ -2753,7 +2757,7 @@ public:
const auto buildUUID = UUID::gen();
indexer.setTwoPhaseBuildUUID(buildUUID);
- const LogicalTime indexInit = _clock->tick(ClusterTime, 3);
+ const LogicalTime indexInit = _clock->tickClusterTime(3);
// First, simulate being a secondary. Indexing errors are ignored.
{
@@ -2952,7 +2956,7 @@ public:
NamespaceString nss(dbName, "oplogApplicationOnPrimary");
BSONObj doc = BSON("_id" << 1 << "field" << 1);
- const LogicalTime setupStart = _clock->tick(ClusterTime, 1);
+ const LogicalTime setupStart = _clock->tickClusterTime(1);
UUID collUUID = UUID::gen();
{
@@ -2973,7 +2977,8 @@ public:
auto coll = autoColl.getCollection();
ASSERT(coll);
- const auto presentTs = _clock->getTime()[ClusterTime].asTimestamp();
+ const auto currentTime = _clock->getTime();
+ const auto presentTs = currentTime.clusterTime().asTimestamp();
assertDocumentAtTimestamp(coll, presentTs, doc);
}
@@ -2982,7 +2987,7 @@ public:
// the applyOps command no longer allows createIndexes (see SERVER-41554).
_coordinatorMock->alwaysAllowWrites(false);
{
- const auto beforeBuildTime = _clock->tick(ClusterTime, 2);
+ const auto beforeBuildTime = _clock->tickClusterTime(2);
const auto startBuildTs = beforeBuildTime.addTicks(1).asTimestamp();
// Grab the existing idents to identify the ident created by the index build.
@@ -3200,12 +3205,13 @@ public:
ui = coll->uuid();
}
- presentTs = _clock->getTime()[ClusterTime].asTimestamp();
+ const auto currentTime = _clock->getTime();
+ presentTs = currentTime.clusterTime().asTimestamp();
// This test does not run a real ReplicationCoordinator, so must advance the snapshot
// manager manually.
auto storageEngine = cc().getServiceContext()->getStorageEngine();
storageEngine->getSnapshotManager()->setLastApplied(presentTs);
- const auto beforeTxnTime = _clock->tick(ClusterTime, 1);
+ const auto beforeTxnTime = _clock->tickClusterTime(1);
beforeTxnTs = beforeTxnTime.asTimestamp();
commitEntryTs = beforeTxnTime.addTicks(1).asTimestamp();
@@ -3331,9 +3337,10 @@ private:
class MultiOplogEntryTransaction : public MultiDocumentTransactionTest {
public:
MultiOplogEntryTransaction() : MultiDocumentTransactionTest("multiOplogEntryTransaction") {
- const auto currentTime = _clock->getTime()[ClusterTime];
- firstOplogEntryTs = currentTime.addTicks(1).asTimestamp();
- commitEntryTs = currentTime.addTicks(2).asTimestamp();
+ const auto currentTime = _clock->getTime();
+ const auto clusterTime = currentTime.clusterTime();
+ firstOplogEntryTs = clusterTime.addTicks(1).asTimestamp();
+ commitEntryTs = clusterTime.addTicks(2).asTimestamp();
}
void run() {
@@ -3435,10 +3442,11 @@ class CommitPreparedMultiOplogEntryTransaction : public MultiDocumentTransaction
public:
CommitPreparedMultiOplogEntryTransaction()
: MultiDocumentTransactionTest("preparedMultiOplogEntryTransaction") {
- const auto currentTime = _clock->getTime()[ClusterTime];
- firstOplogEntryTs = currentTime.addTicks(1).asTimestamp();
- prepareEntryTs = currentTime.addTicks(2).asTimestamp();
- commitEntryTs = currentTime.addTicks(3).asTimestamp();
+ const auto currentTime = _clock->getTime();
+ const auto clusterTime = currentTime.clusterTime();
+ firstOplogEntryTs = clusterTime.addTicks(1).asTimestamp();
+ prepareEntryTs = clusterTime.addTicks(2).asTimestamp();
+ commitEntryTs = clusterTime.addTicks(3).asTimestamp();
}
void run() {
@@ -3630,9 +3638,10 @@ class AbortPreparedMultiOplogEntryTransaction : public MultiDocumentTransactionT
public:
AbortPreparedMultiOplogEntryTransaction()
: MultiDocumentTransactionTest("preparedMultiOplogEntryTransaction") {
- const auto currentTime = _clock->getTime()[ClusterTime];
- prepareEntryTs = currentTime.addTicks(1).asTimestamp();
- abortEntryTs = currentTime.addTicks(2).asTimestamp();
+ const auto currentTime = _clock->getTime();
+ const auto clusterTime = currentTime.clusterTime();
+ prepareEntryTs = clusterTime.addTicks(1).asTimestamp();
+ abortEntryTs = clusterTime.addTicks(2).asTimestamp();
}
void run() {
@@ -3739,9 +3748,10 @@ public:
auto txnParticipant = TransactionParticipant::get(_opCtx);
ASSERT(txnParticipant);
- const auto currentTime = _clock->getTime()[ClusterTime];
- const auto prepareTs = currentTime.addTicks(1).asTimestamp();
- commitEntryTs = currentTime.addTicks(2).asTimestamp();
+ const auto currentTime = _clock->getTime();
+ const auto clusterTime = currentTime.clusterTime();
+ const auto prepareTs = clusterTime.addTicks(1).asTimestamp();
+ commitEntryTs = clusterTime.addTicks(2).asTimestamp();
LOGV2(22514, "Prepare TS: {prepareTs}", "prepareTs"_attr = prepareTs);
logTimestamps();
@@ -3840,9 +3850,10 @@ public:
auto txnParticipant = TransactionParticipant::get(_opCtx);
ASSERT(txnParticipant);
- const auto currentTime = _clock->getTime()[ClusterTime];
- const auto prepareTs = currentTime.addTicks(1).asTimestamp();
- const auto abortEntryTs = currentTime.addTicks(2).asTimestamp();
+ const auto currentTime = _clock->getTime();
+ const auto clusterTime = currentTime.clusterTime();
+ const auto prepareTs = clusterTime.addTicks(1).asTimestamp();
+ const auto abortEntryTs = clusterTime.addTicks(2).asTimestamp();
LOGV2(22515, "Prepare TS: {prepareTs}", "prepareTs"_attr = prepareTs);
logTimestamps();