author     Israel Hsu <israel.hsu@mongodb.com>               2022-10-26 19:03:26 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-11-18 12:13:20 +0000
commit     9ce81dcabd0fd0930c8a44df47551285124ba4eb (patch)
tree       0edaa1e57ae3029fcadeced2c249901b91aa3317 /src/mongo/db
parent     4cfd9b936b68622274e39100b7859ea8eb089ad8 (diff)
SERVER-69237 Make preImage doc available to CollectionUpdateArgs
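The change makes the pre-image document a required, immutable member of CollectionUpdateArgs: the default constructor is deleted, preImageDoc becomes a const BSONObj, and the write path stops filling it in lazily. A minimal before/after sketch of the caller-side migration implied by the hunks below (the surrounding names are illustrative only):

    // Before: the pre-image was a boost::optional, set only on some paths.
    CollectionUpdateArgs argsBefore;
    argsBefore.criteria = BSON("_id" << 0);
    argsBefore.preImageDoc = oldDoc.getOwned();  // e.g. retryable findAndModify

    // After: the pre-image must be supplied at construction time.
    CollectionUpdateArgs argsAfter{oldDoc};
    argsAfter.criteria = BSON("_id" << 0);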
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/catalog/collection.h | 5
-rw-r--r--  src/mongo/db/catalog/collection_test.cpp | 4
-rw-r--r--  src/mongo/db/catalog/collection_write_path.cpp | 14
-rw-r--r--  src/mongo/db/catalog/views_for_database.cpp | 4
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 2
-rw-r--r--  src/mongo/db/exec/update_stage.cpp | 91
-rw-r--r--  src/mongo/db/exec/update_stage.h | 27
-rw-r--r--  src/mongo/db/op_observer/op_observer_impl.cpp | 14
-rw-r--r--  src/mongo/db/op_observer/op_observer_impl_test.cpp | 201
-rw-r--r--  src/mongo/db/op_observer/user_write_block_mode_op_observer_test.cpp | 5
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp | 5
-rw-r--r--  src/mongo/db/repl/storage_timestamp_test.cpp | 12
-rw-r--r--  src/mongo/db/repl/tenant_migration_donor_service.cpp | 2
-rw-r--r--  src/mongo/db/s/query_analysis_op_observer.cpp | 4
-rw-r--r--  src/mongo/db/s/sharding_recovery_service_test.cpp | 6
-rw-r--r--  src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp | 15
-rw-r--r--  src/mongo/db/serverless/shard_split_donor_service.cpp | 2
-rw-r--r--  src/mongo/db/transaction/transaction_participant.cpp | 4
18 files changed, 222 insertions, 195 deletions
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 7db6915cd86..3cf710b7509 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -65,13 +65,16 @@ class CollectionPtr;
struct CollectionUpdateArgs {
enum class StoreDocOption { None, PreImage, PostImage };
+ CollectionUpdateArgs() = delete;
+ CollectionUpdateArgs(BSONObj preImageDoc_) : preImageDoc(preImageDoc_) {}
+
std::vector<StmtId> stmtIds = {kUninitializedStmtId};
// The unique sample id for this update if it has been chosen for sampling.
boost::optional<UUID> sampleId;
// The document before modifiers were applied.
- boost::optional<BSONObj> preImageDoc;
+ const BSONObj preImageDoc;
// Fully updated document with damages (update modifiers) applied.
BSONObj updatedDoc;
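Because preImageDoc is now a plain const BSONObj rather than a boost::optional<BSONObj>, presence checks elsewhere in this diff switch from optional engagement to BSONObj emptiness. A sketch of the pattern (the op_observer_impl.cpp hunks below show the real call sites):

    // Old: absence modeled by a disengaged optional.
    if (args->preImageDoc) {
        operation.setPreImage(args->preImageDoc->getOwned());
    }

    // New: an unset pre-image would surface as an empty BSONObj.
    if (!args->preImageDoc.isEmpty()) {
        operation.setPreImage(args->preImageDoc.getOwned());
    }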
diff --git a/src/mongo/db/catalog/collection_test.cpp b/src/mongo/db/catalog/collection_test.cpp
index f03632ceaab..48c653f975f 100644
--- a/src/mongo/db/catalog/collection_test.cpp
+++ b/src/mongo/db/catalog/collection_test.cpp
@@ -277,7 +277,7 @@ TEST_F(CollectionTest, VerifyIndexIsUpdated) {
{
WriteUnitOfWork wuow(opCtx);
Snapshotted<BSONObj> oldSnap(opCtx->recoveryUnit()->getSnapshotId(), oldDoc);
- CollectionUpdateArgs args;
+ CollectionUpdateArgs args{oldDoc};
collection_internal::updateDocument(
opCtx, coll, oldRecordId, oldSnap, newDoc, true, nullptr, &args);
wuow.commit();
@@ -321,7 +321,7 @@ TEST_F(CollectionTest, VerifyIndexIsUpdatedWithDamages) {
{
WriteUnitOfWork wuow(opCtx);
Snapshotted<BSONObj> oldSnap(opCtx->recoveryUnit()->getSnapshotId(), oldDoc);
- CollectionUpdateArgs args;
+ CollectionUpdateArgs args{oldDoc};
auto newDocStatus =
collection_internal::updateDocumentWithDamages(opCtx,
coll,
diff --git a/src/mongo/db/catalog/collection_write_path.cpp b/src/mongo/db/catalog/collection_write_path.cpp
index 9a11ba7ce2a..266f31f42eb 100644
--- a/src/mongo/db/catalog/collection_write_path.cpp
+++ b/src/mongo/db/catalog/collection_write_path.cpp
@@ -446,12 +446,6 @@ RecordId updateDocument(OperationContext* opCtx,
if (!oldId.eoo() && SimpleBSONElementComparator::kInstance.evaluate(oldId != newDoc["_id"]))
uasserted(13596, "in Collection::updateDocument _id mismatch");
- // The preImageDoc may not be boost::none if this update was a retryable findAndModify or if
- // the update may have changed the shard key. For non-in-place updates we always set the
- // preImageDoc here to an owned copy of the pre-image.
- if (!args->preImageDoc) {
- args->preImageDoc = oldDoc.value().getOwned();
- }
args->changeStreamPreAndPostImagesEnabledForCollection =
collection->isChangeStreamPreAndPostImagesEnabled();
@@ -485,7 +479,7 @@ RecordId updateDocument(OperationContext* opCtx,
uassertStatusOK(collection->getIndexCatalog()->updateRecord(opCtx,
collection,
- *args->preImageDoc,
+ args->preImageDoc,
newDoc,
oldLocation,
&keysInserted,
@@ -525,12 +519,6 @@ StatusWith<BSONObj> updateDocumentWithDamages(OperationContext* opCtx,
invariant(oldDoc.snapshotId() == opCtx->recoveryUnit()->getSnapshotId());
invariant(collection->updateWithDamagesSupported());
- // For in-place updates we need to grab an owned copy of the pre-image doc if pre-image
- // recording is enabled and we haven't already set the pre-image due to this update being
- // a retryable findAndModify or a possible update to the shard key.
- if (!args->preImageDoc && collection->isChangeStreamPreAndPostImagesEnabled()) {
- args->preImageDoc = oldDoc.value().getOwned();
- }
OplogUpdateEntryArgs onUpdateArgs(args, collection);
const bool setNeedsRetryImageOplogField =
args->storeDocOption != CollectionUpdateArgs::StoreDocOption::None;
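With the lazy getOwned() copies deleted above, producing an owned pre-image before entering the write path is now the caller's responsibility. The resulting call shape, mirroring the updated collection_test.cpp (this assumes an open WriteUnitOfWork and that oldDoc is already owned):

    Snapshotted<BSONObj> oldSnap(opCtx->recoveryUnit()->getSnapshotId(), oldDoc);
    CollectionUpdateArgs args{oldDoc};  // pre-image supplied up front
    collection_internal::updateDocument(
        opCtx, coll, oldRecordId, oldSnap, newDoc, true, nullptr, &args);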
diff --git a/src/mongo/db/catalog/views_for_database.cpp b/src/mongo/db/catalog/views_for_database.cpp
index 2d9ab5ad9da..e41ed08f9db 100644
--- a/src/mongo/db/catalog/views_for_database.cpp
+++ b/src/mongo/db/catalog/views_for_database.cpp
@@ -323,9 +323,9 @@ Status ViewsForDatabase::_upsertIntoCatalog(OperationContext* opCtx,
return status;
}
} else {
- CollectionUpdateArgs args;
- args.update = viewObj;
+ CollectionUpdateArgs args(oldView.value());
args.criteria = BSON("_id" << NamespaceStringUtil::serialize(view.name()));
+ args.update = viewObj;
collection_internal::updateDocument(opCtx,
systemViews,
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 154da0a09c7..0aad03060cd 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -384,7 +384,7 @@ bool Helpers::findByIdAndNoopUpdate(OperationContext* opCtx,
// BSONObj because that's a second way OpObserverImpl::onUpdate() detects and ignores no-op
// updates.
repl::UnreplicatedWritesBlock uwb(opCtx);
- CollectionUpdateArgs args;
+ CollectionUpdateArgs args(snapshottedDoc.value());
args.criteria = idQuery;
args.update = BSONObj();
collection_internal::updateDocument(
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 2a6c969c110..7e6423854c6 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -138,8 +138,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj,
UpdateDriver* driver = _params.driver;
CanonicalQuery* cq = _params.canonicalQuery;
- // If asked to return new doc, default to the oldObj, in case nothing changes.
- BSONObj newObj = oldObj.value();
+ const BSONObj& oldObjValue = oldObj.value();
// Ask the driver to apply the mods. It may be that the driver can apply those "in
// place", that is, some values of the old document just get adjusted without any
@@ -147,7 +146,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj,
// is needed to accommodate the new bson layout of the resulting document. In any event,
// only enable in-place mutations if the underlying storage engine offers support for
// writing damage events.
- _doc.reset(oldObj.value(),
+ _doc.reset(oldObjValue,
(collection()->updateWithDamagesSupported()
? mutablebson::Document::kInPlaceEnabled
: mutablebson::Document::kInPlaceDisabled));
@@ -191,7 +190,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj,
matchDetails.requestElemMatchKey();
dassert(cq);
- verify(cq->root()->matchesBSON(oldObj.value(), &matchDetails));
+ verify(cq->root()->matchesBSON(oldObjValue, &matchDetails));
std::string matchedField;
if (matchDetails.hasElemMatchKey())
@@ -232,10 +231,13 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj,
docWasModified = false;
}
+ BSONObj newObj;
+
+
if (docWasModified) {
// Prepare to write back the modified document
RecordId newRecordId;
- CollectionUpdateArgs args;
+ CollectionUpdateArgs args{oldObjValue};
if (!request->explain()) {
args.stmtIds = request->getStmtIds();
@@ -245,18 +247,15 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj,
auto scopedCss = CollectionShardingState::assertCollectionLockedAndAcquire(
opCtx(), collection()->ns());
auto collDesc = scopedCss->getCollectionDescription(opCtx());
- args.criteria = collDesc.extractDocumentKey(newObj);
+ args.criteria = collDesc.extractDocumentKey(oldObjValue);
} else {
- const auto docId = newObj[idFieldName];
- args.criteria = docId ? docId.wrap() : newObj;
+ const auto docId = oldObjValue[idFieldName];
+ args.criteria = docId ? docId.wrap() : oldObjValue;
}
uassert(16980,
"Multi-update operations require all documents to have an '_id' field",
!request->isMulti() || args.criteria.hasField("_id"_sd));
args.storeDocOption = getStoreDocMode(*request);
- if (args.storeDocOption == CollectionUpdateArgs::StoreDocOption::PreImage) {
- args.preImageDoc = oldObj.value().getOwned();
- }
}
// Ensure we set the type correctly
@@ -266,14 +265,12 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj,
if (inPlace) {
if (!request->explain()) {
- newObj = oldObj.value();
const RecordData oldRec(oldObj.value().objdata(), oldObj.value().objsize());
Snapshotted<RecordData> snap(oldObj.snapshotId(), oldRec);
- if (_isUserInitiatedWrite &&
- checkUpdateChangesShardKeyFields(boost::none, oldObj) && !args.preImageDoc) {
- args.preImageDoc = oldObj.value().getOwned();
+ if (_isUserInitiatedWrite) {
+ checkUpdateChangesShardKeyFields(boost::none /* newObj */, oldObj);
}
WriteUnitOfWork wunit(opCtx());
@@ -304,11 +301,9 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj,
}
if (!request->explain()) {
- if (_isUserInitiatedWrite && checkUpdateChangesShardKeyFields(newObj, oldObj) &&
- !args.preImageDoc) {
- args.preImageDoc = oldObj.value().getOwned();
+ if (_isUserInitiatedWrite) {
+ checkUpdateChangesShardKeyFields(newObj, oldObj);
}
-
WriteUnitOfWork wunit(opCtx());
newRecordId = collection_internal::updateDocument(opCtx(),
collection(),
@@ -342,6 +337,13 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj,
_specificStats.nModified += _params.numStatsForDoc ? _params.numStatsForDoc(newObj) : 1;
}
+ // If not modified or explaining only, then there are no changes, so default to
+ // returning oldObj.
+ if (!docWasModified || request->explain()) {
+ newObj = oldObjValue;
+ }
+ invariant(!newObj.isEmpty());
+
return newObj;
}
@@ -506,6 +508,8 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// Ensure that the BSONObj underlying the WorkingSetMember is owned because saveState()
// is allowed to free the memory.
member->makeObjOwnedIfNeeded();
+ BSONObj oldObj = member->doc.value().toBson();
+ invariant(oldObj.isOwned());
// Save state before making changes.
handlePlanStageYield(expCtx(),
@@ -519,13 +523,8 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// yieldHandler
std::terminate();
});
-
// If we care about the pre-updated version of the doc, save it out here.
- BSONObj oldObj;
SnapshotId oldSnapshot = member->doc.snapshotId();
- if (_params.request->shouldReturnOldDocs()) {
- oldObj = member->doc.value().toBson().getOwned();
- }
BSONObj newObj;
@@ -536,8 +535,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
collection()->ns().ns(),
[&] {
// Do the update, get us the new version of the doc.
- newObj = transformAndUpdate(
- {oldSnapshot, member->doc.value().toBson()}, recordId, writeToOrphan);
+ newObj = transformAndUpdate({oldSnapshot, oldObj}, recordId, writeToOrphan);
return PlanStage::NEED_TIME;
},
[&] {
@@ -567,7 +565,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// Set member's obj to be the doc we want to return.
if (_params.request->shouldReturnAnyDocs()) {
if (_params.request->shouldReturnNewDocs()) {
- member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), newObj.getOwned());
+ member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), newObj);
} else {
invariant(_params.request->shouldReturnOldDocs());
member->resetDocument(oldSnapshot, oldObj);
@@ -722,21 +720,20 @@ void UpdateStage::_checkRestrictionsOnUpdatingShardKeyAreNotViolated(
}
}
-bool UpdateStage::wasReshardingKeyUpdated(const ShardingWriteRouter& shardingWriteRouter,
- const BSONObj& newObj,
- const Snapshotted<BSONObj>& oldObj) {
+void UpdateStage::checkUpdateChangesReshardingKey(const ShardingWriteRouter& shardingWriteRouter,
+ const BSONObj& newObj,
+ const Snapshotted<BSONObj>& oldObj) {
const auto& collDesc = shardingWriteRouter.getCollDesc();
- invariant(collDesc);
auto reshardingKeyPattern = collDesc->getReshardingKeyIfShouldForwardOps();
if (!reshardingKeyPattern)
- return false;
+ return;
auto oldShardKey = reshardingKeyPattern->extractShardKeyFromDoc(oldObj.value());
auto newShardKey = reshardingKeyPattern->extractShardKeyFromDoc(newObj);
if (newShardKey.binaryEqual(oldShardKey))
- return false;
+ return;
FieldRefSet shardKeyPaths(collDesc->getKeyPatternFields());
_checkRestrictionsOnUpdatingShardKeyAreNotViolated(*collDesc, shardKeyPaths);
@@ -749,11 +746,9 @@ bool UpdateStage::wasReshardingKeyUpdated(const ShardingWriteRouter& shardingWri
oldObj.value(), newObj, false /* upsert */, collection()->ns(), collection()->uuid()),
"This update would cause the doc to change owning shards under the new shard key",
oldRecipShard == newRecipShard);
-
- return true;
}
-bool UpdateStage::checkUpdateChangesShardKeyFields(const boost::optional<BSONObj>& newObjCopy,
+void UpdateStage::checkUpdateChangesShardKeyFields(const boost::optional<BSONObj>& newObjCopy,
const Snapshotted<BSONObj>& oldObj) {
ShardingWriteRouter shardingWriteRouter(
opCtx(), collection()->ns(), Grid::get(opCtx())->catalogCache());
@@ -762,7 +757,7 @@ bool UpdateStage::checkUpdateChangesShardKeyFields(const boost::optional<BSONObj
// css can be null when this is a config server.
if (css == nullptr) {
- return false;
+ return;
}
const auto collDesc = css->getCollectionDescription(opCtx());
@@ -771,25 +766,21 @@ bool UpdateStage::checkUpdateChangesShardKeyFields(const boost::optional<BSONObj
// can be expensive for larger documents, so we skip calling it when the collection isn't even
// sharded.
if (!collDesc.isSharded()) {
- return false;
+ return;
}
const auto& newObj = newObjCopy ? *newObjCopy : _doc.getObject();
// It is possible that both the existing and new shard keys are being updated, so we do not want
// to short-circuit checking whether either is being modified.
- bool existingShardKeyUpdated = wasExistingShardKeyUpdated(shardingWriteRouter, newObj, oldObj);
- bool reshardingKeyUpdated = wasReshardingKeyUpdated(shardingWriteRouter, newObj, oldObj);
-
- return existingShardKeyUpdated || reshardingKeyUpdated;
+ checkUpdateChangesExistingShardKey(shardingWriteRouter, newObj, oldObj);
+ checkUpdateChangesReshardingKey(shardingWriteRouter, newObj, oldObj);
}
-bool UpdateStage::wasExistingShardKeyUpdated(const ShardingWriteRouter& shardingWriteRouter,
- const BSONObj& newObj,
- const Snapshotted<BSONObj>& oldObj) {
+void UpdateStage::checkUpdateChangesExistingShardKey(const ShardingWriteRouter& shardingWriteRouter,
+ const BSONObj& newObj,
+ const Snapshotted<BSONObj>& oldObj) {
const auto& collDesc = shardingWriteRouter.getCollDesc();
- invariant(collDesc);
-
const auto& shardKeyPattern = collDesc->getShardKeyPattern();
auto oldShardKey = shardKeyPattern.extractShardKeyFromDoc(oldObj.value());
@@ -799,7 +790,7 @@ bool UpdateStage::wasExistingShardKeyUpdated(const ShardingWriteRouter& sharding
// Using BSONObj::binaryEqual() still allows a missing shard key field to be filled in with an
// explicit null value.
if (newShardKey.binaryEqual(oldShardKey)) {
- return false;
+ return;
}
FieldRefSet shardKeyPaths(collDesc->getKeyPatternFields());
@@ -833,10 +824,6 @@ bool UpdateStage::wasExistingShardKeyUpdated(const ShardingWriteRouter& sharding
collection()->uuid()),
"This update would cause the doc to change owning shards");
}
-
- // We passed all checks, so we will return that this update changes the shard key field, and
- // the updated document will remain on the same node.
- return true;
}
} // namespace mongo
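The renames from wasExistingShardKeyUpdated/wasReshardingKeyUpdated to check* match the new contract: the boolean result was only consumed to decide whether to copy the pre-image, which is now always available, so the helpers return void and report violations solely by throwing. The simplified call site, as it appears above:

    if (_isUserInitiatedWrite) {
        // uasserts if the update removes shard key fields or would move the
        // document to a different owning shard; otherwise a no-op.
        checkUpdateChangesShardKeyFields(newObj, oldObj);
    }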
diff --git a/src/mongo/db/exec/update_stage.h b/src/mongo/db/exec/update_stage.h
index 74885df5a6c..c69970dec6c 100644
--- a/src/mongo/db/exec/update_stage.h
+++ b/src/mongo/db/exec/update_stage.h
@@ -165,14 +165,18 @@ private:
void prepareToRetryWSM(WorkingSetID idToRetry, WorkingSetID* out);
/**
- * Returns true if the owning shard under the current key pattern would change as a result of
- * the update, or if the destined recipient under the new shard key pattern from resharding
- * would change as a result of the update, and returns false otherwise.
+ * Performs checks on whether the existing or new shard key fields would change the owning
+ * shard, including whether the owning shard under the current key pattern would change as a
+ * result of the update, or if the destined recipient under the new shard key pattern from
+ * resharding would change as a result of the update.
+ *
+ * Throws if the updated document does not have all of the shard key fields or no longer belongs
+ * to this shard.
*
* Accepting a 'newObjCopy' parameter is a performance enhancement for updates which weren't
* performed in-place to avoid rendering a full copy of the updated document multiple times.
*/
- bool checkUpdateChangesShardKeyFields(const boost::optional<BSONObj>& newObjCopy,
+ void checkUpdateChangesShardKeyFields(const boost::optional<BSONObj>& newObjCopy,
const Snapshotted<BSONObj>& oldObj);
/**
@@ -182,17 +186,14 @@ private:
* doc no longer belongs to this shard, this means that one or more shard key field values have
* been updated to a value belonging to a chunk that is not owned by this shard. We cannot apply
* this update atomically.
- *
- * If the update changes shard key fields but the new shard key remains on the same node,
- * returns true. If the update does not change shard key fields, returns false.
*/
- bool wasExistingShardKeyUpdated(const ShardingWriteRouter& shardingWriteRouter,
- const BSONObj& newObj,
- const Snapshotted<BSONObj>& oldObj);
+ void checkUpdateChangesExistingShardKey(const ShardingWriteRouter& shardingWriteRouter,
+ const BSONObj& newObj,
+ const Snapshotted<BSONObj>& oldObj);
- bool wasReshardingKeyUpdated(const ShardingWriteRouter& shardingWriteRouter,
- const BSONObj& newObj,
- const Snapshotted<BSONObj>& oldObj);
+ void checkUpdateChangesReshardingKey(const ShardingWriteRouter& shardingWriteRouter,
+ const BSONObj& newObj,
+ const Snapshotted<BSONObj>& oldObj);
// If not WorkingSet::INVALID_ID, we use this rather than asking our child what to do next.
WorkingSetID _idRetrying;
diff --git a/src/mongo/db/op_observer/op_observer_impl.cpp b/src/mongo/db/op_observer/op_observer_impl.cpp
index 98b96b745ce..cce3608b77e 100644
--- a/src/mongo/db/op_observer/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer/op_observer_impl.cpp
@@ -815,8 +815,8 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
if (inRetryableInternalTransaction) {
operation.setInitializedStatementIds(args.updateArgs->stmtIds);
if (args.updateArgs->storeDocOption == CollectionUpdateArgs::StoreDocOption::PreImage) {
- invariant(args.updateArgs->preImageDoc);
- operation.setPreImage(args.updateArgs->preImageDoc->getOwned());
+ invariant(!args.updateArgs->preImageDoc.isEmpty());
+ operation.setPreImage(args.updateArgs->preImageDoc.getOwned());
operation.setPreImageRecordedForRetryableInternalTransaction();
if (args.retryableFindAndModifyLocation ==
RetryableFindAndModifyLocation::kSideCollection) {
@@ -835,8 +835,8 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
}
if (args.updateArgs->changeStreamPreAndPostImagesEnabledForCollection) {
- invariant(args.updateArgs->preImageDoc);
- operation.setPreImage(args.updateArgs->preImageDoc->getOwned());
+ invariant(!args.updateArgs->preImageDoc.isEmpty());
+ operation.setPreImage(args.updateArgs->preImageDoc.getOwned());
operation.setChangeStreamPreImageRecordingMode(
ChangeStreamPreImageRecordingMode::kPreImagesCollection);
}
@@ -877,7 +877,7 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
// If the oplog entry has `needsRetryImage`, copy the image into image collection.
const BSONObj& dataImage = [&]() {
if (oplogEntry.getNeedsRetryImage().value() == repl::RetryImageEnum::kPreImage) {
- return args.updateArgs->preImageDoc.value();
+ return args.updateArgs->preImageDoc;
} else {
return args.updateArgs->updatedDoc;
}
@@ -906,10 +906,10 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
args.updateArgs->source != OperationSource::kFromMigrate &&
!args.coll->ns().isTemporaryReshardingCollection()) {
const auto& preImageDoc = args.updateArgs->preImageDoc;
- tassert(5868600, "PreImage must be set", preImageDoc && !preImageDoc.value().isEmpty());
+ tassert(5868600, "PreImage must be set", !preImageDoc.isEmpty());
ChangeStreamPreImageId id(args.coll->uuid(), opTime.writeOpTime.getTimestamp(), 0);
- ChangeStreamPreImage preImage(id, opTime.wallClockTime, preImageDoc.value());
+ ChangeStreamPreImage preImage(id, opTime.wallClockTime, preImageDoc);
ChangeStreamPreImagesCollectionManager::insertPreImage(
opCtx, args.coll->ns().tenantId(), preImage);
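Net effect in the op observer: the change-stream path consumes the pre-image directly instead of unwrapping an optional. A condensed sketch of the insertion above; the gating flag shown here is an assumption, and the real guard additionally excludes kFromMigrate writes and temporary resharding collections:

    // Assumed gate; see the hunk above for the additional exclusions.
    if (args.updateArgs->changeStreamPreAndPostImagesEnabledForCollection) {
        const auto& preImageDoc = args.updateArgs->preImageDoc;
        tassert(5868600, "PreImage must be set", !preImageDoc.isEmpty());
        ChangeStreamPreImageId id(args.coll->uuid(), opTime.writeOpTime.getTimestamp(), 0);
        ChangeStreamPreImage preImage(id, opTime.wallClockTime, preImageDoc);
        ChangeStreamPreImagesCollectionManager::insertPreImage(
            opCtx, args.coll->ns().tenantId(), preImage);
    }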
diff --git a/src/mongo/db/op_observer/op_observer_impl_test.cpp b/src/mongo/db/op_observer/op_observer_impl_test.cpp
index fe7ff8950e3..a627b61bf7b 100644
--- a/src/mongo/db/op_observer/op_observer_impl_test.cpp
+++ b/src/mongo/db/op_observer/op_observer_impl_test.cpp
@@ -913,12 +913,15 @@ TEST_F(OpObserverTest, SingleStatementUpdateTestIncludesTenantId) {
RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true);
RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true);
- CollectionUpdateArgs updateArgs;
+ const auto criteria = BSON("_id" << 0);
+ // Create a fake preImageDoc; the tested code path does not care about this value.
+ const auto preImageDoc = criteria;
+ CollectionUpdateArgs updateArgs{preImageDoc};
+ updateArgs.criteria = criteria;
updateArgs.updatedDoc = BSON("_id" << 0 << "data"
<< "x");
updateArgs.update = BSON("$set" << BSON("data"
<< "x"));
- updateArgs.criteria = BSON("_id" << 0);
auto opCtx = cc().makeOperationContext();
WriteUnitOfWork wuow(opCtx.get());
@@ -1272,13 +1275,16 @@ TEST_F(OpObserverTransactionTest, TransactionalPrepareTest) {
<< "y"));
opObserver().onInserts(opCtx(), *autoColl1, inserts1.begin(), inserts1.end(), false);
- CollectionUpdateArgs updateArgs2;
+ const auto criteria = BSON("_id" << 0);
+ // Create a fake preImageDoc; the tested code path does not care about this value.
+ const auto preImageDoc = criteria;
+ CollectionUpdateArgs updateArgs2{preImageDoc};
+ updateArgs2.criteria = criteria;
updateArgs2.stmtIds = {1};
updateArgs2.updatedDoc = BSON("_id" << 0 << "data"
<< "y");
updateArgs2.update = BSON("$set" << BSON("data"
<< "y"));
- updateArgs2.criteria = BSON("_id" << 0);
OplogUpdateEntryArgs update2(&updateArgs2, *autoColl2);
opObserver().onUpdate(opCtx(), update2);
@@ -1797,22 +1803,26 @@ TEST_F(OpObserverTransactionTest, TransactionalUpdateTest) {
AutoGetCollection autoColl1(opCtx(), nss1, MODE_IX);
AutoGetCollection autoColl2(opCtx(), nss2, MODE_IX);
- CollectionUpdateArgs updateArgs1;
+ const auto criteria1 = BSON("_id" << 0);
+ const auto preImageDoc1 = criteria1;
+ CollectionUpdateArgs updateArgs1{preImageDoc1};
+ updateArgs1.criteria = criteria1;
updateArgs1.stmtIds = {0};
updateArgs1.updatedDoc = BSON("_id" << 0 << "data"
<< "x");
updateArgs1.update = BSON("$set" << BSON("data"
<< "x"));
- updateArgs1.criteria = BSON("_id" << 0);
OplogUpdateEntryArgs update1(&updateArgs1, *autoColl1);
- CollectionUpdateArgs updateArgs2;
+ const auto criteria2 = BSON("_id" << 1);
+ const auto preImageDoc2 = criteria2;
+ CollectionUpdateArgs updateArgs2{preImageDoc2};
+ updateArgs2.criteria = criteria2;
updateArgs2.stmtIds = {1};
updateArgs2.updatedDoc = BSON("_id" << 1 << "data"
<< "y");
updateArgs2.update = BSON("$set" << BSON("data"
<< "y"));
- updateArgs2.criteria = BSON("_id" << 1);
OplogUpdateEntryArgs update2(&updateArgs2, *autoColl2);
opObserver().onUpdate(opCtx(), update1);
@@ -1852,22 +1862,26 @@ TEST_F(OpObserverTransactionTest, TransactionalUpdateTestIncludesTenantId) {
AutoGetCollection autoColl1(opCtx(), nss1, MODE_IX);
AutoGetCollection autoColl2(opCtx(), nss2, MODE_IX);
- CollectionUpdateArgs updateArgs1;
+ const auto criteria1 = BSON("_id" << 0);
+ const auto preImageDoc1 = criteria1;
+ CollectionUpdateArgs updateArgs1{preImageDoc1};
+ updateArgs1.criteria = criteria1;
updateArgs1.stmtIds = {0};
updateArgs1.updatedDoc = BSON("_id" << 0 << "data"
<< "x");
updateArgs1.update = BSON("$set" << BSON("data"
<< "x"));
- updateArgs1.criteria = BSON("_id" << 0);
OplogUpdateEntryArgs update1(&updateArgs1, *autoColl1);
- CollectionUpdateArgs updateArgs2;
+ const auto criteria2 = BSON("_id" << 1);
+ const auto preImageDoc2 = criteria2;
+ CollectionUpdateArgs updateArgs2{preImageDoc2};
+ updateArgs2.criteria = criteria2;
updateArgs2.stmtIds = {1};
updateArgs2.updatedDoc = BSON("_id" << 1 << "data"
<< "y");
updateArgs2.update = BSON("$set" << BSON("data"
<< "y"));
- updateArgs2.criteria = BSON("_id" << 1);
OplogUpdateEntryArgs update2(&updateArgs2, *autoColl2);
opObserver().onUpdate(opCtx(), update1);
@@ -2037,13 +2051,16 @@ public:
protected:
void testRetryableFindAndModifyUpdateRequestingPostImageHasNeedsRetryImage() {
- CollectionUpdateArgs updateArgs;
+ NamespaceString nss = {boost::none /* tenantId */, "test", "coll"};
+ const auto criteria = BSON("_id" << 0);
+ const auto preImageDoc = criteria;
+ CollectionUpdateArgs updateArgs{preImageDoc};
+ updateArgs.criteria = criteria;
updateArgs.stmtIds = {0};
updateArgs.updatedDoc = BSON("_id" << 0 << "data"
<< "x");
updateArgs.update = BSON("$set" << BSON("data"
<< "x"));
- updateArgs.criteria = BSON("_id" << 0);
updateArgs.storeDocOption = CollectionUpdateArgs::StoreDocOption::PostImage;
AutoGetCollection autoColl(opCtx(), nss, MODE_IX);
@@ -2066,13 +2083,15 @@ protected:
}
void testRetryableFindAndModifyUpdateRequestingPreImageHasNeedsRetryImage() {
- CollectionUpdateArgs updateArgs;
- updateArgs.stmtIds = {0};
- updateArgs.preImageDoc = BSON("_id" << 0 << "data"
+ NamespaceString nss = {boost::none, "test", "coll"};
+ const auto criteria = BSON("_id" << 0);
+ const auto preImageDoc = BSON("_id" << 0 << "data"
<< "y");
+ CollectionUpdateArgs updateArgs{preImageDoc};
+ updateArgs.criteria = criteria;
+ updateArgs.stmtIds = {0};
updateArgs.update = BSON("$set" << BSON("data"
<< "x"));
- updateArgs.criteria = BSON("_id" << 0);
updateArgs.storeDocOption = CollectionUpdateArgs::StoreDocOption::PreImage;
AutoGetCollection autoColl(opCtx(), nss, MODE_IX);
@@ -2350,14 +2369,8 @@ protected:
}
break;
}
- update->updateArgs->preImageDoc = boost::none;
- if (testCase.imageType == StoreDocOption::PreImage || testCase.changeStreamImagesEnabled) {
- update->updateArgs->preImageDoc = BSON("_id" << 0 << "preImage" << true);
- }
- update->updateArgs->updatedDoc = BSON("_id" << 0 << "postImage" << true);
- update->updateArgs->update =
- BSON("$set" << BSON("postImage" << true) << "$unset" << BSON("preImage" << 1));
- update->updateArgs->criteria = BSON("_id" << 0);
+ update->updateArgs->updatedDoc = kPostImageDoc;
+ update->updateArgs->update = kUpdate;
update->updateArgs->storeDocOption = testCase.imageType;
}
@@ -2374,7 +2387,7 @@ protected:
repl::ImageEntry imageEntry =
getImageEntryFromSideCollection(opCtx, *updateOplogEntry.getSessionId());
const BSONObj& expectedImage = testCase.imageType == StoreDocOption::PreImage
- ? update.updateArgs->preImageDoc.value()
+ ? update.updateArgs->preImageDoc
: update.updateArgs->updatedDoc;
ASSERT_BSONOBJ_EQ(expectedImage, imageEntry.getImage());
ASSERT(imageEntry.getImageKind() == updateOplogEntry.getNeedsRetryImage());
@@ -2387,8 +2400,8 @@ protected:
ASSERT(imageEntry.getImageKind() == repl::RetryImageEnum::kPostImage);
}
- // If 'updateOplogEntry' has opTime T, opTime T-1 must be reserved for potential forged
- // noop oplog entry for the pre/postImage written to the side collection.
+ // If 'updateOplogEntry' has opTime T, opTime T-1 must be reserved for potential
+ // forged noop oplog entry for the pre/postImage written to the side collection.
const Timestamp forgeNoopTimestamp = updateOplogEntry.getTimestamp() - 1;
ASSERT_FALSE(findByTimestamp(oplogs, forgeNoopTimestamp));
} else {
@@ -2412,7 +2425,7 @@ protected:
ChangeStreamPreImageId preImageId(
_uuid, updateOplogEntry.getOpTime().getTimestamp(), 0);
ChangeStreamPreImage preImage = getChangeStreamPreImage(opCtx, preImageId, &container);
- const BSONObj& expectedImage = update.updateArgs->preImageDoc.value();
+ const BSONObj& expectedImage = update.updateArgs->preImageDoc;
ASSERT_BSONOBJ_EQ(expectedImage, preImage.getPreImage());
ASSERT_EQ(updateOplogEntry.getWallClockTime(), preImage.getOperationTime());
}
@@ -2442,6 +2455,12 @@ protected:
const NamespaceString _nss{boost::none, "test", "coll"};
const UUID _uuid = UUID::gen();
+
+ const BSONObj kCriteria = BSON("_id" << 0);
+ const BSONObj kPreImageDoc = BSON("_id" << 0 << "preImage" << true);
+ const BSONObj kPostImageDoc = BSON("_id" << 0 << "postImage" << true);
+ const BSONObj kUpdate =
+ BSON("$set" << BSON("postImage" << true) << "$unset" << BSON("preImage" << 1));
};
TEST_F(OnUpdateOutputsTest, TestNonTransactionFundamentalOnUpdateOutputs) {
@@ -2468,9 +2487,11 @@ TEST_F(OnUpdateOutputsTest, TestNonTransactionFundamentalOnUpdateOutputs) {
}
// Phase 2: Call the code we're testing.
+ CollectionUpdateArgs updateArgs{kPreImageDoc};
+ updateArgs.criteria = kCriteria;
+
WriteUnitOfWork wuow(opCtx);
AutoGetCollection locks(opCtx, _nss, MODE_IX);
- CollectionUpdateArgs updateArgs;
OplogUpdateEntryArgs updateEntryArgs(&updateArgs, *locks);
initializeOplogUpdateEntryArgs(opCtx, testCase, &updateEntryArgs);
opObserver.onUpdate(opCtx, updateEntryArgs);
@@ -2514,7 +2535,8 @@ TEST_F(OnUpdateOutputsTest, TestFundamentalTransactionOnUpdateOutputs) {
// Phase 2: Call the code we're testing.
WriteUnitOfWork wuow(opCtx);
AutoGetCollection locks(opCtx, _nss, MODE_IX);
- CollectionUpdateArgs updateArgs;
+ CollectionUpdateArgs updateArgs{kPreImageDoc};
+ updateArgs.criteria = kCriteria;
OplogUpdateEntryArgs updateEntryArgs(&updateArgs, *locks);
initializeOplogUpdateEntryArgs(opCtx, testCase, &updateEntryArgs);
@@ -2539,9 +2561,10 @@ struct InsertTestCase {
int numDocsToInsert;
};
TEST_F(OpObserverTest, TestFundamentalOnInsertsOutputs) {
- // Create a registry that only registers the Impl. It can be challenging to call methods on the
- // Impl directly. It falls into cases where `ReservedTimes` is expected to be instantiated. Due
- // to strong encapsulation, we use the registry that managers the `ReservedTimes` on our behalf.
+ // Create a registry that only registers the Impl. It can be challenging to call methods on
+ // the Impl directly. It falls into cases where `ReservedTimes` is expected to be instantiated.
+    // Due to strong encapsulation, we use the registry that manages the `ReservedTimes` on our
+ // behalf.
OpObserverRegistry opObserver;
opObserver.addObserver(std::make_unique<OpObserverImpl>(std::make_unique<OplogWriterImpl>()));
@@ -2606,8 +2629,8 @@ TEST_F(OpObserverTest, TestFundamentalOnInsertsOutputs) {
ASSERT_EQ(opCtx->getTxnNumber().value(), entry.getTxnNumber().value());
ASSERT_EQ(1, entry.getStatementIds().size());
ASSERT_EQ(StmtId(opIdx), entry.getStatementIds()[0]);
- // When we insert multiple documents in retryable writes, each insert will "link" back
- // to the previous insert. This code verifies that C["prevOpTime"] -> B and
+ // When we insert multiple documents in retryable writes, each insert will "link"
+ // back to the previous insert. This code verifies that C["prevOpTime"] -> B and
// B["prevOpTime"] -> A.
Timestamp expectedPrevWriteOpTime = Timestamp(0, 0);
if (opIdx > 0) {
@@ -2764,8 +2787,8 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsGrouping) {
AutoGetCollection autoColl(opCtx, _nss, MODE_IX);
for (size_t doc = 0; doc < docsToBeBatched; doc++) {
- // This test does not call `OpObserver::aboutToDelete`. That method has the side-effect
- // of setting of `documentKey` on the delete for sharding purposes.
+ // This test does not call `OpObserver::aboutToDelete`. That method has the
+            // side-effect of setting `documentKey` on the delete for sharding purposes.
// `OpObserverImpl::onDelete` asserts its existence.
repl::documentKeyDecoration(opCtx).emplace(docsToDelete[doc]["_id"].wrap(),
boost::none);
@@ -2776,10 +2799,11 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsGrouping) {
wuow.commit();
- // Retrieve the oplog entries. We expect 'docsToBeBatched' oplog entries because of previous
- // iteration of this loop that exercised previous batch sizes.
+ // Retrieve the oplog entries. We expect 'docsToBeBatched' oplog entries because of
+        // previous iterations of this loop that exercised previous batch sizes.
std::vector<BSONObj> oplogs = getNOplogEntries(opCtx, docsToBeBatched);
- // Entries in ascending timestamp order, so fetch the last one at the back of the vector.
+ // Entries in ascending timestamp order, so fetch the last one at the back of the
+ // vector.
auto lastOplogEntry = oplogs.back();
auto lastOplogEntryParsed = assertGet(OplogEntry::parse(oplogs.back()));
@@ -2801,8 +2825,8 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsGrouping) {
}
}
-// Verifies that a WriteUnitOfWork with groupOplogEntries=true constisting of an insert, an update
-// and a delete replicates as a single applyOps.
+// Verifies that a WriteUnitOfWork with groupOplogEntries=true consisting of an insert, an
+// update and a delete replicates as a single applyOps.
TEST_F(BatchedWriteOutputsTest, TestApplyOpsInsertDeleteUpdate) {
// Setup.
auto opCtxRaii = cc().makeOperationContext();
@@ -2836,10 +2860,12 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsInsertDeleteUpdate) {
}
// (2) Update
{
- CollectionUpdateArgs collUpdateArgs;
+ const auto criteria = BSON("_id" << 2);
+ const auto preImageDoc = criteria;
+ CollectionUpdateArgs collUpdateArgs{preImageDoc};
+ collUpdateArgs.criteria = criteria;
collUpdateArgs.update = BSON("fieldToUpdate"
<< "valueToUpdate");
- collUpdateArgs.criteria = BSON("_id" << 2);
auto args = OplogUpdateEntryArgs(&collUpdateArgs, *autoColl);
opCtx->getServiceContext()->getOpObserver()->onUpdate(opCtx, args);
}
@@ -2852,8 +2878,8 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsInsertDeleteUpdate) {
auto lastOplogEntry = oplogs.back();
auto lastOplogEntryParsed = assertGet(OplogEntry::parse(oplogs.back()));
- // The batch consists of an applyOps, whose array contains the three writes issued within the
- // WUOW.
+ // The batch consists of an applyOps, whose array contains the three writes issued within
+ // the WUOW.
ASSERT(lastOplogEntryParsed.getCommandType() == OplogEntry::CommandType::kApplyOps);
std::vector<repl::OplogEntry> innerEntries;
repl::ApplyOps::extractOperationsTo(
@@ -2923,10 +2949,12 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsInsertDeleteUpdateIncludesTenantId)
}
// (2) Update
{
- CollectionUpdateArgs collUpdateArgs;
+ const auto criteria = BSON("_id" << 2);
+ const auto preImageDoc = criteria;
+ CollectionUpdateArgs collUpdateArgs{preImageDoc};
+ collUpdateArgs.criteria = criteria;
collUpdateArgs.update = BSON("fieldToUpdate"
<< "valueToUpdate");
- collUpdateArgs.criteria = BSON("_id" << 2);
auto args = OplogUpdateEntryArgs(&collUpdateArgs, *autoColl);
opCtx->getServiceContext()->getOpObserver()->onUpdate(opCtx, args);
}
@@ -2939,8 +2967,8 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsInsertDeleteUpdateIncludesTenantId)
auto lastOplogEntry = oplogs.back();
auto lastOplogEntryParsed = assertGet(OplogEntry::parse(oplogs.back()));
- // The batch consists of an applyOps, whose array contains the three writes issued within the
- // WUOW.
+ // The batch consists of an applyOps, whose array contains the three writes issued within
+ // the WUOW.
ASSERT(lastOplogEntryParsed.getCommandType() == OplogEntry::CommandType::kApplyOps);
std::vector<repl::OplogEntry> innerEntries;
repl::ApplyOps::extractOperationsTo(
@@ -3027,8 +3055,8 @@ TEST_F(BatchedWriteOutputsTest, testWUOWLarge) {
}
wuow.commit();
- // Retrieve the oplog entries, implicitly asserting that there's exactly one entry in the whole
- // oplog.
+ // Retrieve the oplog entries, implicitly asserting that there's exactly one entry in the
+ // whole oplog.
std::vector<BSONObj> oplogs = getNOplogEntries(opCtx, 1);
auto lastOplogEntry = oplogs.back();
auto lastOplogEntryParsed = assertGet(OplogEntry::parse(oplogs.back()));
@@ -3142,8 +3170,8 @@ protected:
ASSERT(imageEntry.getImageKind() == repl::RetryImageEnum::kPreImage);
ASSERT_BSONOBJ_EQ(_deletedDoc, imageEntry.getImage());
- // If 'deleteOplogEntry' has opTime T, opTime T-1 must be reserved for potential forged
- // noop oplog entry for the preImage written to the side collection.
+ // If 'deleteOplogEntry' has opTime T, opTime T-1 must be reserved for potential
+ // forged noop oplog entry for the preImage written to the side collection.
const Timestamp forgeNoopTimestamp = deleteOplogEntry.getTimestamp() - 1;
ASSERT_FALSE(findByTimestamp(oplogs, forgeNoopTimestamp));
} else {
@@ -3383,8 +3411,8 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertTest) {
<< "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[2].getObject());
- // This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
- // field.
+ // This should be the implicit commit oplog entry, indicated by the absence of the
+ // 'partialTxn' field.
oExpected =
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
@@ -3402,22 +3430,26 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdateTest) {
AutoGetCollection autoColl1(opCtx(), nss1, MODE_IX);
AutoGetCollection autoColl2(opCtx(), nss2, MODE_IX);
- CollectionUpdateArgs updateArgs1;
+ const auto criteria1 = BSON("_id" << 0);
+ const auto preImageDoc1 = criteria1;
+ CollectionUpdateArgs updateArgs1{preImageDoc1};
+ updateArgs1.criteria = criteria1;
updateArgs1.stmtIds = {0};
updateArgs1.updatedDoc = BSON("_id" << 0 << "data"
<< "x");
updateArgs1.update = BSON("$set" << BSON("data"
<< "x"));
- updateArgs1.criteria = BSON("_id" << 0);
OplogUpdateEntryArgs update1(&updateArgs1, *autoColl1);
- CollectionUpdateArgs updateArgs2;
+ const auto criteria2 = BSON("_id" << 1);
+ const auto preImageDoc2 = criteria2;
+ CollectionUpdateArgs updateArgs2{preImageDoc2};
+ updateArgs2.criteria = criteria2;
updateArgs2.stmtIds = {1};
updateArgs2.updatedDoc = BSON("_id" << 1 << "data"
<< "y");
updateArgs2.update = BSON("$set" << BSON("data"
<< "y"));
- updateArgs2.criteria = BSON("_id" << 1);
OplogUpdateEntryArgs update2(&updateArgs2, *autoColl2);
opObserver().onUpdate(opCtx(), update1);
@@ -3449,8 +3481,8 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdateTest) {
<< "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
- // This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
- // field.
+ // This should be the implicit commit oplog entry, indicated by the absence of the
+ // 'partialTxn' field.
oExpected =
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "u"
@@ -3503,8 +3535,8 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeleteTest) {
<< "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
- // This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
- // field.
+ // This should be the implicit commit oplog entry, indicated by the absence of the
+ // 'partialTxn' field.
oExpected = oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
<< "ns" << nss2.toString() << "ui"
@@ -3595,22 +3627,26 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdatePrepareTest) {
AutoGetCollection autoColl1(opCtx(), nss1, MODE_IX);
AutoGetCollection autoColl2(opCtx(), nss2, MODE_IX);
- CollectionUpdateArgs updateArgs1;
+ const auto criteria1 = BSON("_id" << 0);
+ const auto preImageDoc1 = criteria1;
+ CollectionUpdateArgs updateArgs1{preImageDoc1};
+ updateArgs1.criteria = criteria1;
updateArgs1.stmtIds = {0};
updateArgs1.updatedDoc = BSON("_id" << 0 << "data"
<< "x");
updateArgs1.update = BSON("$set" << BSON("data"
<< "x"));
- updateArgs1.criteria = BSON("_id" << 0);
OplogUpdateEntryArgs update1(&updateArgs1, *autoColl1);
- CollectionUpdateArgs updateArgs2;
+ const auto criteria2 = BSON("_id" << 1);
+ const auto preImageDoc2 = criteria2;
+ CollectionUpdateArgs updateArgs2{preImageDoc2};
+ updateArgs2.criteria = criteria2;
updateArgs2.stmtIds = {1};
updateArgs2.updatedDoc = BSON("_id" << 1 << "data"
<< "y");
updateArgs2.update = BSON("$set" << BSON("data"
<< "y"));
- updateArgs2.criteria = BSON("_id" << 1);
OplogUpdateEntryArgs update2(&updateArgs2, *autoColl2);
opObserver().onUpdate(opCtx(), update1);
@@ -4005,9 +4041,9 @@ TEST_F(OpObserverMultiEntryTransactionTest, CommitPreparedPackingTest) {
ASSERT_EQ(insertEntry.getObject()["prepare"].boolean(), true);
// If we are only going to write a single prepare oplog entry, but we have reserved multiple
- // oplog slots, at T=1 and T=2, for example, then the 'prepare' oplog entry should be written at
- // T=2 i.e. the last reserved slot. In this case, the 'startOpTime' of the transaction should
- // also be set to T=2, not T=1. We verify that below.
+ // oplog slots, at T=1 and T=2, for example, then the 'prepare' oplog entry should be
+ // written at T=2 i.e. the last reserved slot. In this case, the 'startOpTime' of the
+ // transaction should also be set to T=2, not T=1. We verify that below.
const auto startOpTime = prepareOpTime;
const auto prepareTimestamp = prepareOpTime.getTimestamp();
@@ -4056,8 +4092,9 @@ class OpObserverLargeTransactionTest : public OpObserverTransactionTest {
private:
repl::ReplSettings createReplSettings() override {
repl::ReplSettings settings;
- // We need an oplog comfortably large enough to hold an oplog entry that exceeds the BSON
- // size limit. Otherwise we will get the wrong error code when trying to write one.
+ // We need an oplog comfortably large enough to hold an oplog entry that exceeds the
+ // BSON size limit. Otherwise we will get the wrong error code when trying to write
+ // one.
settings.setOplogSizeBytes(BSONObjMaxInternalSize + 2 * 1024 * 1024);
settings.setReplSetString("mySet/node1:12345");
return settings;
@@ -4065,15 +4102,15 @@ private:
};
// Tests that a large transaction may be committed. This test creates a transaction with two
-// operations that together are just big enough to exceed the size limit, which should result in a
-// two oplog entry transaction.
+// operations that together are just big enough to exceed the size limit, which should result in
+// a two oplog entry transaction.
TEST_F(OpObserverLargeTransactionTest, LargeTransactionCreatesMultipleOplogEntries) {
auto txnParticipant = TransactionParticipant::get(opCtx());
txnParticipant.unstashTransactionResources(opCtx(), "insert");
- // This size is crafted such that two operations of this size are not too big to fit in a single
- // oplog entry, but two operations plus oplog overhead are too big to fit in a single oplog
- // entry.
+ // This size is crafted such that two operations of this size are not too big to fit in a
+ // single oplog entry, but two operations plus oplog overhead are too big to fit in a single
+ // oplog entry.
constexpr size_t kHalfTransactionSize = BSONObjMaxInternalSize / 2 - 175;
std::unique_ptr<uint8_t[]> halfTransactionData(new uint8_t[kHalfTransactionSize]());
auto operation1 = repl::DurableOplogEntry::makeInsertOperation(
@@ -4129,8 +4166,8 @@ TEST_F(OpObserverTest, OnRollbackInvalidatesDefaultRWConcernCache) {
ASSERT_EQ(Timestamp(10, 20), *origCachedDefaults.getUpdateOpTime());
ASSERT_EQ(Date_t::fromMillisSinceEpoch(1234), *origCachedDefaults.getUpdateWallClockTime());
- // Change the mock's defaults, but don't invalidate the cache yet. The cache should still return
- // the original defaults.
+ // Change the mock's defaults, but don't invalidate the cache yet. The cache should still
+ // return the original defaults.
{
RWConcernDefault newDefaults;
newDefaults.setUpdateOpTime(Timestamp(50, 20));
diff --git a/src/mongo/db/op_observer/user_write_block_mode_op_observer_test.cpp b/src/mongo/db/op_observer/user_write_block_mode_op_observer_test.cpp
index 6a551fe9d6e..cd57bf41085 100644
--- a/src/mongo/db/op_observer/user_write_block_mode_op_observer_test.cpp
+++ b/src/mongo/db/op_observer/user_write_block_mode_op_observer_test.cpp
@@ -85,7 +85,10 @@ protected:
UserWriteBlockModeOpObserver opObserver;
std::vector<InsertStatement> inserts;
- CollectionUpdateArgs collectionUpdateArgs;
+ const auto criteria = BSON("_id" << 0);
+ const auto preImageDoc = criteria;
+ CollectionUpdateArgs collectionUpdateArgs{preImageDoc};
+ collectionUpdateArgs.criteria = criteria;
collectionUpdateArgs.source =
fromMigrate ? OperationSource::kFromMigrate : OperationSource::kStandard;
OplogUpdateEntryArgs updateArgs(&collectionUpdateArgs, *autoColl);
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index e98a5931dd8..28848459976 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -1439,12 +1439,11 @@ Status performAtomicTimeseriesWrites(
auto original = coll->docFor(opCtx, recordId);
- CollectionUpdateArgs args;
+ CollectionUpdateArgs args{original.value()};
+ args.criteria = update.getQ();
if (const auto& stmtIds = op.getStmtIds()) {
args.stmtIds = *stmtIds;
}
- args.preImageDoc = original.value();
- args.criteria = update.getQ();
args.source = OperationSource::kTimeseriesInsert;
BSONObj updated;
diff --git a/src/mongo/db/repl/storage_timestamp_test.cpp b/src/mongo/db/repl/storage_timestamp_test.cpp
index df33d614242..9accf4a37b8 100644
--- a/src/mongo/db/repl/storage_timestamp_test.cpp
+++ b/src/mongo/db/repl/storage_timestamp_test.cpp
@@ -3184,14 +3184,14 @@ TEST_F(RetryableFindAndModifyTest, RetryableFindAndModifyUpdate) {
"storeFindAndModifyImagesInSideCollection", true);
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
CollectionWriter collection(_opCtx, autoColl);
+ const auto criteria = BSON("_id" << 0);
const auto newObj = BSON("_id" << 0 << "a" << 1 << "b" << 1);
- CollectionUpdateArgs args;
+ CollectionUpdateArgs args{oldObj};
+ args.criteria = criteria;
args.stmtIds = {1};
- args.preImageDoc = oldObj;
args.updatedDoc = newObj;
args.storeDocOption = CollectionUpdateArgs::StoreDocOption::PreImage;
args.update = BSON("$set" << BSON("b" << 1));
- args.criteria = BSON("_id" << 0);
args.retryableWrite = true;
{
@@ -3242,14 +3242,14 @@ TEST_F(RetryableFindAndModifyTest, RetryableFindAndModifyUpdateWithDamages) {
ASSERT_EQUALS(mmb::Document::kInPlaceEnabled, doc.getCurrentInPlaceMode());
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
CollectionWriter collection(_opCtx, autoColl);
+ const auto criteria = BSON("_id" << 0);
const auto newObj = BSON("_id" << 0 << "a" << 0);
- CollectionUpdateArgs args;
+ CollectionUpdateArgs args{oldObj};
+ args.criteria = criteria;
args.stmtIds = {1};
- args.preImageDoc = oldObj;
args.updatedDoc = newObj;
args.storeDocOption = CollectionUpdateArgs::StoreDocOption::PreImage;
args.update = BSON("$set" << BSON("a" << 0));
- args.criteria = BSON("_id" << 0);
args.retryableWrite = true;
{
diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp
index 0f8f8459535..3a4505280a3 100644
--- a/src/mongo/db/repl/tenant_migration_donor_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp
@@ -652,7 +652,7 @@ ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_updateState
return _stateDoc.toBSON();
}();
- CollectionUpdateArgs args;
+ CollectionUpdateArgs args{originalSnapshot.value()};
args.criteria = BSON("_id" << _migrationUuid);
args.oplogSlots = {oplogSlot};
args.update = updatedStateDocBson;
diff --git a/src/mongo/db/s/query_analysis_op_observer.cpp b/src/mongo/db/s/query_analysis_op_observer.cpp
index 64270b08f50..96ebcdf7d8d 100644
--- a/src/mongo/db/s/query_analysis_op_observer.cpp
+++ b/src/mongo/db/s/query_analysis_op_observer.cpp
@@ -91,12 +91,12 @@ void QueryAnalysisOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdat
}
if (analyze_shard_key::supportsPersistingSampledQueries() && args.updateArgs->sampleId &&
- args.updateArgs->preImageDoc && opCtx->writesAreReplicated()) {
+ opCtx->writesAreReplicated()) {
analyze_shard_key::QueryAnalysisWriter::get(opCtx)
.addDiff(*args.updateArgs->sampleId,
args.coll->ns(),
args.coll->uuid(),
- *args.updateArgs->preImageDoc,
+ args.updateArgs->preImageDoc,
args.updateArgs->updatedDoc)
.getAsync([](auto) {});
}
diff --git a/src/mongo/db/s/sharding_recovery_service_test.cpp b/src/mongo/db/s/sharding_recovery_service_test.cpp
index c39229e4c00..1b1f9f63a33 100644
--- a/src/mongo/db/s/sharding_recovery_service_test.cpp
+++ b/src/mongo/db/s/sharding_recovery_service_test.cpp
@@ -588,11 +588,12 @@ TEST_F(ShardingRecoveryServiceTestOnSecondary, BlockAndUnblockOperationsOnDataba
// Simulate an update notification on the `config.collection_critical_sections` collection, that
// is what a secondary node would receive when the primary node enters the commit phase of the
// critical section.
+ auto preImageDoc = doc.toBSON();
doc.setBlockReads(true); // NOTE: This has no semantic effect as the critical section is
// promoted to any update event on the document!
// TODO (SERVER-71056): React to `blockReads` changes only.
{
- CollectionUpdateArgs updateArgs;
+ CollectionUpdateArgs updateArgs{preImageDoc};
updateArgs.updatedDoc = doc.toBSON();
OplogUpdateEntryArgs update(&updateArgs, criticalSectionColl());
@@ -632,6 +633,7 @@ TEST_F(ShardingRecoveryServiceTestOnSecondary, BlockAndUnblockOperationsOnCollec
// is what a secondary node would receive when the primary node enters the catch-up phase of the
// critical section.
auto doc = CollectionCriticalSectionDocument(collNss, collOpReason, false);
+ auto preImageDoc = doc.toBSON();
{
std::vector<InsertStatement> inserts;
inserts.emplace_back(doc.toBSON());
@@ -657,7 +659,7 @@ TEST_F(ShardingRecoveryServiceTestOnSecondary, BlockAndUnblockOperationsOnCollec
// promoted to any update event on the document!
// TODO (SERVER-71056): React to `blockReads` changes only.
{
- CollectionUpdateArgs updateArgs;
+ CollectionUpdateArgs updateArgs{preImageDoc};
updateArgs.updatedDoc = doc.toBSON();
OplogUpdateEntryArgs update(&updateArgs, criticalSectionColl());
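One ordering detail these tests now depend on: the pre-image must be captured before the in-memory document is mutated, otherwise the recorded "pre-image" would silently equal the post-image. Sketch:

    auto preImageDoc = doc.toBSON();       // snapshot before mutation
    doc.setBlockReads(true);               // mutate the document
    CollectionUpdateArgs updateArgs{preImageDoc};
    updateArgs.updatedDoc = doc.toBSON();  // post-image after mutation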
diff --git a/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp b/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp
index cc679386b00..0f44710a21f 100644
--- a/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp
+++ b/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp
@@ -111,13 +111,18 @@ protected:
// invariant. This creates a confusing error log in the test output.
test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, _opCtx.get());
- CollectionUpdateArgs updateArgs;
+ const auto criteria = BSON("_id" << stateDocument.getId());
+ auto preImageDoc = defaultStateDocument();
+ preImageDoc.setState(ShardSplitDonorStateEnum::kBlocking);
+ preImageDoc.setBlockOpTime(repl::OpTime(Timestamp(1, 1), 1));
+
+ CollectionUpdateArgs updateArgs{preImageDoc.toBSON()};
+ updateArgs.criteria = criteria;
updateArgs.stmtIds = {};
updateArgs.updatedDoc = stateDocument.toBSON();
updateArgs.update =
BSON("$set" << BSON(ShardSplitDonorDocument::kStateFieldName
<< ShardSplitDonorState_serializer(stateDocument.getState())));
- updateArgs.criteria = BSON("_id" << stateDocument.getId());
AutoGetCollection autoColl(_opCtx.get(), _nss, MODE_IX);
OplogUpdateEntryArgs update(&updateArgs, *autoColl);
@@ -334,13 +339,15 @@ TEST_F(ShardSplitDonorOpObserverTest, TransitionToAbortingIndexBuildsFail) {
auto stateDocument = defaultStateDocument();
stateDocument.setState(ShardSplitDonorStateEnum::kAbortingIndexBuilds);
- CollectionUpdateArgs updateArgs;
+ const auto criteria = BSON("_id" << stateDocument.getId());
+ const auto preImageDoc = criteria;
+ CollectionUpdateArgs updateArgs{preImageDoc};
+ updateArgs.criteria = criteria;
updateArgs.stmtIds = {};
updateArgs.updatedDoc = stateDocument.toBSON();
updateArgs.update =
BSON("$set" << BSON(ShardSplitDonorDocument::kStateFieldName
<< ShardSplitDonorState_serializer(stateDocument.getState())));
- updateArgs.criteria = BSON("_id" << stateDocument.getId());
AutoGetCollection autoColl(_opCtx.get(), _nss, MODE_IX);
OplogUpdateEntryArgs update(&updateArgs, *autoColl);
diff --git a/src/mongo/db/serverless/shard_split_donor_service.cpp b/src/mongo/db/serverless/shard_split_donor_service.cpp
index bd715cb167e..b272ef76aa1 100644
--- a/src/mongo/db/serverless/shard_split_donor_service.cpp
+++ b/src/mongo/db/serverless/shard_split_donor_service.cpp
@@ -961,7 +961,7 @@ ExecutorFuture<repl::OpTime> ShardSplitDonorService::DonorStateMachine::_updateS
opCtx->recoveryUnit()->getSnapshotId(), originalStateDocBson);
invariant(!originalRecordId.isNull());
- CollectionUpdateArgs args;
+ CollectionUpdateArgs args{originalSnapshot.value()};
args.criteria = BSON("_id" << uuid);
args.oplogSlots = {oplogSlot};
args.update = updatedStateDocBson;
diff --git a/src/mongo/db/transaction/transaction_participant.cpp b/src/mongo/db/transaction/transaction_participant.cpp
index c83ad6baa66..caa49b9e694 100644
--- a/src/mongo/db/transaction/transaction_participant.cpp
+++ b/src/mongo/db/transaction/transaction_participant.cpp
@@ -447,9 +447,9 @@ void updateSessionEntry(OperationContext* opCtx,
<< "session "_sd << sessionId << ", transaction "_sd << txnNum);
}
- CollectionUpdateArgs args;
- args.update = updateMod;
+ CollectionUpdateArgs args{originalDoc};
args.criteria = toUpdateIdDoc;
+ args.update = updateMod;
// Specify indexesAffected = false because the sessions collection has two indexes: {_id: 1} and
// {parentLsid: 1, _id.txnNumber: 1, _id: 1}, and none of the fields are mutable.