summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJordi Serra Torrens <jordi.serra-torrens@mongodb.com>2023-05-10 14:46:43 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2023-05-10 16:48:24 +0000
commitbf9d194c58cc6defa46a9a560787b64ef333f12d (patch)
treef5ead79dd8cbe4c822b0589223ff954f69f09afa
parentbb845ae48bf76b7e818590745ad58231fc24c383 (diff)
downloadmongo-bf9d194c58cc6defa46a9a560787b64ef333f12d.tar.gz
SERVER-73298 Use acquisitions on insert paths
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp55
-rw-r--r--src/mongo/db/shard_role.cpp13
-rw-r--r--src/mongo/db/shard_role_test.cpp49
3 files changed, 92 insertions, 25 deletions
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 1c3efeaf7cf..9e00b4dab48 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -294,7 +294,7 @@ void makeCollection(OperationContext* opCtx, const NamespaceString& ns) {
}
void insertDocumentsAtomically(OperationContext* opCtx,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
std::vector<InsertStatement>::iterator begin,
std::vector<InsertStatement>::iterator end,
bool fromMigrate) {
@@ -312,7 +312,7 @@ void insertDocumentsAtomically(OperationContext* opCtx,
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
auto inTransaction = opCtx->inMultiDocumentTransaction();
- if (!inTransaction && !replCoord->isOplogDisabledFor(opCtx, collection->ns())) {
+ if (!inTransaction && !replCoord->isOplogDisabledFor(opCtx, collection.nss())) {
// Populate 'slots' with new optimes for each insert.
// This also notifies the storage engine of each new timestamp.
auto oplogSlots = repl::getNextOpTimes(opCtx, batchSize);
@@ -334,11 +334,15 @@ void insertDocumentsAtomically(OperationContext* opCtx,
[&](const BSONObj& data) {
// Check if the failpoint specifies no collection or matches the existing one.
const auto collElem = data["collectionNS"];
- return !collElem || collection->ns().ns() == collElem.str();
+ return !collElem || collection.nss().ns() == collElem.str();
});
- uassertStatusOK(collection_internal::insertDocuments(
- opCtx, collection, begin, end, &CurOp::get(opCtx)->debug(), fromMigrate));
+ uassertStatusOK(collection_internal::insertDocuments(opCtx,
+ collection.getCollectionPtr(),
+ begin,
+ end,
+ &CurOp::get(opCtx)->debug(),
+ fromMigrate));
wuow.commit();
}
@@ -536,14 +540,15 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
uasserted(ErrorCodes::InternalError, "failAllInserts failpoint active!");
}
- boost::optional<AutoGetCollection> collection;
+ boost::optional<ScopedCollectionAcquisition> collection;
auto acquireCollection = [&] {
while (true) {
- collection.emplace(opCtx,
- nss,
- fixLockModeForSystemDotViewsChanges(nss, MODE_IX),
- AutoGetCollection::Options{}.expectedUUID(collectionUUID));
- if (*collection) {
+ collection.emplace(mongo::acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(
+ opCtx, nss, AcquisitionPrerequisites::kWrite, collectionUUID),
+ fixLockModeForSystemDotViewsChanges(nss, MODE_IX)));
+ if (collection->exists()) {
break;
}
@@ -586,12 +591,12 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
if (shouldProceedWithBatchInsert) {
try {
- if (!collection->getCollection()->isCapped() && !inTxn && batch.size() > 1) {
+ if (!collection->getCollectionPtr()->isCapped() && !inTxn && batch.size() > 1) {
// First try doing it all together. If all goes well, this is all we need to do.
// See Collection::_insertDocuments for why we do all capped inserts one-at-a-time.
lastOpFixer->startingOp(nss);
insertDocumentsAtomically(opCtx,
- collection->getCollection(),
+ *collection,
batch.begin(),
batch.end(),
source == OperationSource::kFromMigrate);
@@ -628,13 +633,10 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
acquireCollection();
// Transactions are not allowed to operate on capped collections.
uassertStatusOK(
- checkIfTransactionOnCappedColl(opCtx, collection->getCollection()));
+ checkIfTransactionOnCappedColl(opCtx, collection->getCollectionPtr()));
lastOpFixer->startingOp(nss);
- insertDocumentsAtomically(opCtx,
- collection->getCollection(),
- it,
- it + 1,
- source == OperationSource::kFromMigrate);
+ insertDocumentsAtomically(
+ opCtx, *collection, it, it + 1, source == OperationSource::kFromMigrate);
lastOpFixer->finishedOpSuccessfully();
SingleWriteResult result;
result.setN(1);
@@ -1763,8 +1765,11 @@ Status performAtomicTimeseriesWrites(
LastOpFixer lastOpFixer(opCtx);
lastOpFixer.startingOp(ns);
- AutoGetCollection coll{opCtx, ns, MODE_IX};
- if (!coll) {
+ const auto coll = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(opCtx, ns, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ if (!coll.exists()) {
assertTimeseriesBucketsCollectionNotFound(ns);
}
@@ -1805,7 +1810,7 @@ Status performAtomicTimeseriesWrites(
if (!insertOps.empty()) {
auto status = collection_internal::insertDocuments(
- opCtx, *coll, inserts.begin(), inserts.end(), &curOp->debug());
+ opCtx, coll.getCollectionPtr(), inserts.begin(), inserts.end(), &curOp->debug());
if (!status.isOK()) {
return status;
}
@@ -1820,10 +1825,10 @@ Status performAtomicTimeseriesWrites(
invariant(op.getUpdates().size() == 1);
auto& update = op.getUpdates().front();
- invariant(coll->isClustered());
+ invariant(coll.getCollectionPtr()->isClustered());
auto recordId = record_id_helpers::keyForOID(update.getQ()["_id"].OID());
- auto original = coll->docFor(opCtx, recordId);
+ auto original = coll.getCollectionPtr()->docFor(opCtx, recordId);
CollectionUpdateArgs args{original.value()};
args.criteria = update.getQ();
@@ -1862,7 +1867,7 @@ Status performAtomicTimeseriesWrites(
}
collection_internal::updateDocument(opCtx,
- *coll,
+ coll.getCollectionPtr(),
recordId,
original,
updated,
diff --git a/src/mongo/db/shard_role.cpp b/src/mongo/db/shard_role.cpp
index 450e343befc..0041ebe9355 100644
--- a/src/mongo/db/shard_role.cpp
+++ b/src/mongo/db/shard_role.cpp
@@ -95,6 +95,10 @@ ResolvedNamespaceOrViewAcquisitionRequestsMap resolveNamespaceOrViewAcquisitionR
for (const auto& ar : acquisitionRequests) {
if (ar.nss) {
+ uassert(ErrorCodes::InvalidNamespace,
+ str::stream() << "Namespace " << ar.nss->toStringForErrorMsg()
+                        << " is not a valid collection name",
+ ar.nss->isValid());
auto coll = catalog.lookupCollectionByNamespace(opCtx, *ar.nss);
checkCollectionUUIDMismatch(opCtx, *ar.nss, coll, ar.uuid);
@@ -107,6 +111,10 @@ ResolvedNamespaceOrViewAcquisitionRequestsMap resolveNamespaceOrViewAcquisitionR
std::move(resolvedAcquisitionRequest));
} else if (ar.dbname) {
invariant(ar.uuid);
+ uassert(ErrorCodes::InvalidNamespace,
+ str::stream() << "Invalid db name " << ar.dbname->toStringForErrorMsg(),
+ NamespaceString::validDBName(*ar.dbname,
+ NamespaceString::DollarInDbNameBehavior::Allow));
auto coll = catalog.lookupCollectionByUUID(opCtx, *ar.uuid);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Namespace " << (*ar.dbname).toStringForErrorMsg() << ":"
@@ -607,6 +615,11 @@ ResolvedNamespaceOrViewAcquisitionRequestsMap generateSortedAcquisitionRequests(
for (const auto& ar : acquisitionRequests) {
NamespaceStringOrUUID nssOrUUID =
ar.nss ? NamespaceStringOrUUID(*ar.nss) : NamespaceStringOrUUID(*ar.dbname, *ar.uuid);
+ uassertStatusOK(nssOrUUID.isNssValid());
+ uassert(ErrorCodes::InvalidNamespace,
+                str::stream() << "Invalid db name " << nssOrUUID.dbName().toStringForErrorMsg(),
+ NamespaceString::validDBName(nssOrUUID.dbName(),
+ NamespaceString::DollarInDbNameBehavior::Allow));
auto coll =
CollectionPtr(catalog.establishConsistentCollection(opCtx, nssOrUUID, readTimestamp));
diff --git a/src/mongo/db/shard_role_test.cpp b/src/mongo/db/shard_role_test.cpp
index 57626db51d4..0a3d3fe93c1 100644
--- a/src/mongo/db/shard_role_test.cpp
+++ b/src/mongo/db/shard_role_test.cpp
@@ -263,6 +263,55 @@ TEST_F(ShardRoleTest, NamespaceOrViewAcquisitionRequestWithOpCtxTakesPlacementFr
}
}
+TEST_F(ShardRoleTest, AcquisitionWithInvalidNamespaceFails) {
+ const auto checkAcquisitionByNss = [&](const NamespaceString& nss) {
+ // With locks
+ ASSERT_THROWS_CODE(
+ acquireCollection(opCtx(),
+ {nss, {}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite},
+ MODE_IX),
+ DBException,
+ ErrorCodes::InvalidNamespace);
+
+ // Without locks
+ ASSERT_THROWS_CODE(
+ acquireCollectionsOrViewsWithoutTakingLocks(
+ opCtx(), {{nss, {}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}}),
+ DBException,
+ ErrorCodes::InvalidNamespace);
+ };
+
+ const auto checkAcquisitionByNssOrUUID = [&](const NamespaceStringOrUUID& nssOrUuid) {
+ // With locks
+ ASSERT_THROWS_CODE(
+ acquireCollection(
+ opCtx(),
+ {nssOrUuid, {}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite},
+ MODE_IX),
+ DBException,
+ ErrorCodes::InvalidNamespace);
+
+ // Without locks
+ ASSERT_THROWS_CODE(
+ acquireCollectionsOrViewsWithoutTakingLocks(
+ opCtx(),
+ {{nssOrUuid, {}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}}),
+ DBException,
+ ErrorCodes::InvalidNamespace);
+ };
+
+ const NamespaceString nssEmptyCollectionName =
+ NamespaceString::createNamespaceString_forTest(dbNameTestDb, "");
+ checkAcquisitionByNss(nssEmptyCollectionName);
+ checkAcquisitionByNssOrUUID(nssEmptyCollectionName);
+
+ const NamespaceString nssEmptyDbName =
+ NamespaceString::createNamespaceString_forTest("", "foo");
+ checkAcquisitionByNss(nssEmptyDbName);
+ checkAcquisitionByNssOrUUID(nssEmptyDbName);
+ checkAcquisitionByNssOrUUID(NamespaceStringOrUUID("", UUID::gen()));
+}
+
// ---------------------------------------------------------------------------
// Placement checks when acquiring unsharded collections