summary refs log tree commit diff
diff options
context:
space:
mode:
author    Enrico Golfieri <enrico.golfieri@mongodb.com>    2022-07-29 09:40:45 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com> 2022-08-09 17:12:37 +0000
commit    c5b3bb366c231e9f6afb06c64835e1b3f198a8bd (patch)
tree      d30df7ad6cd01f67b21a047f070aadb353972583
parent    8e55aa4ae2719be344a689a678f96531d4f82629 (diff)
download  mongo-c5b3bb366c231e9f6afb06c64835e1b3f198a8bd.tar.gz
SERVER-67725 check uuid consistency over all participants for renameCollection
-rw-r--r--  jstests/sharding/rename.js                        |  29
-rw-r--r--  src/mongo/db/s/rename_collection_coordinator.cpp  |   6
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.cpp              | 105
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.h                |  11
4 files changed, 149 insertions(+), 2 deletions(-)
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
index 4759127de02..aebd9b69bb6 100644
--- a/jstests/sharding/rename.js
+++ b/jstests/sharding/rename.js
@@ -73,6 +73,35 @@ jsTest.log("Testing that rename operations involving views are not allowed");
assert.eq(1, sameColl.countDocuments({}), "Rename a collection to itself must not loose data");
}
+const testDB = s.rs0.getPrimary().getDB('test');
+const fcvDoc = testDB.adminCommand({getParameter: 1, featureCompatibilityVersion: 1});
+if (MongoRunner.compareBinVersions(fcvDoc.featureCompatibilityVersion.version, '6.0') >= 0) {
+ // Create collection on non-primary shard (shard1 for test db) to simulate wrong creation via
+ // direct connection: collection rename should fail since `badcollection` uuids are inconsistent
+ // across shards
+ jsTest.log("Testing uuid consistency across shards");
+ assert.commandWorked(
+ s.shard1.getDB('test').badcollection.insert({_id: 1})); // direct connection
+ assert.commandWorked(s.s0.getDB('test').badcollection.insert({_id: 1})); // mongos connection
+ assert.commandFailedWithCode(
+ s.s0.getDB('test').badcollection.renameCollection('goodcollection'),
+ [ErrorCodes.InvalidUUID],
+ "collection rename should fail since test.badcollection uuids are inconsistent across shards");
+
+ // Target collection existing on non-primary shard: rename with `dropTarget=false` must fail
+ jsTest.log(
+ "Testing rename behavior when target collection [wrongly] exists on non-primary shards");
+ assert.commandWorked(
+ s.shard1.getDB('test').superbadcollection.insert({_id: 1})); // direct connection
+ assert.commandWorked(s.s0.getDB('test').goodcollection.insert({_id: 1})); // mongos connection
+ assert.commandFailedWithCode(
+ s.s0.getDB('test').goodcollection.renameCollection('superbadcollection', false),
+ [ErrorCodes.NamespaceExists],
+ "Collection rename with `dropTarget=false` must have failed because target collection exists on a non-primary shard");
+ // Target collection existing on non-primary shard: rename with `dropTarget=true` must succeed
+ assert.commandWorked(
+ s.s0.getDB('test').goodcollection.renameCollection('superbadcollection', true));
+}
// Ensure write concern works by shutting down 1 node in a replica set shard
jsTest.log("Testing write concern (2)");
diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp
index d79274bab71..386e3b81266 100644
--- a/src/mongo/db/s/rename_collection_coordinator.cpp
+++ b/src/mongo/db/s/rename_collection_coordinator.cpp
@@ -33,7 +33,6 @@
#include "mongo/db/s/rename_collection_coordinator.h"
-#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/collection_uuid_mismatch.h"
#include "mongo/db/catalog/database_holder.h"
@@ -169,7 +168,7 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
return ExecutorFuture<void>(**executor)
.then(_executePhase(
Phase::kCheckPreconditions,
- [this, anchor = shared_from_this()] {
+ [this, executor = executor, anchor = shared_from_this()] {
auto opCtxHolder = cc().makeOperationContext();
auto* opCtx = opCtxHolder.get();
getForwardableOpMetadata().setOn(opCtx);
@@ -265,6 +264,9 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
sharding_ddl_util::checkRenamePreconditions(
opCtx, sourceIsSharded, toNss, _doc.getDropTarget());
+ sharding_ddl_util::checkCatalogConsistencyAcrossShardsForRename(
+ opCtx, fromNss, toNss, _doc.getDropTarget(), executor);
+
{
AutoGetCollection coll{opCtx, toNss, MODE_IS};
checkCollectionUUIDMismatch(
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index 2fc2bd1c2f3..564fb64009e 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -173,6 +173,93 @@ void setAllowMigrations(OperationContext* opCtx,
}
}
+
+// Check that the collection UUID is the same in every shard knowing the collection
+void checkCollectionUUIDConsistencyAcrossShards(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const UUID& collectionUuid,
+ const std::vector<mongo::ShardId>& shardIds,
+ std::shared_ptr<executor::ScopedTaskExecutor> executor) {
+ const BSONObj filterObj = BSON("name" << nss.coll());
+ BSONObj cmdObj = BSON("listCollections" << 1 << "filter" << filterObj);
+
+ auto responses = sharding_ddl_util::sendAuthenticatedCommandToShards(
+ opCtx, nss.db().toString(), cmdObj, shardIds, **executor);
+
+ struct MismatchedShard {
+ std::string shardId;
+ std::string uuid;
+ };
+
+ std::vector<MismatchedShard> mismatches;
+
+ for (auto cmdResponse : responses) {
+ auto responseData = uassertStatusOK(cmdResponse.swResponse);
+ auto collectionVector = responseData.data.firstElement()["firstBatch"].Array();
+ auto shardId = cmdResponse.shardId;
+
+ if (collectionVector.empty()) {
+ // Collection does not exist on the shard
+ continue;
+ }
+
+ auto bsonCollectionUuid = collectionVector.front()["info"]["uuid"];
+ if (collectionUuid.data() != bsonCollectionUuid.uuid()) {
+ mismatches.push_back({shardId.toString(), bsonCollectionUuid.toString()});
+ }
+ }
+
+ if (!mismatches.empty()) {
+ std::stringstream errorMessage;
+ errorMessage << "The collection " << nss.toString()
+ << " with expected UUID: " << collectionUuid.toString()
+ << " has different UUIDs on the following shards: [";
+
+ for (auto mismatch : mismatches) {
+ errorMessage << "{ " << mismatch.shardId << ":" << mismatch.uuid << " },";
+ }
+ errorMessage << "]";
+ uasserted(ErrorCodes::InvalidUUID, errorMessage.str());
+ }
+}
+
+
+// Check the collection does not exist in any shard when `dropTarget` is set to false
+void checkTargetCollectionDoesNotExistInCluster(
+ OperationContext* opCtx,
+ const NamespaceString& toNss,
+ const std::vector<mongo::ShardId>& shardIds,
+ std::shared_ptr<executor::ScopedTaskExecutor> executor) {
+ const BSONObj filterObj = BSON("name" << toNss.coll());
+ BSONObj cmdObj = BSON("listCollections" << 1 << "filter" << filterObj);
+
+ auto responses = sharding_ddl_util::sendAuthenticatedCommandToShards(
+ opCtx, toNss.db(), cmdObj, shardIds, **executor);
+
+ std::vector<std::string> shardsContainingTargetCollection;
+ for (auto cmdResponse : responses) {
+ uassertStatusOK(cmdResponse.swResponse);
+ auto responseData = uassertStatusOK(cmdResponse.swResponse);
+ auto collectionVector = responseData.data.firstElement()["firstBatch"].Array();
+
+ if (!collectionVector.empty()) {
+ shardsContainingTargetCollection.push_back(cmdResponse.shardId.toString());
+ }
+ }
+
+ if (!shardsContainingTargetCollection.empty()) {
+ std::stringstream errorMessage;
+ errorMessage << "The collection " << toNss.toString()
+ << " already exists in the following shards: [";
+ std::move(shardsContainingTargetCollection.begin(),
+ shardsContainingTargetCollection.end(),
+ std::ostream_iterator<std::string>(errorMessage, ", "));
+ errorMessage << "]";
+ uasserted(ErrorCodes::NamespaceExists, errorMessage.str());
+ }
+}
+
} // namespace
void linearizeCSRSReads(OperationContext* opCtx) {
@@ -343,6 +430,24 @@ void shardedRenameMetadata(OperationContext* opCtx,
opCtx, CollectionType::ConfigNS, fromCollType.toBSON(), writeConcern));
}
+void checkCatalogConsistencyAcrossShardsForRename(
+ OperationContext* opCtx,
+ const NamespaceString& fromNss,
+ const NamespaceString& toNss,
+ const bool dropTarget,
+ std::shared_ptr<executor::ScopedTaskExecutor> executor) {
+
+ auto participants = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
+
+ auto sourceCollUuid = *getCollectionUUID(opCtx, fromNss);
+ checkCollectionUUIDConsistencyAcrossShards(
+ opCtx, fromNss, sourceCollUuid, participants, executor);
+
+ if (!dropTarget) {
+ checkTargetCollectionDoesNotExistInCluster(opCtx, toNss, participants, executor);
+ }
+}
+
void checkRenamePreconditions(OperationContext* opCtx,
bool sourceIsSharded,
const NamespaceString& toNss,
diff --git a/src/mongo/db/s/sharding_ddl_util.h b/src/mongo/db/s/sharding_ddl_util.h
index 35602f5d723..157a65399a1 100644
--- a/src/mongo/db/s/sharding_ddl_util.h
+++ b/src/mongo/db/s/sharding_ddl_util.h
@@ -104,6 +104,17 @@ void shardedRenameMetadata(OperationContext* opCtx,
const WriteConcernOptions& writeConcern);
/**
+ * Ensure source collection uuid is consistent on every shard
+ * Ensure target collection is not present on any shard when `dropTarget` is false
+ */
+void checkCatalogConsistencyAcrossShardsForRename(
+ OperationContext* opCtx,
+ const NamespaceString& fromNss,
+ const NamespaceString& toNss,
+ bool dropTarget,
+ std::shared_ptr<executor::ScopedTaskExecutor> executor);
+
+/**
* Ensures rename preconditions for collections are met:
* - Check that the namespace of the destination collection is not too long
* - Check that `dropTarget` is true if the destination collection exists