summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMarcos José Grillo Ramirez <marcos.grillo@mongodb.com>2021-10-25 14:57:27 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-10-26 18:18:28 +0000
commit512107f95fb8f41c8f2d66a8514a6bc910156c20 (patch)
tree318a72227e30652f67724ecf39fabc18cbf54b95
parent5da2ce69f7c863d14ddd0b1e2d36d2a675785ab2 (diff)
downloadmongo-512107f95fb8f41c8f2d66a8514a6bc910156c20.tar.gz
SERVER-56226 Implement a 'permitMigrations' collection property to prevent chunk migration commits and balancing rounds
(cherry picked from commit 53bf3779928dccbb0abf1e302cf80096b8a63f4a)
-rw-r--r-- jstests/sharding/move_chunk_permitMigrations.js                               | 123
-rw-r--r-- src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp              |   2
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp           |  26
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp|  26
-rw-r--r-- src/mongo/s/catalog/type_collection.cpp                                       |  18
-rw-r--r-- src/mongo/s/catalog/type_collection.h                                         |  18
6 files changed, 203 insertions(+), 10 deletions(-)
diff --git a/jstests/sharding/move_chunk_permitMigrations.js b/jstests/sharding/move_chunk_permitMigrations.js
new file mode 100644
index 00000000000..3966ea0229b
--- /dev/null
+++ b/jstests/sharding/move_chunk_permitMigrations.js
@@ -0,0 +1,123 @@
+/**
+ * Tests that a collection with permitMigrations: false in config.collections prohibits committing a
+ * moveChunk and disables the balancer.
+ *
+ * @tags: [
+ * multiversion_incompatible
+ * ]
+ */
+(function() {
+'use strict';
+
+load('jstests/libs/fail_point_util.js');
+load('jstests/libs/parallel_shell_helpers.js');
+
+const st = new ShardingTest({shards: 2});
+const configDB = st.s.getDB("config");
+const dbName = 'PermitMigrations';
+
+// Resets database dbName and enables sharding and establishes shard0 as primary, test case agnostic
+const setUpDb = function setUpDatabaseAndEnableSharding() {
+ assert.commandWorked(st.s.getDB(dbName).dropDatabase());
+ assert.commandWorked(
+ st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName}));
+};
+
+const setPermitMigrations = function(ns, permit) {
+ // For now update the flag manually, a user-facing command will be implemented with
+ // SERVER-56227.
+ assert.commandWorked(configDB.collections.updateOne(
+ {_id: ns}, {$set: {permitMigrations: permit}}, {writeConcern: {w: "majority"}}));
+};
+
+// Tests that moveChunk does not succeed when {permitMigrations: false}
+(function testPermitMigrationsFalsePreventsMoveChunk() {
+ setUpDb();
+
+ const collName = "collA";
+ const ns = dbName + "." + collName;
+
+ assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 0}));
+ assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 1}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+ // Confirm that an inProgress moveChunk fails once {permitMigrations: false}
+ const fp = configureFailPoint(st.shard0, "moveChunkHangAtStep4");
+ const awaitResult = startParallelShell(
+ funWithArgs(function(ns, toShardName) {
+ assert.commandFailedWithCode(
+ db.adminCommand({moveChunk: ns, find: {_id: 0}, to: toShardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+ }, ns, st.shard1.shardName), st.s.port);
+ fp.wait();
+ setPermitMigrations(ns, false);
+ fp.off();
+ awaitResult();
+
+ // {permitMigrations: false} is set, sending a new moveChunk command should also fail.
+ assert.commandFailedWithCode(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+})();
+
+// Tests {permitMigrations: false} disables balancing for collB and does not interfere with
+// balancing for collA.
+//
+// collBSetParams specify the field(s) that will be set on the collB in config.collections.
+const testBalancer = function testPermitMigrationsFalseDisablesBalancer(permitMigrations,
+ collBSetNoBalanceParam) {
+ setUpDb();
+
+ const collAName = "collA";
+ const collBName = "collB";
+ const collA = st.s.getCollection(`${dbName}.${collAName}`);
+ const collB = st.s.getCollection(`${dbName}.${collBName}`);
+
+ assert.commandWorked(st.s.adminCommand({shardCollection: collA.getFullName(), key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: collB.getFullName(), key: {_id: 1}}));
+
+ // Split both collections into 4 chunks so balancing can occur.
+ for (let coll of [collA, collB]) {
+ coll.insert({_id: 1});
+ coll.insert({_id: 10});
+ coll.insert({_id: 20});
+ coll.insert({_id: 30});
+
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 10}));
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 20}));
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 30}));
+
+ assert.eq(
+ 4,
+ configDB.chunks.countDocuments({ns: coll.getFullName(), shard: st.shard0.shardName}));
+ }
+
+ jsTestLog(`Disabling balancing of ${collB.getFullName()} with permitMigrations ${
+ permitMigrations} and parameters ${tojson(collBSetNoBalanceParam)}`);
+ assert.commandWorked(
+ configDB.collections.update({_id: collB.getFullName()}, {$set: collBSetNoBalanceParam}));
+ setPermitMigrations(collB.getFullName(), permitMigrations);
+
+ st.startBalancer();
+ assert.soon(() => {
+ st.awaitBalancerRound();
+ const shard0Chunks =
+ configDB.chunks.countDocuments({ns: collA.getFullName(), shard: st.shard0.shardName});
+ const shard1Chunks =
+ configDB.chunks.countDocuments({ns: collA.getFullName(), shard: st.shard1.shardName});
+ jsTestLog(`shard0 chunks ${shard0Chunks}, shard1 chunks ${shard1Chunks}`);
+ return shard0Chunks == 2 && shard1Chunks == 2;
+ }, `Balancer failed to balance ${collA.getFullName()}`, 1000 * 60 * 10);
+ st.stopBalancer();
+
+ assert.eq(
+ 4, configDB.chunks.countDocuments({ns: collB.getFullName(), shard: st.shard0.shardName}));
+};
+
+// Test cases that should disable the balancer.
+testBalancer(false /* permitMigrations */, {});
+testBalancer(false /* permitMigrations */, {noBalance: false});
+testBalancer(false /* permitMigrations */, {noBalance: true});
+
+st.stop();
+})();
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index c1dfdf9095d..0f8ee14997d 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -369,7 +369,7 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
const NamespaceString nss(coll.getNs());
- if (!coll.getAllowBalance()) {
+ if (!coll.getAllowBalance() || !coll.getPermitMigrations()) {
LOG(1) << "Not balancing collection " << nss << "; explicitly disabled.";
continue;
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 4da04122002..d821e141506 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -902,6 +902,32 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
}
+ // Check if migrations are permitted.
+ auto findCollResponse =
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ CollectionType::ConfigNS,
+ BSON(CollectionType::fullNs << nss.ns()),
+ {},
+ 1);
+ if (!findCollResponse.isOK()) {
+ return findCollResponse.getStatus();
+ }
+
+ if (findCollResponse.getValue().docs.empty()) {
+ return {ErrorCodes::ConflictingOperationInProgress,
+ str::stream() << "Collection '" << nss.ns() << "' does not exist"};
+ }
+
+ auto coll = uassertStatusOK(CollectionType::fromBSON(findCollResponse.getValue().docs[0]));
+
+ if (!coll.getPermitMigrations()) {
+ return {ErrorCodes::ConflictingOperationInProgress,
+ str::stream() << "Collection '" << nss.ns()
+ << "' does not permit migrations so chunks cannot be moved"};
+ }
+
// Must use local read concern because we will perform subsequent writes.
auto findResponse =
configShard->exhaustiveFindOnConfig(opCtx,
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index 7322b3caf08..5993fb28eba 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -47,8 +47,11 @@ using unittest::assertGet;
using CommitChunkMigrate = ConfigServerTestFixture;
const NamespaceString kNamespace("TestDB.TestColl");
+const KeyPattern kKeyPattern(BSON("a" << 1));
TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithControlChunk) {
+ const auto collUuid = UUID::gen();
+
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -81,7 +84,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithControlChunk) {
controlChunk.setJumbo(true);
}
- setupChunks({migratedChunk, controlChunk});
+ setupCollection(migratedChunk.getNS(), collUuid, kKeyPattern, {migratedChunk, controlChunk});
Timestamp validAfter{101, 0};
BSONObj versions = assertGet(ShardingCatalogManager::get(operationContext())
@@ -129,6 +132,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithControlChunk) {
}
TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -155,7 +159,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk0});
Timestamp validAfter{101, 0};
@@ -189,6 +193,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
}
TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -215,7 +220,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk0});
// Make the time distance between the last history element large enough.
Timestamp validAfter{200, 0};
@@ -250,6 +255,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
}
TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -276,7 +282,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk0});
// Make the time before the last change to trigger the failure.
Timestamp validAfter{99, 0};
@@ -294,6 +300,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
}
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -328,7 +335,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0, chunk1});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk0, chunk1});
Timestamp validAfter{1};
@@ -345,6 +352,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
}
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -381,7 +389,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
chunk1.setMax(chunkMaxax);
// get version from the control chunk this time
- setupChunks({chunk1, chunk0});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk1, chunk0});
Timestamp validAfter{1};
@@ -398,6 +406,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
}
TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -432,7 +441,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk1});
+ setupCollection(chunk1.getNS(), collUuid, kKeyPattern, {chunk1});
Timestamp validAfter{1};
@@ -449,6 +458,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
}
TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -487,7 +497,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
Timestamp ctrlChunkValidAfter = Timestamp(50, 0);
chunk1.setHistory({ChunkHistory(ctrlChunkValidAfter, shard1.getName())});
- setupChunks({chunk0, chunk1});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk0, chunk1});
Timestamp validAfter{101, 0};
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
diff --git a/src/mongo/s/catalog/type_collection.cpp b/src/mongo/s/catalog/type_collection.cpp
index c7d3df26499..884aef938f2 100644
--- a/src/mongo/s/catalog/type_collection.cpp
+++ b/src/mongo/s/catalog/type_collection.cpp
@@ -44,6 +44,7 @@ namespace {
const BSONField<bool> kNoBalance("noBalance");
const BSONField<bool> kDropped("dropped");
const auto kIsAssignedShardKey = "isAssignedShardKey"_sd;
+const BSONField<bool> kPermitMigrations("permitMigrations");
} // namespace
@@ -192,6 +193,19 @@ StatusWith<CollectionType> CollectionType::fromBSON(const BSONObj& source) {
}
}
+ {
+ bool collPermitMigrations;
+ Status status =
+ bsonExtractBooleanField(source, kPermitMigrations.name(), &collPermitMigrations);
+ if (status.isOK()) {
+ coll._permitMigrations = collPermitMigrations;
+ } else if (status == ErrorCodes::NoSuchKey) {
+ // PermitMigrations can be missing.
+ } else {
+ return status;
+ }
+ }
+
return StatusWith<CollectionType>(coll);
}
@@ -269,6 +283,10 @@ BSONObj CollectionType::toBSON() const {
builder.append(kIsAssignedShardKey, !_isAssignedShardKey.get());
}
+ if (_permitMigrations.is_initialized()) {
+ builder.append(kPermitMigrations.name(), _permitMigrations.get());
+ }
+
return builder.obj();
}
diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h
index 5dcfbd95573..0e138402c51 100644
--- a/src/mongo/s/catalog/type_collection.h
+++ b/src/mongo/s/catalog/type_collection.h
@@ -64,7 +64,8 @@ class StatusWith;
* "unique" : false,
* "uuid" : UUID,
* "noBalance" : false,
- * "allowSplit" : false
+ * "allowSplit" : false,
+ * "permitMigrations": false
* }
*
*/
@@ -159,6 +160,18 @@ public:
return _allowBalance.get_value_or(true);
}
+ void setPermitMigrations(bool permit) {
+ if (permit) {
+ _permitMigrations = boost::none;
+ } else {
+ _permitMigrations = permit;
+ }
+ }
+
+ bool getPermitMigrations() const {
+ return _permitMigrations.get_value_or(true);
+ }
+
void setIsAssignedShardKey(bool isAssignedShardKey) {
_isAssignedShardKey = isAssignedShardKey;
}
@@ -200,6 +213,9 @@ private:
// Optional whether user has assigned a shard key to this collection before.
// Implicitly true if missing.
boost::optional<bool> _isAssignedShardKey;
+
+ // Optional whether migration is allowed for this collection. If missing, implies true.
+ boost::optional<bool> _permitMigrations;
};
} // namespace mongo