author    Milena Ivanova <milena.ivanova@mongodb.com>  2021-10-27 12:56:18 +0000
committer Milena Ivanova <milena.ivanova@mongodb.com>  2021-10-27 12:56:18 +0000
commit    1e33d78e43989756f5d0278d468eab27ef6b30d3 (patch)
tree      a57f9d8c007e77142fbbded32bf5c3f9f6ef9a46
parent    53d1e21a3afc2fd0a39564e1bd3f3fd7d3ba2501 (diff)
parent    794a300754eab0e53f69a2119ea0f2a722d221e9 (diff)
download  mongo-BACKPORT-9836.tar.gz
Merge branch 'v4.4' into BACKPORT-9836
-rw-r--r--  etc/evergreen.yml                                                               |  26
-rw-r--r--  jstests/sharding/move_chunk_permitMigrations.js                                 | 139
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp                |   2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp             |  26
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp  |  38
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp                        |  18
-rw-r--r--  src/mongo/s/catalog/type_collection.cpp                                         |  18
-rw-r--r--  src/mongo/s/catalog/type_collection.h                                           |  16
8 files changed, 237 insertions, 46 deletions
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index 55f4e88c16b..2630e7d56c3 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -811,31 +811,6 @@ functions:
https_validate_certificates = False
EOF
- "call BF Suggestion service":
- command: shell.exec
- params:
- working_dir: src
- shell: bash
- silent: true
- script: |
- report_file="report.json"
- # Check if the report file exists and has failures.
- if [ -f $report_file ] && grep -Eq "\"failures\": [1-9]" $report_file; then
- # Calling the BF Suggestion server endpoint to start feature extraction.
- payload="{\"task_id\": \"${task_id}\", \"execution\": ${execution}}"
- echo "Sending task info to the BF suggestion service"
- # The --user option is passed through stdin to avoid showing in process list.
- user_option="--user ${bfsuggestion_user}:${bfsuggestion_password}"
- curl --header "Content-Type: application/json" \
- --data "$payload" \
- --max-time 10 \
- --silent \
- --show-error \
- --config - \
- https://bfsuggestion.corp.mongodb.com/tasks <<< $user_option
- echo "Request to BF Suggestion service status: $?"
- fi
-
"upload debugsymbols": &upload_debugsymbols
command: s3.put
params:
@@ -3815,7 +3790,6 @@ post:
- func: "attach report"
- func: "attach artifacts"
- func: "save ec2 task artifacts"
- - func: "call BF Suggestion service"
- func: "attach wiki page"
- func: "kill processes"
- func: "save local client logs"
diff --git a/jstests/sharding/move_chunk_permitMigrations.js b/jstests/sharding/move_chunk_permitMigrations.js
new file mode 100644
index 00000000000..849f0d3ed2a
--- /dev/null
+++ b/jstests/sharding/move_chunk_permitMigrations.js
@@ -0,0 +1,139 @@
+/**
+ * Tests that a collection with permitMigrations: false in config.collections prohibits committing a
+ * moveChunk and disables the balancer.
+ *
+ * @tags: [
+ * multiversion_incompatible
+ * ]
+ */
+(function() {
+'use strict';
+
+load('jstests/libs/fail_point_util.js');
+load('jstests/libs/parallel_shell_helpers.js');
+load("jstests/sharding/libs/shard_versioning_util.js");
+
+const st = new ShardingTest({shards: 2});
+const configDB = st.s.getDB("config");
+const dbName = 'PermitMigrations';
+
+// Resets database dbName, enables sharding, and makes shard0 the primary shard; test case agnostic.
+const setUpDb = function setUpDatabaseAndEnableSharding() {
+ assert.commandWorked(st.s.getDB(dbName).dropDatabase());
+ assert.commandWorked(
+ st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName}));
+};
+
+const setPermitMigrations = function(ns, permit) {
+ // For now, update the flag manually; a user-facing command will be implemented with
+ // SERVER-56227.
+ assert.commandWorked(configDB.collections.updateOne(
+ {_id: ns}, {$set: {permitMigrations: permit}}, {writeConcern: {w: "majority"}}));
+};
+
+// Tests that moveChunk does not succeed when {permitMigrations: false}
+(function testPermitMigrationsFalsePreventsMoveChunk() {
+ setUpDb();
+
+ const collName = "collA";
+ const ns = dbName + "." + collName;
+
+ assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 0}));
+ assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 1}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+ // Confirm that an in-progress moveChunk fails once {permitMigrations: false} is set.
+ const fp = configureFailPoint(st.shard0, "moveChunkHangAtStep4");
+ const awaitResult = startParallelShell(
+ funWithArgs(function(ns, toShardName) {
+ assert.commandFailedWithCode(
+ db.adminCommand({moveChunk: ns, find: {_id: 0}, to: toShardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+ }, ns, st.shard1.shardName), st.s.port);
+ fp.wait();
+ setPermitMigrations(ns, false);
+ fp.off();
+ awaitResult();
+
+ // Now that {permitMigrations: false} is set, sending a new moveChunk command should also fail.
+ assert.commandFailedWithCode(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+})();
+
+// Tests that {permitMigrations: false} disables balancing for collB and does not interfere with
+// balancing for collA.
+//
+// collBSetNoBalanceParam specifies the field(s) that will be set on collB in config.collections.
+const testBalancer = function testPermitMigrationsFalseDisablesBalancer(permitMigrations,
+ collBSetNoBalanceParam) {
+ setUpDb();
+
+ const collAName = "collA";
+ const collBName = "collB";
+ const collA = st.s.getCollection(`${dbName}.${collAName}`);
+ const collB = st.s.getCollection(`${dbName}.${collBName}`);
+
+ assert.commandWorked(st.s.adminCommand({shardCollection: collA.getFullName(), key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: collB.getFullName(), key: {_id: 1}}));
+
+ // Split both collections into 4 chunks so balancing can occur.
+ for (let coll of [collA, collB]) {
+ coll.insert({_id: 1});
+ coll.insert({_id: 10});
+ coll.insert({_id: 20});
+ coll.insert({_id: 30});
+
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 10}));
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 20}));
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 30}));
+
+ // Confirm the chunks are initially unbalanced. All chunks should start out on shard0
+ // (primary shard for the database).
+ const balancerStatus = assert.commandWorked(
+ st.s0.adminCommand({balancerCollectionStatus: coll.getFullName()}));
+ assert.eq(balancerStatus.balancerCompliant, false);
+ assert.eq(balancerStatus.firstComplianceViolation, 'chunksImbalance');
+ assert.eq(
+ 4,
+ configDB.chunks.countDocuments({ns: coll.getFullName(), shard: st.shard0.shardName}));
+ }
+
+ jsTestLog(`Disabling balancing of ${collB.getFullName()} with permitMigrations ${
+ permitMigrations} and parameters ${tojson(collBSetNoBalanceParam)}`);
+ assert.commandWorked(
+ configDB.collections.update({_id: collB.getFullName()}, {$set: collBSetNoBalanceParam}));
+ setPermitMigrations(collB.getFullName(), permitMigrations);
+
+ st.startBalancer();
+ assert.soon(() => {
+ st.awaitBalancerRound();
+ const shard0Chunks =
+ configDB.chunks.countDocuments({ns: collA.getFullName(), shard: st.shard0.shardName});
+ const shard1Chunks =
+ configDB.chunks.countDocuments({ns: collA.getFullName(), shard: st.shard1.shardName});
+ jsTestLog(`shard0 chunks ${shard0Chunks}, shard1 chunks ${shard1Chunks}`);
+ return shard0Chunks == 2 && shard1Chunks == 2;
+ }, `Balancer failed to balance ${collA.getFullName()}`, 1000 * 60 * 10);
+ st.stopBalancer();
+
+ const collABalanceStatus =
+ assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: collA.getFullName()}));
+ assert.eq(collABalanceStatus.balancerCompliant, true);
+
+ // Test that collB remains unbalanced.
+ const collBBalanceStatus =
+ assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: collB.getFullName()}));
+ assert.eq(collBBalanceStatus.balancerCompliant, false);
+ assert.eq(collBBalanceStatus.firstComplianceViolation, 'chunksImbalance');
+ assert.eq(
+ 4, configDB.chunks.countDocuments({ns: collB.getFullName(), shard: st.shard0.shardName}));
+};
+
+// Test cases that should disable the balancer.
+testBalancer(false /* permitMigrations */, {});
+testBalancer(false /* permitMigrations */, {noBalance: false});
+testBalancer(false /* permitMigrations */, {noBalance: true});
+
+st.stop();
+})();
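
For reference, the test above toggles the new flag by writing directly to config.collections; a
user-facing command is deferred to SERVER-56227. A minimal mongo-shell sketch of the same manual
toggle, assuming a mongos connection and a hypothetical sharded namespace "test.coll":

    // Disable chunk migrations for one collection by editing config.collections directly.
    // Run against a mongos; "test.coll" is a hypothetical namespace used only for illustration.
    db.getSiblingDB("config").collections.updateOne(
        {_id: "test.coll"},
        {$set: {permitMigrations: false}},
        {writeConcern: {w: "majority"}});

    // Re-enable migrations by removing the field; a missing permitMigrations implies true
    // (see CollectionType::getPermitMigrations() in type_collection.h below).
    db.getSiblingDB("config").collections.updateOne(
        {_id: "test.coll"},
        {$unset: {permitMigrations: ""}},
        {writeConcern: {w: "majority"}});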
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 347429d6bd9..35811a68745 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -390,7 +390,7 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
const NamespaceString nss(coll.getNs());
- if (!coll.getAllowBalance()) {
+ if (!coll.getAllowBalance() || !coll.getPermitMigrations()) {
LOGV2_DEBUG(21851,
1,
"Not balancing collection {namespace}; explicitly disabled.",
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 3f50a47ae07..7a69458577d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -965,6 +965,32 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
}
+ // Check if migrations are permitted.
+ auto findCollResponse =
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ CollectionType::ConfigNS,
+ BSON(CollectionType::fullNs << nss.ns()),
+ {},
+ 1);
+ if (!findCollResponse.isOK()) {
+ return findCollResponse.getStatus();
+ }
+
+ if (findCollResponse.getValue().docs.empty()) {
+ return {ErrorCodes::ConflictingOperationInProgress,
+ str::stream() << "Collection '" << nss.ns() << "' does not exist"};
+ }
+
+ auto coll = uassertStatusOK(CollectionType::fromBSON(findCollResponse.getValue().docs[0]));
+
+ if (!coll.getPermitMigrations()) {
+ return {ErrorCodes::ConflictingOperationInProgress,
+ str::stream() << "Collection '" << nss.ns()
+ << "' does not permit migrations so chunks cannot be moved"};
+ }
+
// Must use local read concern because we will perform subsequent writes.
auto findResponse =
configShard->exhaustiveFindOnConfig(opCtx,
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index 68d49d99dc4..bb326ecfa7a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -47,8 +47,11 @@ using unittest::assertGet;
using CommitChunkMigrate = ConfigServerTestFixture;
const NamespaceString kNamespace("TestDB.TestColl");
+const KeyPattern kKeyPattern(BSON("a" << 1));
TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithControlChunk) {
+ const auto collUuid = UUID::gen();
+
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -83,7 +86,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithControlChunk) {
controlChunk.setJumbo(true);
}
- setupChunks({migratedChunk, controlChunk});
+ setupCollection(migratedChunk.getNS(), collUuid, kKeyPattern, {migratedChunk, controlChunk});
Timestamp validAfter{101, 0};
BSONObj versions = assertGet(ShardingCatalogManager::get(operationContext())
@@ -131,6 +134,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithControlChunk) {
}
TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -158,7 +162,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk0});
Timestamp validAfter{101, 0};
@@ -192,6 +196,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
}
TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -219,7 +224,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk0});
// Make the time distance between the last history element large enough.
Timestamp validAfter{200, 0};
@@ -254,6 +259,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
}
TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -281,7 +287,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk0});
// Make the time before the last change to trigger the failure.
Timestamp validAfter{99, 0};
@@ -299,6 +305,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
}
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -335,7 +342,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0, chunk1});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk0, chunk1});
Timestamp validAfter{1};
@@ -352,6 +359,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
}
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -390,7 +398,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
chunk1.setMax(chunkMaxax);
// get version from the control chunk this time
- setupChunks({chunk1, chunk0});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk1, chunk0});
Timestamp validAfter{1};
@@ -407,6 +415,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
}
TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -443,7 +452,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk1});
+ setupCollection(chunk1.getNS(), collUuid, kKeyPattern, {chunk1});
Timestamp validAfter{1};
@@ -460,6 +469,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
}
TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) {
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -500,7 +510,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
Timestamp ctrlChunkValidAfter = Timestamp(50, 0);
chunk1.setHistory({ChunkHistory(ctrlChunkValidAfter, shard1.getName())});
- setupChunks({chunk0, chunk1});
+ setupCollection(chunk0.getNS(), collUuid, kKeyPattern, {chunk0, chunk1});
Timestamp validAfter{101, 0};
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
@@ -543,6 +553,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
TEST_F(CommitChunkMigrate, RejectMissingChunkVersionOnFCV44) {
serverGlobalParams.featureCompatibility.setVersion(
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44);
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -574,7 +585,7 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersionOnFCV44) {
currentChunk.setMin(BSON("a" << 1));
currentChunk.setMax(BSON("a" << 10));
- setupChunks({currentChunk});
+ setupCollection(currentChunk.getNS(), collUuid, kKeyPattern, {currentChunk});
Timestamp validAfter{101, 0};
ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
@@ -592,6 +603,7 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersionOnFCV44) {
TEST_F(CommitChunkMigrate, AcceptMissingChunkVersionOnFCV42) {
serverGlobalParams.featureCompatibility.setVersion(
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo42);
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -623,7 +635,7 @@ TEST_F(CommitChunkMigrate, AcceptMissingChunkVersionOnFCV42) {
currentChunk.setMin(BSON("a" << 1));
currentChunk.setMax(BSON("a" << 10));
- setupChunks({currentChunk});
+ setupCollection(currentChunk.getNS(), collUuid, kKeyPattern, {currentChunk});
Timestamp validAfter{101, 0};
auto result = ShardingCatalogManager::get(operationContext())
@@ -641,6 +653,7 @@ TEST_F(CommitChunkMigrate, AcceptMissingChunkVersionOnFCV42) {
TEST_F(CommitChunkMigrate, RejectOlderChunkVersionOnFCV44) {
serverGlobalParams.featureCompatibility.setVersion(
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44);
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -675,7 +688,7 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersionOnFCV44) {
currentChunk.setMin(BSON("a" << 1));
currentChunk.setMax(BSON("a" << 10));
- setupChunks({currentChunk});
+ setupCollection(currentChunk.getNS(), collUuid, kKeyPattern, {currentChunk});
Timestamp validAfter{101, 0};
auto result = ShardingCatalogManager::get(operationContext())
@@ -694,6 +707,7 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersionOnFCV44) {
TEST_F(CommitChunkMigrate, RejectMismatchedEpochOnFCV44) {
serverGlobalParams.featureCompatibility.setVersion(
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44);
+ const auto collUuid = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -727,7 +741,7 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpochOnFCV44) {
currentChunk.setMin(BSON("a" << 1));
currentChunk.setMax(BSON("a" << 10));
- setupChunks({currentChunk});
+ setupCollection(currentChunk.getNS(), collUuid, kKeyPattern, {currentChunk});
Timestamp validAfter{101, 0};
auto result = ShardingCatalogManager::get(operationContext())
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index ca750c49f1b..3a4fc015900 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -690,15 +690,10 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
ss << "verbose=[recovery_progress,checkpoint_progress,compact_progress],";
}
- if (kDebugBuild) {
- // Enable debug write-ahead logging for all tables under debug build.
- ss << "debug_mode=(table_logging=true,";
+ if (kDebugBuild && gWiredTigerEvictionDebugMode) {
// For select debug builds, support enabling WiredTiger eviction debug mode. This uses
// more aggressive eviction tactics, but may have a negative performance impact.
- if (gWiredTigerEvictionDebugMode) {
- ss << "eviction=true,";
- }
- ss << "),";
+ ss << "debug_mode=(eviction=true),";
}
if (kAddressSanitizerEnabled) {
// For applications using WT, advancing a cursor invalidates the data/memory that cursor was
@@ -717,6 +712,15 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
ss << "debug_mode=(cursor_copy=true),";
}
+ if (TestingProctor::instance().isEnabled()) {
+ // Enable debug write-ahead logging for all tables when testing is enabled.
+ //
+ // If MongoDB startup fails, there may be clues from the previous run still left in the WT
+ // log files that can provide some insight into how the system got into a bad state. When
+ // testing is enabled, keep around some of these files for investigative purposes.
+ ss << "debug_mode=(table_logging=true,checkpoint_retention=4),";
+ }
+
ss << WiredTigerCustomizationHooks::get(getGlobalServiceContext())
->getTableCreateConfig("system");
ss << WiredTigerExtensions::get(getGlobalServiceContext())->getOpenExtensionsConfig();
diff --git a/src/mongo/s/catalog/type_collection.cpp b/src/mongo/s/catalog/type_collection.cpp
index 3ff95150dfc..ecd2addfca0 100644
--- a/src/mongo/s/catalog/type_collection.cpp
+++ b/src/mongo/s/catalog/type_collection.cpp
@@ -43,6 +43,7 @@ namespace {
const BSONField<bool> kNoBalance("noBalance");
const BSONField<bool> kDropped("dropped");
+const BSONField<bool> kPermitMigrations("permitMigrations");
} // namespace
@@ -200,6 +201,19 @@ StatusWith<CollectionType> CollectionType::fromBSON(const BSONObj& source) {
}
}
+ {
+ bool collPermitMigrations;
+ Status status =
+ bsonExtractBooleanField(source, kPermitMigrations.name(), &collPermitMigrations);
+ if (status.isOK()) {
+ coll._permitMigrations = collPermitMigrations;
+ } else if (status == ErrorCodes::NoSuchKey) {
+ // PermitMigrations can be missing.
+ } else {
+ return status;
+ }
+ }
+
return StatusWith<CollectionType>(coll);
}
@@ -273,6 +287,10 @@ BSONObj CollectionType::toBSON() const {
builder.append(kNoBalance.name(), !_allowBalance.get());
}
+ if (_permitMigrations.is_initialized()) {
+ builder.append(kPermitMigrations.name(), _permitMigrations.get());
+ }
+
if (_distributionMode) {
if (*_distributionMode == DistributionMode::kUnsharded) {
builder.append(distributionMode.name(), "unsharded");
diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h
index dd40380453f..cba156013ba 100644
--- a/src/mongo/s/catalog/type_collection.h
+++ b/src/mongo/s/catalog/type_collection.h
@@ -65,6 +65,7 @@ class StatusWith;
* "uuid" : UUID,
* "noBalance" : false,
* "distributionMode" : "unsharded|sharded",
+ * "permitMigrations": false
* }
*
*/
@@ -165,6 +166,18 @@ public:
return _allowBalance.get_value_or(true);
}
+ void setPermitMigrations(bool permit) {
+ if (permit) {
+ _permitMigrations = boost::none;
+ } else {
+ _permitMigrations = permit;
+ }
+ }
+
+ bool getPermitMigrations() const {
+ return _permitMigrations.get_value_or(true);
+ }
+
void setDistributionMode(DistributionMode distributionMode) {
_distributionMode = distributionMode;
}
@@ -206,6 +219,9 @@ private:
// Optional whether balancing is allowed for this collection. If missing, implies true.
boost::optional<bool> _allowBalance;
+
+ // Optional whether migration is allowed for this collection. If missing, implies true.
+ boost::optional<bool> _permitMigrations;
};
} // namespace mongo
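
For context, permitMigrations is an optional field in config.collections, and its absence implies
that migrations are permitted (get_value_or(true) above). A sketch of an entry with migrations
disabled, using a hypothetical namespace and placeholder values for the fields shown in the schema
comment of this header:

    // Illustrative config.collections entry (hypothetical namespace and UUID).
    {
        _id: "test.coll",
        uuid: UUID("00000000-0000-0000-0000-000000000000"),
        noBalance: false,
        distributionMode: "sharded",
        permitMigrations: false  // omit the field to permit migrations again
    }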