author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2020-11-03 14:08:19 -0500
committer Evergreen Agent <no-reply@evergreen.mongodb.com>   2020-11-11 14:52:36 +0000
commit    7950f071b21957be1ab8e5ac2db8b650695a2bd0 (patch)
tree      c18442fd863b1d955d8194a2ddf6759201182e08
parent    b43f6fefe1ac3e941fd55d5452a1ee21e7ff0ae6 (diff)
SERVER-50027 Implement an 'allowMigrations' collection property
-rw-r--r--  jstests/sharding/move_chunk_allowMigrations.js | 133
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp | 26
-rw-r--r--  src/mongo/db/exec/upsert_stage.cpp | 8
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp | 6
-rw-r--r--  src/mongo/db/s/collection_metadata.cpp | 4
-rw-r--r--  src/mongo/db/s/collection_metadata.h | 2
-rw-r--r--  src/mongo/db/s/collection_metadata_filtering_test.cpp | 1
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp | 1
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp | 25
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime_test.cpp | 1
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.cpp | 13
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.h | 4
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp | 7
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 54
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp | 9
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp | 27
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp | 11
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp | 24
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp | 10
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp | 21
-rw-r--r--  src/mongo/db/s/metadata_manager_test.cpp | 7
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp | 1
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 3
-rw-r--r--  src/mongo/db/s/op_observer_sharding_test.cpp | 1
-rw-r--r--  src/mongo/db/s/range_deletion_util_test.cpp | 1
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.cpp | 15
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp | 1
-rw-r--r--  src/mongo/db/s/resharding_destined_recipient_test.cpp | 110
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 23
-rw-r--r--  src/mongo/db/s/type_shard_collection.cpp | 7
-rw-r--r--  src/mongo/db/s/type_shard_collection.h | 6
-rw-r--r--  src/mongo/db/s/type_shard_collection.idl | 11
-rw-r--r--  src/mongo/db/s/type_shard_collection_test.cpp | 13
-rw-r--r--  src/mongo/s/catalog/type_collection.h | 8
-rw-r--r--  src/mongo/s/catalog/type_collection.idl | 6
-rw-r--r--  src/mongo/s/catalog_cache.cpp | 2
-rw-r--r--  src/mongo/s/catalog_cache_loader.cpp | 2
-rw-r--r--  src/mongo/s/catalog_cache_loader.h | 5
-rw-r--r--  src/mongo/s/catalog_cache_loader_mock.cpp | 1
-rw-r--r--  src/mongo/s/chunk_manager.cpp | 14
-rw-r--r--  src/mongo/s/chunk_manager.h | 15
-rw-r--r--  src/mongo/s/chunk_manager_query_test.cpp | 13
-rw-r--r--  src/mongo/s/chunk_manager_refresh_bm.cpp | 17
-rw-r--r--  src/mongo/s/config_server_catalog_cache_loader.cpp | 1
-rw-r--r--  src/mongo/s/routing_table_history_test.cpp | 105
45 files changed, 555 insertions, 220 deletions
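
The property is toggled by writing the collection's document in config.collections. It is intended for internal consumers (the resharding coordinator below sets and clears it around a resharding operation); the new jstest pokes the document directly only to exercise the behaviour. A minimal, illustrative mongo shell sketch against a mongos — the namespace "test.foo" is a placeholder:

// Freeze chunk migrations for the collection: the balancer skips it and moveChunk
// commits are rejected with ConflictingOperationInProgress.
db.getSiblingDB("config").collections.update({_id: "test.foo"}, {$set: {allowMigrations: false}});

// Re-enable migrations by removing the field; an absent field means migrations are allowed.
db.getSiblingDB("config").collections.update({_id: "test.foo"}, {$unset: {allowMigrations: ""}});
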
diff --git a/jstests/sharding/move_chunk_allowMigrations.js b/jstests/sharding/move_chunk_allowMigrations.js
new file mode 100644
index 00000000000..cc10fa77a7e
--- /dev/null
+++ b/jstests/sharding/move_chunk_allowMigrations.js
@@ -0,0 +1,133 @@
+/**
+ * Tests that a collection with allowMigrations: false in config.collections prohibits committing a
+ * moveChunk and disables the balancer.
+ *
+ * @tags: [
+ * requires_fcv_47,
+ * ]
+ */
+(function() {
+'use strict';
+
+load('jstests/libs/fail_point_util.js');
+load('jstests/libs/parallel_shell_helpers.js');
+
+const st = new ShardingTest({config: 1, shards: 2});
+const configDB = st.s.getDB("config");
+const dbName = 'AllowMigrations';
+
+// Resets database dbName, enables sharding on it and establishes shard0 as the primary shard; test case agnostic
+const setUpDb = function setUpDatabaseAndEnableSharding() {
+ assert.commandWorked(st.s.getDB(dbName).dropDatabase());
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+};
+
+// Tests that moveChunk does not succeed when {allowMigrations: false} is set
+(function testAllowMigrationsFalsePreventsMoveChunk() {
+ setUpDb();
+
+ const collName = "collA";
+ const ns = dbName + "." + collName;
+
+ assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 0}));
+ assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 1}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+ // Confirm that an in-progress moveChunk fails once {allowMigrations: false} is set
+ const fp = configureFailPoint(st.shard0, "moveChunkHangAtStep5");
+ const awaitResult = startParallelShell(
+ funWithArgs(function(ns, toShardName) {
+ assert.commandFailedWithCode(
+ db.adminCommand({moveChunk: ns, find: {_id: 0}, to: toShardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+ }, ns, st.shard1.shardName), st.s.port);
+ fp.wait();
+ assert.commandWorked(
+ configDB.collections.update({_id: ns}, {$set: {allowMigrations: false}}, {upsert: true}));
+ fp.off();
+ awaitResult();
+
+ // Now that {allowMigrations: false} is set, sending a new moveChunk command should also fail.
+ assert.commandFailedWithCode(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+
+ // Confirm shard0 reports {allowMigrations: false} in the local cache as well
+ const cachedEntry = st.shard0.getDB("config").cache.collections.findOne({_id: ns});
+ assert.eq(false, cachedEntry.allowMigrations);
+})();
+
+// Tests that {allowMigrations: false} disables balancing for collB and does not interfere with balancing
+// for collA.
+//
+// collBSetParams specifies the field(s) that will be set on collB in config.collections.
+const testBalancer = function testAllowMigrationsFalseDisablesBalancer(collBSetParams) {
+ setUpDb();
+
+ const collAName = "collA";
+ const collBName = "collB";
+ const collA = st.s.getCollection(`${dbName}.${collAName}`);
+ const collB = st.s.getCollection(`${dbName}.${collBName}`);
+
+ assert.commandWorked(st.s.adminCommand({shardCollection: collA.getFullName(), key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: collB.getFullName(), key: {_id: 1}}));
+
+ // Split both collections into 4 chunks so balancing can occur.
+ for (let coll of [collA, collB]) {
+ coll.insert({_id: 1});
+ coll.insert({_id: 10});
+ coll.insert({_id: 20});
+ coll.insert({_id: 30});
+
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 10}));
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 20}));
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 30}));
+
+ // Confirm the chunks are initially unbalanced. All chunks should start out on shard0
+ // (primary shard for the database).
+ const balancerStatus = assert.commandWorked(
+ st.s0.adminCommand({balancerCollectionStatus: coll.getFullName()}));
+ assert.eq(balancerStatus.balancerCompliant, false);
+ assert.eq(balancerStatus.firstComplianceViolation, 'chunksImbalance');
+ assert.eq(
+ 4, configDB.chunks.find({ns: coll.getFullName(), shard: st.shard0.shardName}).count());
+ }
+
+ jsTestLog(
+ `Disabling balancing of ${collB.getFullName()} with parameters ${tojson(collBSetParams)}`);
+ assert.commandWorked(
+ configDB.collections.update({_id: collB.getFullName()}, {$set: collBSetParams}));
+
+ st.startBalancer();
+ assert.soon(() => {
+ st.awaitBalancerRound();
+ const shard0Chunks =
+ configDB.chunks.find({ns: collA.getFullName(), shard: st.shard0.shardName}).itcount();
+ const shard1Chunks =
+ configDB.chunks.find({ns: collA.getFullName(), shard: st.shard1.shardName}).itcount();
+ jsTestLog(`shard0 chunks ${shard0Chunks}, shard1 chunks ${shard1Chunks}`);
+ return shard0Chunks == 2 && shard1Chunks == 2;
+ }, `Balancer failed to balance ${collA.getFullName()}`, 1000 * 60 * 10);
+ st.stopBalancer();
+
+ const collABalanceStatus =
+ assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: collA.getFullName()}));
+ assert.eq(collABalanceStatus.balancerCompliant, true);
+
+ // Test that collB remains unbalanced.
+ const collBBalanceStatus =
+ assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: collB.getFullName()}));
+ assert.eq(collBBalanceStatus.balancerCompliant, false);
+ assert.eq(collBBalanceStatus.firstComplianceViolation, 'chunksImbalance');
+ assert.eq(4,
+ configDB.chunks.find({ns: collB.getFullName(), shard: st.shard0.shardName}).count());
+};
+
+// Test cases that should disable the balancer.
+testBalancer({allowMigrations: false});
+testBalancer({allowMigrations: false, noBalance: false});
+testBalancer({allowMigrations: false, noBalance: true});
+
+st.stop();
+})();
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 2f9a844950b..b2e23e4887c 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -181,10 +181,15 @@ public:
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into count.
- auto rangePreserver =
- CollectionShardingState::get(opCtx, nss)
- ->getOwnershipFilter(
- opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup);
+ auto* const css = CollectionShardingState::get(opCtx, nss);
+ boost::optional<ScopedCollectionFilter> rangePreserver;
+ if (css->getCollectionDescription(opCtx).isSharded()) {
+ rangePreserver.emplace(
+ CollectionShardingState::get(opCtx, nss)
+ ->getOwnershipFilter(
+ opCtx,
+ CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup));
+ }
auto expCtx = makeExpressionContextForGetExecutor(
opCtx, request.getCollation().value_or(BSONObj()), nss);
@@ -244,10 +249,15 @@ public:
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into count.
- auto rangePreserver =
- CollectionShardingState::get(opCtx, nss)
- ->getOwnershipFilter(
- opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup);
+ auto* const css = CollectionShardingState::get(opCtx, nss);
+ boost::optional<ScopedCollectionFilter> rangePreserver;
+ if (css->getCollectionDescription(opCtx).isSharded()) {
+ rangePreserver.emplace(
+ CollectionShardingState::get(opCtx, nss)
+ ->getOwnershipFilter(
+ opCtx,
+ CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup));
+ }
auto statusWithPlanExecutor =
getExecutorCount(makeExpressionContextForGetExecutor(
diff --git a/src/mongo/db/exec/upsert_stage.cpp b/src/mongo/db/exec/upsert_stage.cpp
index 4f0fa990c23..132e6aad0d1 100644
--- a/src/mongo/db/exec/upsert_stage.cpp
+++ b/src/mongo/db/exec/upsert_stage.cpp
@@ -119,10 +119,9 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
// throw so that MongoS can target the insert to the correct shard.
if (_isUserInitiatedWrite) {
auto* const css = CollectionShardingState::get(opCtx(), collection()->ns());
- const auto collFilter = css->getOwnershipFilter(
- opCtx(), CollectionShardingState::OrphanCleanupPolicy::kAllowOrphanCleanup);
-
- if (collFilter.isSharded()) {
+ if (css->getCollectionDescription(opCtx()).isSharded()) {
+ const auto collFilter = css->getOwnershipFilter(
+ opCtx(), CollectionShardingState::OrphanCleanupPolicy::kAllowOrphanCleanup);
const ShardKeyPattern shardKeyPattern(collFilter.getKeyPattern());
auto newShardKey = shardKeyPattern.extractShardKeyFromDoc(newDocument);
@@ -274,4 +273,5 @@ void UpsertStage::_assertDocumentToBeInsertedIsValid(const mb::Document& documen
_assertPathsNotArray(document, shardKeyPaths);
}
}
+
} // namespace mongo
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index a7416ef95e2..25414f71f69 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -388,12 +388,14 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
const NamespaceString& nss(coll.getNss());
- if (!coll.getAllowBalance()) {
+ if (!coll.getAllowBalance() || !coll.getAllowMigrations()) {
LOGV2_DEBUG(21851,
1,
"Not balancing collection {namespace}; explicitly disabled.",
"Not balancing explicitly disabled collection",
- "namespace"_attr = nss);
+ "namespace"_attr = nss,
+ "allowBalance"_attr = coll.getAllowBalance(),
+ "allowMigrations"_attr = coll.getAllowMigrations());
continue;
}
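Because the balancer now skips such collections, a frozen collection simply stays imbalanced; the jstest above asserts this through balancerCollectionStatus. An illustrative shell sketch against a mongos (namespace is a placeholder):

// For a frozen, still-imbalanced collection the status is expected to keep reporting the violation:
// { balancerCompliant: false, firstComplianceViolation: "chunksImbalance", ok: 1 }
db.adminCommand({balancerCollectionStatus: "test.foo"});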
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index ccb0a5a4050..354bab6cdec 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -44,6 +44,10 @@ namespace mongo {
CollectionMetadata::CollectionMetadata(ChunkManager cm, const ShardId& thisShardId)
: _cm(std::move(cm)), _thisShardId(thisShardId) {}
+bool CollectionMetadata::allowMigrations() const {
+ return _cm ? _cm->allowMigrations() : true;
+}
+
BSONObj CollectionMetadata::extractDocumentKey(const BSONObj& doc) const {
BSONObj key;
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index 36c9fa844ad..e9b202f5ecf 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -69,6 +69,8 @@ public:
return bool(_cm);
}
+ bool allowMigrations() const;
+
/**
* Returns the current shard version for the collection or UNSHARDED if it is not sharded.
*
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index ddeb8f1c7ba..3d3763a5928 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -71,6 +71,7 @@ protected:
false,
epoch,
boost::none,
+ true,
[&] {
ChunkVersion version(1, 0, epoch);
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 0c343a5ed37..c52d13d3e50 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -90,6 +90,7 @@ CollectionMetadata makeCollectionMetadataImpl(
false,
epoch,
boost::none,
+ true,
allChunks)),
kChunkManager),
kThisShard);
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index c5c264af4c7..0e95190e915 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -112,11 +112,22 @@ CollectionShardingRuntime* CollectionShardingRuntime::get_UNSAFE(ServiceContext*
ScopedCollectionFilter CollectionShardingRuntime::getOwnershipFilter(
OperationContext* opCtx, OrphanCleanupPolicy orphanCleanupPolicy) {
const auto optReceivedShardVersion = getOperationReceivedVersion(opCtx, _nss);
- invariant(!optReceivedShardVersion || !ChunkVersion::isIgnoredVersion(*optReceivedShardVersion),
- "getOwnershipFilter called by operation that doesn't have a valid shard version");
+ // TODO (SERVER-52764): No operations should be calling getOwnershipFilter without a shard
+ // version
+ //
+ // invariant(optReceivedShardVersion,
+ // "getOwnershipFilter called by operation that doesn't specify shard version");
+ if (!optReceivedShardVersion)
+ return {kUnshardedCollection};
+
+ auto metadata = _getMetadataWithVersionCheckAt(
+ opCtx, repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime());
+ invariant(!ChunkVersion::isIgnoredVersion(*optReceivedShardVersion) ||
+ !metadata->get().allowMigrations() || !metadata->get().isSharded(),
+ "For sharded collections getOwnershipFilter cannot be relied on without a valid "
+ "shard version");
- return _getMetadataWithVersionCheckAt(opCtx,
- repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime());
+ return {std::move(metadata)};
}
ScopedCollectionDescription CollectionShardingRuntime::getCollectionDescription(
@@ -335,10 +346,8 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt(
!criticalSectionSignal);
}
- if (ChunkVersion::isIgnoredVersion(receivedShardVersion))
- return kUnshardedCollection;
-
- if (receivedShardVersion.isWriteCompatibleWith(wantedShardVersion))
+ if (wantedShardVersion.isWriteCompatibleWith(receivedShardVersion) ||
+ ChunkVersion::isIgnoredVersion(receivedShardVersion))
return optCurrentMetadata;
StaleConfigInfo sci(
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index 952924bf6f9..215dd006b95 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -63,6 +63,7 @@ protected:
false,
epoch,
boost::none,
+ true,
{std::move(chunk)})),
boost::none);
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index 977fd9640b0..7aa957717c0 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -325,10 +325,17 @@ StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opC
return ShardType::fromBSON(doc.getValue());
}
-void ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) {
- const NamespaceString chunkNS(ChunkType::ConfigNS);
+void ConfigServerTestFixture::setupCollection(const NamespaceString& nss,
+ const KeyPattern& shardKey,
+ const std::vector<ChunkType>& chunks) {
+ CollectionType coll(nss, chunks[0].getVersion().epoch(), Date_t::now(), UUID::gen());
+ coll.setKeyPattern(shardKey);
+ ASSERT_OK(
+ insertToConfigCollection(operationContext(), CollectionType::ConfigNS, coll.toBSON()));
+
for (const auto& chunk : chunks) {
- ASSERT_OK(insertToConfigCollection(operationContext(), chunkNS, chunk.toConfigBSON()));
+ ASSERT_OK(insertToConfigCollection(
+ operationContext(), ChunkType::ConfigNS, chunk.toConfigBSON()));
}
}
diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h
index ba008a827f6..b5264d9131c 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.h
+++ b/src/mongo/db/s/config/config_server_test_fixture.h
@@ -103,7 +103,9 @@ protected:
/**
* Setup the config.chunks collection to contain the given chunks.
*/
- void setupChunks(const std::vector<ChunkType>& chunks);
+ void setupCollection(const NamespaceString& nss,
+ const KeyPattern& shardKey,
+ const std::vector<ChunkType>& chunks);
/**
* Retrieves the chunk document from the config server.
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp
index 7df7df0e6c7..98f7dd5288c 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp
@@ -44,6 +44,7 @@ namespace mongo {
namespace {
const NamespaceString kNss("TestDB", "TestColl");
+const KeyPattern kKeyPattern(BSON("a" << 1));
const ShardType kShard0("shard0000", "shard0000:1234");
const ShardType kShard1("shard0001", "shard0001:1234");
@@ -144,7 +145,7 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
ChunkVersion targetChunkVersion(
collectionVersion.majorVersion() + 1, 0, collectionVersion.epoch());
- setupChunks({shard0Chunk0, shard1Chunk0});
+ setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard1Chunk0});
auto opCtx = operationContext();
@@ -173,7 +174,7 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
ChunkVersion targetChunkVersion(
collectionVersion.majorVersion() + 1, 0, collectionVersion.epoch());
- setupChunks({shard0Chunk0, shard0Chunk1, shard1Chunk0});
+ setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard0Chunk1, shard1Chunk0});
auto opCtx = operationContext();
std::vector<ShardId> shardIds{kShard0.getName(), kShard1.getName()};
@@ -203,7 +204,7 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
ChunkVersion targetChunkVersion(
collectionVersion.majorVersion() + 1, 0, collectionVersion.epoch());
- setupChunks({shard0Chunk0, shard0Chunk1, shard1Chunk0, shard1Chunk1});
+ setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard0Chunk1, shard1Chunk0, shard1Chunk1});
auto opCtx = operationContext();
std::vector<ShardId> shardIds{kShard0.getName(), kShard1.getName()};
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index d1f09247cdb..a5823b34a33 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -774,12 +774,12 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
boost::none));
uassert(ErrorCodes::ShardNotFound,
- str::stream() << "shard " << toShard << " does not exist",
+ str::stream() << "Shard " << toShard << " does not exist",
!shardResult.docs.empty());
auto shard = uassertStatusOK(ShardType::fromBSON(shardResult.docs.front()));
uassert(ErrorCodes::ShardNotFound,
- str::stream() << toShard << " is draining",
+ str::stream() << "Shard " << toShard << " is currently draining",
!shard.getDraining());
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
@@ -798,39 +798,43 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
}
- // Must use local read concern because we will perform subsequent writes.
- auto findResponse =
+ auto findCollResponse = uassertStatusOK(
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ CollectionType::ConfigNS,
+ BSON(CollectionType::kNssFieldName << nss.ns()),
+ {},
+ 1));
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Collection does not exist",
+ !findCollResponse.docs.empty());
+ const CollectionType coll(findCollResponse.docs[0]);
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Collection is undergoing changes and chunks cannot be moved",
+ coll.getAllowMigrations());
+
+ auto findResponse = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
ChunkType::ConfigNS,
BSON("ns" << nss.ns()),
BSON(ChunkType::lastmod << -1),
- 1);
- if (!findResponse.isOK()) {
- return findResponse.getStatus();
- }
-
- if (MONGO_unlikely(migrationCommitVersionError.shouldFail())) {
- uassert(ErrorCodes::StaleEpoch,
- "failpoint 'migrationCommitVersionError' generated error",
- false);
- }
+ 1));
+ uassert(ErrorCodes::IncompatibleShardingMetadata,
+ str::stream() << "Tried to find max chunk version for collection '" << nss.ns()
+ << ", but found no chunks",
+ !findResponse.docs.empty());
- const auto chunksVector = std::move(findResponse.getValue().docs);
- if (chunksVector.empty()) {
- return {ErrorCodes::IncompatibleShardingMetadata,
- str::stream() << "Tried to find max chunk version for collection '" << nss.ns()
- << ", but found no chunks"};
- }
+ const auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(findResponse.docs[0]));
+ const auto currentCollectionVersion = chunk.getVersion();
- const auto swChunk = ChunkType::fromConfigBSON(chunksVector.front());
- if (!swChunk.isOK()) {
- return swChunk.getStatus();
+ if (MONGO_unlikely(migrationCommitVersionError.shouldFail())) {
+ uasserted(ErrorCodes::StaleEpoch,
+ "Failpoint 'migrationCommitVersionError' generated error");
}
- const auto currentCollectionVersion = swChunk.getValue().getVersion();
-
// It is possible for a migration to end up running partly without the protection of the
// distributed lock if the config primary stepped down since the start of the migration and
// failed to recover the migration. Check that the collection has not been dropped and recreated
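From the client's perspective the new checks surface as a ConflictingOperationInProgress error on moveChunk, both for a newly issued command and for a migration that was already in flight when the flag was set; this is what the jstest above asserts. An illustrative jstest-style sketch (namespace and destination shard are placeholders):

assert.commandFailedWithCode(
    st.s.adminCommand({moveChunk: "test.foo", find: {_id: 0}, to: st.shard1.shardName}),
    ErrorCodes.ConflictingOperationInProgress);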
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
index ca19cc84e80..0982c6fe76e 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
@@ -44,6 +44,7 @@ namespace {
using unittest::assertGet;
+const KeyPattern kKeyPattern(BSON("x" << 1));
class ClearJumboFlagTest : public ConfigServerTestFixture {
public:
@@ -73,12 +74,6 @@ protected:
setupShards({shard});
- CollectionType collection(_namespace, _epoch, Date_t::now(), UUID::gen());
- collection.setKeyPattern(BSON("x" << 1));
-
- ASSERT_OK(insertToConfigCollection(
- operationContext(), CollectionType::ConfigNS, collection.toBSON()));
-
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setNS(_namespace);
@@ -96,7 +91,7 @@ protected:
otherChunk.setMin(nonJumboChunk().getMin());
otherChunk.setMax(nonJumboChunk().getMax());
- setupChunks({chunk, otherChunk});
+ setupCollection(_namespace, kKeyPattern, {chunk, otherChunk});
}
private:
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index 0283b4c1cc4..754deb1bf86 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -47,6 +47,7 @@ using unittest::assertGet;
using CommitChunkMigrate = ConfigServerTestFixture;
const NamespaceString kNamespace("TestDB.TestColl");
+const KeyPattern kKeyPattern(BSON("x" << 1));
TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
ShardType shard0;
@@ -83,7 +84,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
controlChunk.setJumbo(true);
}
- setupChunks({migratedChunk, controlChunk});
+ setupCollection(kNamespace, kKeyPattern, {migratedChunk, controlChunk});
Timestamp validAfter{101, 0};
BSONObj versions = assertGet(ShardingCatalogManager::get(operationContext())
@@ -154,7 +155,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupCollection(kNamespace, kKeyPattern, {chunk0});
Timestamp validAfter{101, 0};
@@ -211,7 +212,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupCollection(kNamespace, kKeyPattern, {chunk0});
// Make the time distance from the last history entry large enough.
Timestamp validAfter{200, 0};
@@ -270,7 +271,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupCollection(kNamespace, kKeyPattern, {chunk0});
// Make the time earlier than the last change in order to trigger the failure.
Timestamp validAfter{99, 0};
@@ -288,7 +289,6 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
}
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
-
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -324,7 +324,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0, chunk1});
+ setupCollection(kNamespace, kKeyPattern, {chunk0, chunk1});
Timestamp validAfter{1};
@@ -341,7 +341,6 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
}
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
-
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -379,7 +378,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
chunk1.setMax(chunkMaxax);
// get version from the control chunk this time
- setupChunks({chunk1, chunk0});
+ setupCollection(kNamespace, kKeyPattern, {chunk1, chunk0});
Timestamp validAfter{1};
@@ -396,7 +395,6 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
}
TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
-
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -432,7 +430,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk1});
+ setupCollection(kNamespace, kKeyPattern, {chunk1});
Timestamp validAfter{1};
@@ -449,7 +447,6 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
}
TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) {
-
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -489,7 +486,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
Timestamp ctrlChunkValidAfter = Timestamp(50, 0);
chunk1.setHistory({ChunkHistory(ctrlChunkValidAfter, shard1.getName())});
- setupChunks({chunk0, chunk1});
+ setupCollection(kNamespace, kKeyPattern, {chunk0, chunk1});
Timestamp validAfter{101, 0};
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
@@ -557,7 +554,7 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) {
currentChunk.setMin(BSON("a" << 1));
currentChunk.setMax(BSON("a" << 10));
- setupChunks({currentChunk});
+ setupCollection(kNamespace, kKeyPattern, {currentChunk});
Timestamp validAfter{101, 0};
ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
@@ -606,7 +603,7 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
currentChunk.setMin(BSON("a" << 1));
currentChunk.setMax(BSON("a" << 10));
- setupChunks({currentChunk});
+ setupCollection(kNamespace, kKeyPattern, {currentChunk});
Timestamp validAfter{101, 0};
auto result = ShardingCatalogManager::get(operationContext())
@@ -655,7 +652,7 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
currentChunk.setMin(BSON("a" << 1));
currentChunk.setMax(BSON("a" << 10));
- setupChunks({currentChunk});
+ setupCollection(kNamespace, kKeyPattern, {currentChunk});
Timestamp validAfter{101, 0};
auto result = ShardingCatalogManager::get(operationContext())
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
index dfda75a049c..f2f308811d3 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
@@ -36,6 +36,7 @@ namespace mongo {
namespace {
const NamespaceString kNss("TestDB", "TestColl");
+const KeyPattern kKeyPattern(BSON("x" << 1));
using EnsureChunkVersionIsGreaterThanTest = ConfigServerTestFixture;
@@ -103,7 +104,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingEpochFoundRetur
ChunkType existingChunkType = requestedChunkType;
// Epoch is different.
existingChunkType.setVersion(ChunkVersion(10, 2, OID::gen()));
- setupChunks({existingChunkType});
+ setupCollection(kNss, kKeyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
@@ -125,7 +126,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundRetu
ChunkType existingChunkType = requestedChunkType;
// Min key is different.
existingChunkType.setMin(BSON("a" << -1));
- setupChunks({existingChunkType});
+ setupCollection(kNss, kKeyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
@@ -147,7 +148,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundRetu
ChunkType existingChunkType = requestedChunkType;
// Max key is different.
existingChunkType.setMax(BSON("a" << 20));
- setupChunks({existingChunkType});
+ setupCollection(kNss, kKeyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
@@ -168,7 +169,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
const auto existingChunkType = requestedChunkType;
const auto highestChunkType = generateChunkType(
kNss, ChunkVersion(20, 3, epoch), ShardId("shard0001"), BSON("a" << 11), BSON("a" << 20));
- setupChunks({existingChunkType, highestChunkType});
+ setupCollection(kNss, kKeyPattern, {existingChunkType, highestChunkType});
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
@@ -191,7 +192,7 @@ TEST_F(
ChunkType existingChunkType = requestedChunkType;
existingChunkType.setVersion(ChunkVersion(11, 1, epoch));
- setupChunks({existingChunkType});
+ setupCollection(kNss, kKeyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 05478c71aa6..269bb6e6253 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -38,12 +38,14 @@
namespace mongo {
namespace {
-using unittest::assertGet;
-const NamespaceString kNamespace("TestDB.TestColl");
+using unittest::assertGet;
using MergeChunkTest = ConfigServerTestFixture;
+const NamespaceString kNamespace("TestDB.TestColl");
+const KeyPattern kKeyPattern(BSON("x" << 1));
+
TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
ChunkType chunk;
chunk.setName(OID::gen());
@@ -69,7 +71,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2});
+ setupCollection(kNamespace, kKeyPattern, {chunk, chunk2});
Timestamp validAfter{100, 0};
@@ -151,7 +153,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkBound2, chunkMax};
- setupChunks({chunk, chunk2, chunk3});
+ setupCollection(kNamespace, kKeyPattern, {chunk, chunk2, chunk3});
Timestamp validAfter{100, 0};
@@ -229,7 +231,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
otherChunk.setMin(BSON("a" << 10));
otherChunk.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2, otherChunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk, chunk2, otherChunk});
Timestamp validAfter{100, 0};
@@ -303,7 +305,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
otherChunk.setMin(BSON("a" << 10));
otherChunk.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2, otherChunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk, chunk2, otherChunk});
Timestamp validAfter{1};
@@ -369,7 +371,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2});
+ setupCollection(kNamespace, kKeyPattern, {chunk, chunk2});
Timestamp validAfter{1};
@@ -406,7 +408,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2});
+ setupCollection(kNamespace, kKeyPattern, {chunk, chunk2});
Timestamp validAfter{1};
@@ -451,7 +453,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
mergedChunk.setVersion(mergedVersion);
mergedChunk.setMax(chunkMax);
- setupChunks({mergedChunk});
+ setupCollection(kNamespace, kKeyPattern, {mergedChunk});
Timestamp validAfter{1};
@@ -516,7 +518,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
chunk.setVersion(version);
originalChunks.push_back(chunk);
- setupChunks(originalChunks);
+ setupCollection(kNamespace, kKeyPattern, originalChunks);
}
Timestamp validAfter{1};
@@ -557,7 +559,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
chunk3.setMin(chunkBound2);
chunk3.setMax(chunkMax);
- setupChunks({chunk1, chunk2, chunk3});
+ setupCollection(kNamespace, kKeyPattern, {chunk1, chunk2, chunk3});
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound1, chunkBound2, chunkMax};
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
index 254afab7c12..f8f73c0c7c7 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
@@ -63,6 +63,8 @@ using std::string;
using std::vector;
using unittest::assertGet;
+const KeyPattern kKeyPattern(BSON("_id" << 1));
+
BSONObj getReplSecondaryOkMetadata() {
BSONObjBuilder o;
ReadPreferenceSetting(ReadPreference::Nearest).toContainingBSON(&o);
@@ -214,7 +216,9 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
setupShards(std::vector<ShardType>{shard1, shard2});
setupDatabase("testDB", shard1.getName(), true);
- setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3});
+ setupCollection(NamespaceString("testDB.testColl"),
+ kKeyPattern,
+ std::vector<ChunkType>{chunk1, chunk2, chunk3});
auto startedResult = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
@@ -297,7 +301,9 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
setupShards(std::vector<ShardType>{shard1, shard2});
setupDatabase("testDB", shard2.getName(), false);
- setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3});
+ setupCollection(NamespaceString("testDB.testColl"),
+ kKeyPattern,
+ std::vector<ChunkType>{chunk1, chunk2, chunk3});
auto startedResult = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index 4bb7ad1ec6d..24281e68389 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -40,6 +40,7 @@ namespace {
using unittest::assertGet;
const NamespaceString kNamespace("TestDB", "TestColl");
+const KeyPattern kKeyPattern(BSON("a" << 1));
using SplitChunkTest = ConfigServerTestFixture;
@@ -62,7 +63,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
auto chunkSplitPoint = BSON("a" << 5);
std::vector<BSONObj> splitPoints{chunkSplitPoint};
- setupChunks({chunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk});
auto versions = assertGet(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -135,7 +136,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
auto chunkSplitPoint2 = BSON("a" << 7);
std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
- setupChunks({chunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -221,7 +222,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
chunk2.setMin(BSON("a" << 10));
chunk2.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2});
+ setupCollection(kNamespace, kKeyPattern, {chunk, chunk2});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -272,7 +273,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
auto chunkSplitPoint = BSON("a" << 5);
splitPoints.push_back(chunkSplitPoint);
- setupChunks({chunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -299,7 +300,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
std::vector<BSONObj> splitPoints{BSON("a" << 5)};
- setupChunks({chunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -326,7 +327,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
std::vector<BSONObj> splitPoints{BSON("a" << 5)};
- setupChunks({chunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -354,7 +355,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)};
- setupChunks({chunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -381,7 +382,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)};
- setupChunks({chunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -409,7 +410,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)};
- setupChunks({chunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -433,7 +434,7 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
auto chunkMax = BSON("a" << kMaxBSONKey);
chunk.setMin(chunkMin);
chunk.setMax(chunkMax);
- setupChunks({chunk});
+ setupCollection(kNamespace, kKeyPattern, {chunk});
ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 71995b20b72..4786c4a9c1f 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -86,6 +86,7 @@ protected:
false,
epoch,
boost::none,
+ true,
{ChunkType{kNss, range, ChunkVersion(1, 0, epoch), kOtherShard}});
return CollectionMetadata(ChunkManager(kThisShard,
@@ -132,7 +133,7 @@ protected:
splitChunks.emplace_back(
kNss, ChunkRange(maxKey, chunkToSplit.getMax()), chunkVersion, kOtherShard);
- auto rt = cm->getRoutingTableHistory_ForTest().makeUpdated(boost::none, splitChunks);
+ auto rt = cm->getRoutingTableHistory_ForTest().makeUpdated(boost::none, true, splitChunks);
return CollectionMetadata(ChunkManager(cm->dbPrimary(),
cm->dbVersion(),
@@ -157,7 +158,9 @@ protected:
chunkVersion.incMajor();
auto rt = cm->getRoutingTableHistory_ForTest().makeUpdated(
- boost::none, {ChunkType(kNss, ChunkRange(minKey, maxKey), chunkVersion, kOtherShard)});
+ boost::none,
+ true,
+ {ChunkType(kNss, ChunkRange(minKey, maxKey), chunkVersion, kOtherShard)});
return CollectionMetadata(ChunkManager(cm->dbPrimary(),
cm->dbVersion(),
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index 3d87d74f94b..6ceedcb45af 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -155,6 +155,7 @@ protected:
false,
epoch,
boost::none,
+ true,
{ChunkType{kNss,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
ChunkVersion(1, 0, epoch),
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 570c9c3ecb5..e6951a2cf85 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -164,6 +164,9 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
uassert(ErrorCodes::IncompatibleShardingMetadata,
"Cannot move chunks for an unsharded collection",
metadata.isSharded());
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Collection is undergoing changes so moveChunk is not allowed.",
+ metadata.allowMigrations());
return std::make_tuple(std::move(metadata), std::move(collectionUUID));
}();
diff --git a/src/mongo/db/s/op_observer_sharding_test.cpp b/src/mongo/db/s/op_observer_sharding_test.cpp
index f1c78c2d9e9..e95b45c99a3 100644
--- a/src/mongo/db/s/op_observer_sharding_test.cpp
+++ b/src/mongo/db/s/op_observer_sharding_test.cpp
@@ -73,6 +73,7 @@ protected:
false,
epoch,
boost::none,
+ true,
{std::move(chunk)});
return CollectionMetadata(ChunkManager(ShardId("this"),
diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp
index b2de302ab59..c9e10579df7 100644
--- a/src/mongo/db/s/range_deletion_util_test.cpp
+++ b/src/mongo/db/s/range_deletion_util_test.cpp
@@ -97,6 +97,7 @@ public:
false,
epoch,
boost::none,
+ true,
{ChunkType{kNss,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
ChunkVersion(1, 0, epoch),
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 7d81fe332c7..d9ef0c52d6c 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -159,10 +159,11 @@ BSONObj createReshardingFieldsUpdateForOriginalNss(
TypeCollectionDonorFields donorField(coordinatorDoc.getReshardingKey());
originalEntryReshardingFields.setDonorFields(donorField);
- return BSON(
- "$set" << BSON("reshardingFields"
- << originalEntryReshardingFields.toBSON() << "lastmod"
- << opCtx->getServiceContext()->getPreciseClockSource()->now()));
+ return BSON("$set" << BSON(CollectionType::kReshardingFieldsFieldName
+ << originalEntryReshardingFields.toBSON()
+ << CollectionType::kUpdatedAtFieldName
+ << opCtx->getServiceContext()->getPreciseClockSource()->now()
+ << CollectionType::kAllowMigrationsFieldName << false));
}
case CoordinatorStateEnum::kCommitted:
// Update the config.collections entry for the original nss to reflect
@@ -179,10 +180,10 @@ BSONObj createReshardingFieldsUpdateForOriginalNss(
case mongo::CoordinatorStateEnum::kDone:
// Remove 'reshardingFields' from the config.collections entry
return BSON(
- "$unset" << BSON("reshardingFields"
- << "")
+ "$unset" << BSON(CollectionType::kReshardingFieldsFieldName
+ << "" << CollectionType::kAllowMigrationsFieldName << "")
<< "$set"
- << BSON("lastmod"
+ << BSON(CollectionType::kUpdatedAtFieldName
<< opCtx->getServiceContext()->getPreciseClockSource()->now()));
default:
// Update the 'state' field in the 'reshardingFields' section
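Net effect on the original collection's config.collections document: while the resharding operation is in flight it carries both reshardingFields and allowMigrations: false, and both are unset again once the coordinator reaches kDone. An illustrative shell sketch (exact sub-fields elided; namespace is a placeholder):

// While resharding is in flight:
db.getSiblingDB("config").collections.findOne({_id: "test.foo"});
// => { _id: "test.foo", ..., reshardingFields: { ... }, allowMigrations: false }
// After the coordinator reaches kDone, both fields are removed again.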
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
index f23de8b3a08..4ffebac60fb 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
@@ -105,6 +105,7 @@ protected:
false,
epoch,
boost::none,
+ true,
{std::move(chunk)})),
boost::none);
diff --git a/src/mongo/db/s/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding_destined_recipient_test.cpp
index 7e3a1c9f3bc..164a51fda5e 100644
--- a/src/mongo/db/s/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding_destined_recipient_test.cpp
@@ -156,13 +156,6 @@ public:
}
protected:
- CollectionType createCollection(const OID& epoch) {
- CollectionType coll(kNss, epoch, Date_t::now(), UUID::gen());
- coll.setKeyPattern(BSON(kShardKey << 1));
- coll.setUnique(false);
- return coll;
- }
-
std::vector<ChunkType> createChunks(const OID& epoch, const std::string& shardKey) {
auto range1 = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << 5));
ChunkType chunk1(kNss, range1, ChunkVersion(1, 0, epoch), kShardList[0].getName());
@@ -201,20 +194,22 @@ protected:
client.createCollection(env.tempNss.ns());
-
DatabaseType db(kNss.db().toString(), kShardList[0].getName(), true, env.dbVersion);
TypeCollectionReshardingFields reshardingFields;
reshardingFields.setUuid(UUID::gen());
reshardingFields.setDonorFields(TypeCollectionDonorFields{BSON("y" << 1)});
- auto collType = createCollection(env.version.epoch());
+ CollectionType coll(kNss, env.version.epoch(), Date_t::now(), UUID::gen());
+ coll.setKeyPattern(BSON(kShardKey << 1));
+ coll.setUnique(false);
+ coll.setAllowMigrations(false);
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(db);
_mockCatalogCacheLoader->setCollectionRefreshValues(
- kNss, collType, createChunks(env.version.epoch(), kShardKey), reshardingFields);
+ kNss, coll, createChunks(env.version.epoch(), kShardKey), reshardingFields);
_mockCatalogCacheLoader->setCollectionRefreshValues(
- env.tempNss, collType, createChunks(env.version.epoch(), "y"), boost::none);
+ env.tempNss, coll, createChunks(env.version.epoch(), "y"), boost::none);
forceDatabaseRefresh(opCtx, kNss.db());
forceShardFilteringMetadataRefresh(opCtx, kNss);
@@ -229,21 +224,9 @@ protected:
const NamespaceString& nss,
const BSONObj& doc,
const ReshardingEnv& env) {
+ AutoGetCollection coll(opCtx, nss, MODE_IX);
WriteUnitOfWork wuow(opCtx);
- AutoGetCollection autoColl1(opCtx, nss, MODE_IX);
-
- // TODO(SERVER-50027): This is to temporarily make this test pass until getOwnershipFilter
- // has been updated to detect frozen migrations.
- if (!OperationShardingState::isOperationVersioned(opCtx)) {
- OperationShardingState::get(opCtx).initializeClientRoutingVersions(
- nss, env.version, env.dbVersion);
- }
-
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
- ASSERT(collection);
- auto status = collection->insertDocument(opCtx, InsertStatement(doc), nullptr);
- ASSERT_OK(status);
-
+ ASSERT_OK(coll->insertDocument(opCtx, InsertStatement(doc), nullptr));
wuow.commit();
}
@@ -253,14 +236,6 @@ protected:
const BSONObj& update,
const ReshardingEnv& env) {
AutoGetCollection coll(opCtx, nss, MODE_IX);
-
- // TODO(SERVER-50027): This is to temporarily make this test pass until getOwnershipFilter
- // has been updated to detect frozen migrations.
- if (!OperationShardingState::isOperationVersioned(opCtx)) {
- OperationShardingState::get(opCtx).initializeClientRoutingVersions(
- kNss, env.version, env.dbVersion);
- }
-
Helpers::update(opCtx, nss.toString(), filter, update);
}
@@ -270,13 +245,6 @@ protected:
const ReshardingEnv& env) {
AutoGetCollection coll(opCtx, nss, MODE_IX);
- // TODO(SERVER-50027): This is to temporarily make this test pass until getOwnershipFilter
- // has been updated to detect frozen migrations.
- if (!OperationShardingState::isOperationVersioned(opCtx)) {
- OperationShardingState::get(opCtx).initializeClientRoutingVersions(
- kNss, env.version, env.dbVersion);
- }
-
RecordId rid = Helpers::findOne(opCtx, coll.getCollection(), query, false);
ASSERT(!rid.isNull());
@@ -303,14 +271,8 @@ TEST_F(DestinedRecipientTest, TestGetDestinedRecipient) {
auto env = setupReshardingEnv(opCtx, true);
AutoGetCollection coll(opCtx, kNss, MODE_IX);
-
- // TODO(SERVER-50027): This is to temporarily make this test pass until getOwnershipFilter has
- // been updated to detect frozen migrations.
- if (!OperationShardingState::isOperationVersioned(opCtx)) {
- OperationShardingState::get(opCtx).initializeClientRoutingVersions(
- kNss, env.version, env.dbVersion);
- }
-
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
auto destShardId = getDestinedRecipient(opCtx, kNss, BSON("x" << 2 << "y" << 10));
ASSERT(destShardId);
ASSERT_EQ(*destShardId, env.destShard);
@@ -322,14 +284,8 @@ TEST_F(DestinedRecipientTest, TestGetDestinedRecipientThrowsOnBlockedRefresh) {
{
AutoGetCollection coll(opCtx, kNss, MODE_IX);
-
- // TODO(SERVER-50027): This is to temporarily make this test pass until getOwnershipFilter
- // has been updated to detect frozen migrations.
- if (!OperationShardingState::isOperationVersioned(opCtx)) {
- OperationShardingState::get(opCtx).initializeClientRoutingVersions(
- kNss, env.version, env.dbVersion);
- }
-
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
ASSERT_THROWS(getDestinedRecipient(opCtx, kNss, BSON("x" << 2 << "y" << 10)),
ExceptionFor<ErrorCodes::ShardInvalidatedForTargeting>);
}
@@ -341,6 +297,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnInserts) {
auto opCtx = operationContext();
auto env = setupReshardingEnv(opCtx, true);
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
writeDoc(opCtx, kNss, BSON("_id" << 0 << "x" << 2 << "y" << 10), env);
auto entry = getLastOplogEntry(opCtx);
@@ -354,6 +312,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnInsertsInTran
auto opCtx = operationContext();
auto env = setupReshardingEnv(opCtx, true);
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
runInTransaction(
opCtx, [&]() { writeDoc(opCtx, kNss, BSON("_id" << 0 << "x" << 2 << "y" << 10), env); });
@@ -380,6 +340,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdates) {
auto env = setupReshardingEnv(opCtx, true);
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
updateDoc(opCtx, kNss, BSON("_id" << 0), BSON("$set" << BSON("z" << 50)), env);
auto entry = getLastOplogEntry(opCtx);
@@ -389,6 +351,30 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdates) {
ASSERT_EQ(*recipShard, env.destShard);
}
+TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnMultiUpdates) {
+ auto opCtx = operationContext();
+
+ DBDirectClient client(opCtx);
+ client.insert(kNss.toString(), BSON("x" << 0 << "y" << 10 << "z" << 4));
+ client.insert(kNss.toString(), BSON("x" << 0 << "y" << 10 << "z" << 4));
+
+ auto env = setupReshardingEnv(opCtx, true);
+
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, ChunkVersion::IGNORED(), env.dbVersion);
+ client.update(kNss.ns(),
+ Query{BSON("x" << 0)},
+ BSON("$set" << BSON("z" << 5)),
+ false /*upsert*/,
+ true /*multi*/);
+
+ auto entry = getLastOplogEntry(opCtx);
+ auto recipShard = entry.getDestinedRecipient();
+
+ ASSERT(recipShard);
+ ASSERT_EQ(*recipShard, env.destShard);
+}
+
TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdatesOutOfPlace) {
auto opCtx = operationContext();
@@ -397,6 +383,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdatesOutOfP
auto env = setupReshardingEnv(opCtx, true);
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
updateDoc(opCtx, kNss, BSON("_id" << 0), BSON("$set" << BSON("z" << 50)), env);
auto entry = getLastOplogEntry(opCtx);
@@ -414,6 +402,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdatesInTran
auto env = setupReshardingEnv(opCtx, true);
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
runInTransaction(opCtx, [&]() {
updateDoc(opCtx, kNss, BSON("_id" << 0), BSON("$set" << BSON("z" << 50)), env);
});
@@ -441,6 +431,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnDeletes) {
auto env = setupReshardingEnv(opCtx, true);
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
deleteDoc(opCtx, kNss, BSON("_id" << 0), env);
auto entry = getLastOplogEntry(opCtx);
@@ -458,6 +450,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnDeletesInTran
auto env = setupReshardingEnv(opCtx, true);
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
runInTransaction(opCtx, [&]() { deleteDoc(opCtx, kNss, BSON("_id" << 0), env); });
// Look for destined recipient in latest oplog entry. Since this write was done in a
@@ -483,6 +477,8 @@ TEST_F(DestinedRecipientTest, TestUpdateChangesOwningShardThrows) {
auto env = setupReshardingEnv(opCtx, true);
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
ASSERT_THROWS(runInTransaction(
opCtx,
[&]() {
@@ -500,6 +496,8 @@ TEST_F(DestinedRecipientTest, TestUpdateSameOwningShard) {
auto env = setupReshardingEnv(opCtx, true);
+ OperationShardingState::get(opCtx).initializeClientRoutingVersions(
+ kNss, env.version, env.dbVersion);
runInTransaction(opCtx, [&]() {
updateDoc(opCtx, kNss, BSON("_id" << 0), BSON("$set" << BSON("y" << 3)), env);
});
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 385ab96afc8..77704557cb9 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -94,20 +94,14 @@ Status persistCollectionAndChangedChunks(OperationContext* opCtx,
const CollectionAndChangedChunks& collAndChunks,
const ChunkVersion& maxLoaderVersion) {
// Update the collections collection entry for 'nss' in case there are any new updates.
- ShardCollectionType update = ShardCollectionType(nss,
- collAndChunks.epoch,
- *collAndChunks.uuid,
- collAndChunks.shardKeyPattern,
- collAndChunks.shardKeyIsUnique);
-
- update.setUuid(*collAndChunks.uuid);
- if (!collAndChunks.defaultCollation.isEmpty()) {
- update.setDefaultCollation(collAndChunks.defaultCollation.getOwned());
- }
-
- if (collAndChunks.reshardingFields) {
- update.setReshardingFields(collAndChunks.reshardingFields.get());
- }
+ ShardCollectionType update(nss,
+ collAndChunks.epoch,
+ *collAndChunks.uuid,
+ collAndChunks.shardKeyPattern,
+ collAndChunks.shardKeyIsUnique);
+ update.setDefaultCollation(collAndChunks.defaultCollation);
+ update.setReshardingFields(collAndChunks.reshardingFields);
+ update.setAllowMigrations(collAndChunks.allowMigrations);
// Mark the chunk metadata as refreshing, so that secondaries are aware of refresh.
update.setRefreshing(true);
@@ -242,6 +236,7 @@ CollectionAndChangedChunks getPersistedMetadataSinceVersion(OperationContext* op
shardCollectionEntry.getDefaultCollation(),
shardCollectionEntry.getUnique(),
shardCollectionEntry.getReshardingFields(),
+ shardCollectionEntry.getAllowMigrations(),
std::move(changedChunks)};
}
diff --git a/src/mongo/db/s/type_shard_collection.cpp b/src/mongo/db/s/type_shard_collection.cpp
index 297d06471dc..a4720650021 100644
--- a/src/mongo/db/s/type_shard_collection.cpp
+++ b/src/mongo/db/s/type_shard_collection.cpp
@@ -68,4 +68,11 @@ BSONObj ShardCollectionType::toBSON() const {
return obj;
}
+void ShardCollectionType::setAllowMigrations(bool allowMigrations) {
+ if (allowMigrations)
+ setPre50CompatibleAllowMigrations(boost::none);
+ else
+ setPre50CompatibleAllowMigrations(false);
+}
+
} // namespace mongo
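The setter above encodes the backwards-compatible representation: 'true' is stored as field absence so that strictly-parsing 4.4 binaries never see the new field, and only an explicit 'false' is persisted. A minimal sketch of the resulting round-trip (kNss and kKeyPattern are placeholders; the constructor arguments mirror the unit test added later in this diff):

// Sketch only: demonstrates the pre-5.0 compatible encoding, not code from this change.
ShardCollectionType coll(kNss, OID::gen(), UUID::gen(), kKeyPattern, true /* unique */);

coll.setAllowMigrations(false);        // serialized as {..., allowMigrations: false}
invariant(!coll.getAllowMigrations());

coll.setAllowMigrations(true);         // the field is omitted from toBSON() entirely
invariant(coll.getAllowMigrations());  // getter falls back to 'true' when the field is absent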
diff --git a/src/mongo/db/s/type_shard_collection.h b/src/mongo/db/s/type_shard_collection.h
index 9a313183ef0..de71f2aab37 100644
--- a/src/mongo/db/s/type_shard_collection.h
+++ b/src/mongo/db/s/type_shard_collection.h
@@ -42,6 +42,7 @@ public:
using ShardCollectionTypeBase::kKeyPatternFieldName;
using ShardCollectionTypeBase::kLastRefreshedCollectionVersionFieldName;
using ShardCollectionTypeBase::kNssFieldName;
+ using ShardCollectionTypeBase::kPre50CompatibleAllowMigrationsFieldName;
using ShardCollectionTypeBase::kRefreshingFieldName;
using ShardCollectionTypeBase::kReshardingFieldsFieldName;
using ShardCollectionTypeBase::kUniqueFieldName;
@@ -79,6 +80,11 @@ public:
// A wrapper around the IDL generated 'ShardCollectionTypeBase::toBSON' to ensure backwards
// compatibility.
BSONObj toBSON() const;
+
+ bool getAllowMigrations() const {
+ return getPre50CompatibleAllowMigrations().get_value_or(true);
+ }
+ void setAllowMigrations(bool allowMigrations);
};
} // namespace mongo
diff --git a/src/mongo/db/s/type_shard_collection.idl b/src/mongo/db/s/type_shard_collection.idl
index 60a572e04fc..2ee5ffc7481 100644
--- a/src/mongo/db/s/type_shard_collection.idl
+++ b/src/mongo/db/s/type_shard_collection.idl
@@ -134,3 +134,14 @@ structs:
collection is the temporary resharding collection."
type: TypeCollectionReshardingFields
optional: true
+ allowMigrations: # TODO (SERVER-51880): This field must never be 'false' on downgrade
+ # to FCV 4.4 and must be cleared
+ cpp_name: pre50CompatibleAllowMigrations
+ type: bool
+ description: "Whether this collection allows chunks to move. It is required by
+ almost all DDL operations in order to guarantee that the set of
+ shards, which comprise a collection will not change.
+
+ It must be optional and not present when running in FCV 4.4, because
+ binaries prior to 5.0 use strict parsing and will fail."
+ optional: true
diff --git a/src/mongo/db/s/type_shard_collection_test.cpp b/src/mongo/db/s/type_shard_collection_test.cpp
index 57ef1140eeb..a9e8eba50bd 100644
--- a/src/mongo/db/s/type_shard_collection_test.cpp
+++ b/src/mongo/db/s/type_shard_collection_test.cpp
@@ -109,5 +109,18 @@ TEST(ShardCollectionType, ReshardingFieldsIncluded) {
ASSERT_EQ(reshardingUuid, shardCollType.getReshardingFields()->getUuid());
}
+TEST(ShardCollectionType, AllowMigrationsFieldBackwardsCompatibility) {
+ ShardCollectionType shardCollType(kNss, OID::gen(), UUID::gen(), kKeyPattern, true);
+ shardCollType.setAllowMigrations(false);
+ ASSERT_EQ(
+ false,
+ shardCollType.toBSON()[ShardCollectionTypeBase::kPre50CompatibleAllowMigrationsFieldName]
+ .Bool());
+
+ shardCollType.setAllowMigrations(true);
+ ASSERT(shardCollType.toBSON()[ShardCollectionTypeBase::kPre50CompatibleAllowMigrationsFieldName]
+ .eoo());
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h
index b0f1e81ff6a..ae3b0cb659c 100644
--- a/src/mongo/s/catalog/type_collection.h
+++ b/src/mongo/s/catalog/type_collection.h
@@ -86,16 +86,19 @@ public:
static constexpr auto kKeyPatternFieldName =
CollectionTypeBase::kPre50CompatibleKeyPatternFieldName;
static constexpr auto kUuidFieldName = CollectionTypeBase::kPre50CompatibleUuidFieldName;
+ using CollectionTypeBase::kAllowMigrationsFieldName;
using CollectionTypeBase::kNssFieldName;
using CollectionTypeBase::kReshardingFieldsFieldName;
using CollectionTypeBase::kUniqueFieldName;
using CollectionTypeBase::kUpdatedAtFieldName;
// Make getters and setters accessible.
+ using CollectionTypeBase::getAllowMigrations;
using CollectionTypeBase::getNss;
using CollectionTypeBase::getReshardingFields;
using CollectionTypeBase::getUnique;
using CollectionTypeBase::getUpdatedAt;
+ using CollectionTypeBase::setAllowMigrations;
using CollectionTypeBase::setNss;
using CollectionTypeBase::setReshardingFields;
using CollectionTypeBase::setUnique;
@@ -147,7 +150,7 @@ public:
void setUuid(UUID uuid);
bool getDropped() const {
- return getPre50CompatibleDropped() ? *getPre50CompatibleDropped() : false;
+ return getPre50CompatibleDropped().get_value_or(false);
}
const KeyPattern& getKeyPattern() const {
@@ -156,8 +159,7 @@ public:
void setKeyPattern(KeyPattern keyPattern);
BSONObj getDefaultCollation() const {
- return getPre50CompatibleDefaultCollation() ? *getPre50CompatibleDefaultCollation()
- : BSONObj();
+ return getPre50CompatibleDefaultCollation().get_value_or(BSONObj());
}
void setDefaultCollation(const BSONObj& defaultCollation);
diff --git a/src/mongo/s/catalog/type_collection.idl b/src/mongo/s/catalog/type_collection.idl
index 643627f0863..59e64243b00 100644
--- a/src/mongo/s/catalog/type_collection.idl
+++ b/src/mongo/s/catalog/type_collection.idl
@@ -114,3 +114,9 @@ structs:
original collection undergoing a resharding operation or this
collection is the temporary resharding collection."
optional: true
+ allowMigrations:
+ type: bool
+ description: "Whether this collection allows chunks to move. It is required by
+ almost all DDL operations in order to guarantee that the set of
+ shards, which comprise a collection will not change."
+ default: true
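On the config server the field is non-optional with an IDL default of 'true', so entries written before this change continue to allow migrations when parsed. A minimal sketch, assuming an already-constructed CollectionType instance 'coll' representing a config.collections entry:

// Sketch only: 'coll' is an assumed, pre-existing CollectionType instance.
invariant(coll.getAllowMigrations());   // IDL default applies when the field was never written

coll.setAllowMigrations(false);         // persisted explicitly to block chunk migrations
invariant(!coll.getAllowMigrations());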
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index e94301915a5..6c934317216 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -563,6 +563,7 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look
if (isIncremental &&
existingHistory->optRt->getVersion().epoch() == collectionAndChunks.epoch) {
return existingHistory->optRt->makeUpdated(collectionAndChunks.reshardingFields,
+ collectionAndChunks.allowMigrations,
collectionAndChunks.changedChunks);
}
@@ -583,6 +584,7 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look
collectionAndChunks.shardKeyIsUnique,
collectionAndChunks.epoch,
std::move(collectionAndChunks.reshardingFields),
+ collectionAndChunks.allowMigrations,
collectionAndChunks.changedChunks);
}();
diff --git a/src/mongo/s/catalog_cache_loader.cpp b/src/mongo/s/catalog_cache_loader.cpp
index 5f2ee8cbcdc..9dd53b47f53 100644
--- a/src/mongo/s/catalog_cache_loader.cpp
+++ b/src/mongo/s/catalog_cache_loader.cpp
@@ -48,6 +48,7 @@ CatalogCacheLoader::CollectionAndChangedChunks::CollectionAndChangedChunks(
const BSONObj& collDefaultCollation,
bool collShardKeyIsUnique,
boost::optional<TypeCollectionReshardingFields> collReshardingFields,
+ bool allowMigrations,
std::vector<ChunkType> chunks)
: epoch(std::move(collEpoch)),
uuid(std::move(collUuid)),
@@ -55,6 +56,7 @@ CatalogCacheLoader::CollectionAndChangedChunks::CollectionAndChangedChunks(
defaultCollation(collDefaultCollation),
shardKeyIsUnique(collShardKeyIsUnique),
reshardingFields(std::move(collReshardingFields)),
+ allowMigrations(allowMigrations),
changedChunks(std::move(chunks)) {}
void CatalogCacheLoader::set(ServiceContext* serviceContext,
diff --git a/src/mongo/s/catalog_cache_loader.h b/src/mongo/s/catalog_cache_loader.h
index 763f238e52f..af282c4d24e 100644
--- a/src/mongo/s/catalog_cache_loader.h
+++ b/src/mongo/s/catalog_cache_loader.h
@@ -72,6 +72,7 @@ public:
const BSONObj& collDefaultCollation,
bool collShardKeyIsUnique,
boost::optional<TypeCollectionReshardingFields> collReshardingFields,
+ bool allowMigrations,
std::vector<ChunkType> chunks);
// Information about the entire collection
@@ -80,12 +81,14 @@ public:
// except under the default constructor
BSONObj shardKeyPattern;
BSONObj defaultCollation;
- bool shardKeyIsUnique{false};
+ bool shardKeyIsUnique;
// If the collection is currently undergoing a resharding operation, the optional will be
// populated.
boost::optional<TypeCollectionReshardingFields> reshardingFields;
+ bool allowMigrations;
+
// The chunks which have changed sorted by their chunkVersion. This list might potentially
// contain all the chunks in the collection.
std::vector<ChunkType> changedChunks;
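Refresh paths that build a CollectionAndChangedChunks now have to thread the flag through; it sits between 'reshardingFields' and 'changedChunks'. A minimal construction sketch (the leading arguments are inferred from the member-initializer list in this diff; all variables are placeholders):

// Sketch only: argument order follows the constructor changed above.
CollectionAndChangedChunks collAndChunks{collEpoch,           // OID
                                         collUuid,            // boost::optional<UUID>
                                         shardKeyPattern,     // BSONObj
                                         defaultCollation,    // BSONObj
                                         shardKeyIsUnique,    // bool
                                         reshardingFields,    // boost::optional<TypeCollectionReshardingFields>
                                         allowMigrations,     // bool, new in this change
                                         std::move(chunks)};  // std::vector<ChunkType>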
diff --git a/src/mongo/s/catalog_cache_loader_mock.cpp b/src/mongo/s/catalog_cache_loader_mock.cpp
index b22916bc835..c556fe11365 100644
--- a/src/mongo/s/catalog_cache_loader_mock.cpp
+++ b/src/mongo/s/catalog_cache_loader_mock.cpp
@@ -95,6 +95,7 @@ CollectionAndChangedChunks getCollectionRefresh(
swCollectionReturnValue.getValue().getDefaultCollation(),
swCollectionReturnValue.getValue().getUnique(),
reshardingFields,
+ swCollectionReturnValue.getValue().getAllowMigrations(),
std::move(chunks)};
}
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 96662323fb1..7e4fad475ce 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -308,6 +308,7 @@ RoutingTableHistory::RoutingTableHistory(
std::unique_ptr<CollatorInterface> defaultCollator,
bool unique,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ bool allowMigrations,
ChunkMap chunkMap)
: _nss(std::move(nss)),
_uuid(uuid),
@@ -315,6 +316,7 @@ RoutingTableHistory::RoutingTableHistory(
_defaultCollator(std::move(defaultCollator)),
_unique(unique),
_reshardingFields(std::move(reshardingFields)),
+ _allowMigrations(allowMigrations),
_chunkMap(std::move(chunkMap)),
_shardVersions(_chunkMap.constructShardVersionMap()) {}
@@ -656,6 +658,12 @@ ChunkManager ChunkManager::makeAtTime(const ChunkManager& cm, Timestamp clusterT
return ChunkManager(cm.dbPrimary(), cm.dbVersion(), cm._rt, clusterTime);
}
+bool ChunkManager::allowMigrations() const {
+ if (!_rt->optRt)
+ return true;
+ return _rt->optRt->allowMigrations();
+}
+
std::string ChunkManager::toString() const {
return _rt->optRt ? _rt->optRt->toString() : "UNSHARDED";
}
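ChunkManager::allowMigrations() deliberately reports 'true' when there is no routing table (the collection is untracked/unsharded), so only collections explicitly marked with allowMigrations=false reject migrations. A hypothetical guard of the kind a migration path could perform against the cached routing info (sketch only, not code from this diff):

// Sketch only: 'cm' is a ChunkManager obtained from the catalog cache for 'nss'.
uassert(ErrorCodes::ConflictingOperationInProgress,
        str::stream() << "Collection " << nss.ns()
                      << " is currently disallowing chunk migrations",
        cm.allowMigrations());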
@@ -719,6 +727,7 @@ RoutingTableHistory RoutingTableHistory::makeNew(
bool unique,
OID epoch,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ bool allowMigrations,
const std::vector<ChunkType>& chunks) {
return RoutingTableHistory(std::move(nss),
std::move(uuid),
@@ -726,12 +735,14 @@ RoutingTableHistory RoutingTableHistory::makeNew(
std::move(defaultCollator),
std::move(unique),
boost::none,
+ allowMigrations,
ChunkMap{epoch})
- .makeUpdated(std::move(reshardingFields), chunks);
+ .makeUpdated(std::move(reshardingFields), allowMigrations, chunks);
}
RoutingTableHistory RoutingTableHistory::makeUpdated(
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ bool allowMigrations,
const std::vector<ChunkType>& changedChunks) const {
auto changedChunkInfos = flatten(changedChunks);
auto chunkMap = _chunkMap.createMerged(changedChunkInfos);
@@ -745,6 +756,7 @@ RoutingTableHistory RoutingTableHistory::makeUpdated(
CollatorInterface::cloneCollator(getDefaultCollator()),
isUnique(),
std::move(reshardingFields),
+ allowMigrations,
std::move(chunkMap));
}
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index 68a439e0064..235c18e9121 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -166,6 +166,7 @@ public:
bool unique,
OID epoch,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ bool allowMigrations,
const std::vector<ChunkType>& chunks);
/**
@@ -182,6 +183,7 @@ public:
*/
RoutingTableHistory makeUpdated(
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ bool allowMigrations,
const std::vector<ChunkType>& changedChunks) const;
const NamespaceString& nss() const {
@@ -280,6 +282,10 @@ public:
return _reshardingFields;
}
+ bool allowMigrations() const {
+ return _allowMigrations;
+ }
+
private:
friend class ChunkManager;
@@ -289,6 +295,7 @@ private:
std::unique_ptr<CollatorInterface> defaultCollator,
bool unique,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ bool allowMigrations,
ChunkMap chunkMap);
ChunkVersion _getVersion(const ShardId& shardName, bool throwOnStaleShard) const;
@@ -314,6 +321,8 @@ private:
// for this collection.
boost::optional<TypeCollectionReshardingFields> _reshardingFields;
+ bool _allowMigrations;
+
// Map from the max for each chunk to an entry describing the chunk. The union of all chunks'
// ranges must cover the complete space from [MinKey, MaxKey).
ChunkMap _chunkMap;
@@ -460,6 +469,12 @@ public:
return bool(_rt->optRt);
}
+ /**
+ * Indicates whether this collection allows chunk migrations. When 'false', the collection
+ * must not honour any moveChunk requests, because it is required to provide a stable view
+ * of its constituent shards.
+ */
+ bool allowMigrations() const;
+
const ShardId& dbPrimary() const {
return _dbPrimary;
}
diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp
index c665e2c4f9f..32c886a7366 100644
--- a/src/mongo/s/chunk_manager_query_test.cpp
+++ b/src/mongo/s/chunk_manager_query_test.cpp
@@ -512,8 +512,15 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
ChunkType chunk1(kNss, {BSON("x" << 0), BSON("x" << MAXKEY)}, version, ShardId("1"));
chunk1.setName(OID::gen());
- auto oldRoutingTable = RoutingTableHistory::makeNew(
- kNss, boost::none, BSON("x" << 1), nullptr, false, epoch, boost::none, {chunk0, chunk1});
+ auto oldRoutingTable = RoutingTableHistory::makeNew(kNss,
+ boost::none,
+ BSON("x" << 1),
+ nullptr,
+ false,
+ epoch,
+ boost::none,
+ true,
+ {chunk0, chunk1});
// Simulate move chunk {x: 0} to shard 0. Effectively moving all remaining chunks to shard 0.
version.incMajor();
@@ -525,7 +532,7 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
ChunkManager chunkManager(
ShardId("0"),
DatabaseVersion(UUID::gen(), 1),
- makeStandaloneRoutingTableHistory(oldRoutingTable.makeUpdated(boost::none, {chunk1})),
+ makeStandaloneRoutingTableHistory(oldRoutingTable.makeUpdated(boost::none, true, {chunk1})),
Timestamp(5, 0));
std::set<ShardId> shardIds;
diff --git a/src/mongo/s/chunk_manager_refresh_bm.cpp b/src/mongo/s/chunk_manager_refresh_bm.cpp
index bd9b133301c..9b3c39f83c7 100644
--- a/src/mongo/s/chunk_manager_refresh_bm.cpp
+++ b/src/mongo/s/chunk_manager_refresh_bm.cpp
@@ -80,7 +80,7 @@ CollectionMetadata makeChunkManagerWithShardSelector(int nShards,
}
auto rt = RoutingTableHistory::makeNew(
- kNss, UUID::gen(), shardKeyPattern, nullptr, true, collEpoch, boost::none, chunks);
+ kNss, UUID::gen(), shardKeyPattern, nullptr, true, collEpoch, boost::none, true, chunks);
return CollectionMetadata(ChunkManager(ShardId("Shard0"),
DatabaseVersion(UUID::gen(), 1),
makeStandaloneRoutingTableHistory(std::move(rt)),
@@ -110,8 +110,8 @@ MONGO_COMPILER_NOINLINE auto makeChunkManagerWithOptimalBalancedDistribution(int
MONGO_COMPILER_NOINLINE auto runIncrementalUpdate(const CollectionMetadata& cm,
const std::vector<ChunkType>& newChunks) {
- auto rt =
- cm.getChunkManager()->getRoutingTableHistory_ForTest().makeUpdated(boost::none, newChunks);
+ auto rt = cm.getChunkManager()->getRoutingTableHistory_ForTest().makeUpdated(
+ boost::none, true, newChunks);
return CollectionMetadata(ChunkManager(ShardId("shard0"),
DatabaseVersion(UUID::gen(), 1),
makeStandaloneRoutingTableHistory(std::move(rt)),
@@ -160,8 +160,15 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS
}
for (auto keepRunning : state) {
- auto rt = RoutingTableHistory::makeNew(
- kNss, UUID::gen(), shardKeyPattern, nullptr, true, collEpoch, boost::none, chunks);
+ auto rt = RoutingTableHistory::makeNew(kNss,
+ UUID::gen(),
+ shardKeyPattern,
+ nullptr,
+ true,
+ collEpoch,
+ boost::none,
+ true,
+ chunks);
benchmark::DoNotOptimize(
CollectionMetadata(ChunkManager(ShardId("shard0"),
DatabaseVersion(UUID::gen(), 1),
diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp
index a2748253178..4a1b9cbee2e 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.cpp
+++ b/src/mongo/s/config_server_catalog_cache_loader.cpp
@@ -115,6 +115,7 @@ CollectionAndChangedChunks getChangedChunks(OperationContext* opCtx,
coll.getDefaultCollation(),
coll.getUnique(),
coll.getReshardingFields(),
+ coll.getAllowMigrations(),
std::move(changedChunks)};
}
diff --git a/src/mongo/s/routing_table_history_test.cpp b/src/mongo/s/routing_table_history_test.cpp
index 3d33cbce230..c0139b4cfee 100644
--- a/src/mongo/s/routing_table_history_test.cpp
+++ b/src/mongo/s/routing_table_history_test.cpp
@@ -71,7 +71,7 @@ RoutingTableHistory splitChunk(const RoutingTableHistory& rt,
newChunks.emplace_back(kNss, range, curVersion, kThisShard);
}
- return rt.makeUpdated(boost::none, newChunks);
+ return rt.makeUpdated(boost::none, true, newChunks);
}
/**
@@ -160,8 +160,15 @@ public:
version,
kThisShard};
- _rt.emplace(RoutingTableHistory::makeNew(
- kNss, UUID::gen(), _shardKeyPattern, nullptr, false, epoch, boost::none, {initChunk}));
+ _rt.emplace(RoutingTableHistory::makeNew(kNss,
+ UUID::gen(),
+ _shardKeyPattern,
+ nullptr,
+ false,
+ epoch,
+ boost::none,
+ true,
+ {initChunk}));
ASSERT_EQ(_rt->numChunks(), 1ull);
// Should only be one
@@ -326,8 +333,15 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
version,
kThisShard};
- auto rt = RoutingTableHistory::makeNew(
- kNss, UUID::gen(), getShardKeyPattern(), nullptr, false, epoch, boost::none, {chunkAll});
+ auto rt = RoutingTableHistory::makeNew(kNss,
+ UUID::gen(),
+ getShardKeyPattern(),
+ nullptr,
+ false,
+ epoch,
+ boost::none,
+ true,
+ {chunkAll});
std::vector<ChunkType> chunks1 = {
ChunkType{kNss,
@@ -339,7 +353,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
ChunkVersion{2, 2, epoch},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, chunks1);
+ auto rt1 = rt.makeUpdated(boost::none, true, chunks1);
auto v1 = ChunkVersion{2, 2, epoch};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
@@ -357,7 +371,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
ChunkVersion{3, 2, epoch},
kThisShard}};
- auto rt2 = rt1.makeUpdated(boost::none, chunks2);
+ auto rt2 = rt1.makeUpdated(boost::none, true, chunks2);
auto v2 = ChunkVersion{3, 2, epoch};
ASSERT_EQ(v2, rt2.getVersion(kThisShard));
}
@@ -371,8 +385,15 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
ChunkVersion{1, 0, epoch},
kThisShard}};
- auto rt = RoutingTableHistory::makeNew(
- kNss, UUID::gen(), getShardKeyPattern(), nullptr, false, epoch, boost::none, initialChunks);
+ auto rt = RoutingTableHistory::makeNew(kNss,
+ UUID::gen(),
+ getShardKeyPattern(),
+ nullptr,
+ false,
+ epoch,
+ boost::none,
+ true,
+ initialChunks);
ASSERT_EQ(rt.numChunks(), 1);
std::vector<ChunkType> changedChunks = {
@@ -385,7 +406,7 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
ChunkVersion{2, 2, epoch},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, changedChunks);
+ auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
auto v1 = ChunkVersion{2, 2, epoch};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -413,8 +434,15 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
ChunkVersion{1, 0, epoch},
kThisShard}};
- auto rt = RoutingTableHistory::makeNew(
- kNss, UUID::gen(), getShardKeyPattern(), nullptr, false, epoch, boost::none, initialChunks);
+ auto rt = RoutingTableHistory::makeNew(kNss,
+ UUID::gen(),
+ getShardKeyPattern(),
+ nullptr,
+ false,
+ epoch,
+ boost::none,
+ true,
+ initialChunks);
ASSERT_EQ(rt.numChunks(), 1);
std::vector<ChunkType> changedChunks = {
@@ -431,7 +459,7 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
ChunkVersion{2, 2, epoch},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, changedChunks);
+ auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
auto v1 = ChunkVersion{2, 2, epoch};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -450,8 +478,15 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
ChunkVersion{2, 2, epoch},
kThisShard}};
- auto rt = RoutingTableHistory::makeNew(
- kNss, UUID::gen(), getShardKeyPattern(), nullptr, false, epoch, boost::none, initialChunks);
+ auto rt = RoutingTableHistory::makeNew(kNss,
+ UUID::gen(),
+ getShardKeyPattern(),
+ nullptr,
+ false,
+ epoch,
+ boost::none,
+ true,
+ initialChunks);
ASSERT_EQ(rt.numChunks(), 2);
std::vector<ChunkType> changedChunks = {
@@ -464,7 +499,7 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
ChunkVersion{3, 1, epoch},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, changedChunks);
+ auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
auto v1 = ChunkVersion{3, 1, epoch};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -492,8 +527,15 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
ChunkVersion{2, 2, epoch},
kThisShard}};
- auto rt = RoutingTableHistory::makeNew(
- kNss, UUID::gen(), getShardKeyPattern(), nullptr, false, epoch, boost::none, initialChunks);
+ auto rt = RoutingTableHistory::makeNew(kNss,
+ UUID::gen(),
+ getShardKeyPattern(),
+ nullptr,
+ false,
+ epoch,
+ boost::none,
+ true,
+ initialChunks);
ASSERT_EQ(rt.numChunks(), 3);
ASSERT_EQ(rt.getVersion(), ChunkVersion(2, 2, epoch));
@@ -507,7 +549,7 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
ChunkVersion{3, 1, epoch},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, changedChunks);
+ auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
auto v1 = ChunkVersion{3, 1, epoch};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -530,8 +572,15 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
ChunkVersion{2, 2, epoch},
kThisShard}};
- auto rt = RoutingTableHistory::makeNew(
- kNss, UUID::gen(), getShardKeyPattern(), nullptr, false, epoch, boost::none, initialChunks);
+ auto rt = RoutingTableHistory::makeNew(kNss,
+ UUID::gen(),
+ getShardKeyPattern(),
+ nullptr,
+ false,
+ epoch,
+ boost::none,
+ true,
+ initialChunks);
ASSERT_EQ(rt.numChunks(), 3);
ASSERT_EQ(rt.getVersion(), ChunkVersion(2, 2, epoch));
@@ -545,7 +594,7 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
ChunkVersion{3, 1, epoch},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, changedChunks);
+ auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
auto v1 = ChunkVersion{3, 1, epoch};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -586,8 +635,15 @@ TEST_F(RoutingTableHistoryTest, TestFlatten) {
kThisShard},
};
- auto rt = RoutingTableHistory::makeNew(
- kNss, UUID::gen(), getShardKeyPattern(), nullptr, false, epoch, boost::none, initialChunks);
+ auto rt = RoutingTableHistory::makeNew(kNss,
+ UUID::gen(),
+ getShardKeyPattern(),
+ nullptr,
+ false,
+ epoch,
+ boost::none,
+ true,
+ initialChunks);
ASSERT_EQ(rt.numChunks(), 2);
ASSERT_EQ(rt.getVersion(), ChunkVersion(4, 1, epoch));
@@ -596,5 +652,6 @@ TEST_F(RoutingTableHistoryTest, TestFlatten) {
ASSERT_EQ(chunk1->getMin().woCompare(getShardKeyPattern().globalMin()), 0);
ASSERT_EQ(chunk1->getMax().woCompare(BSON("a" << 10)), 0);
}
+
} // namespace
} // namespace mongo