Diffstat (limited to 'src/mongo/db')
32 files changed, 270 insertions, 183 deletions
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 2f9a844950b..b2e23e4887c 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -181,10 +181,15 @@ public:
         // Prevent chunks from being cleaned up during yields - this allows us to only check the
         // version on initial entry into count.
-        auto rangePreserver =
-            CollectionShardingState::get(opCtx, nss)
-                ->getOwnershipFilter(
-                    opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup);
+        auto* const css = CollectionShardingState::get(opCtx, nss);
+        boost::optional<ScopedCollectionFilter> rangePreserver;
+        if (css->getCollectionDescription(opCtx).isSharded()) {
+            rangePreserver.emplace(
+                CollectionShardingState::get(opCtx, nss)
+                    ->getOwnershipFilter(
+                        opCtx,
+                        CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup));
+        }
         auto expCtx = makeExpressionContextForGetExecutor(
             opCtx, request.getCollation().value_or(BSONObj()), nss);
@@ -244,10 +249,15 @@ public:
         // Prevent chunks from being cleaned up during yields - this allows us to only check the
         // version on initial entry into count.
-        auto rangePreserver =
-            CollectionShardingState::get(opCtx, nss)
-                ->getOwnershipFilter(
-                    opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup);
+        auto* const css = CollectionShardingState::get(opCtx, nss);
+        boost::optional<ScopedCollectionFilter> rangePreserver;
+        if (css->getCollectionDescription(opCtx).isSharded()) {
+            rangePreserver.emplace(
+                CollectionShardingState::get(opCtx, nss)
+                    ->getOwnershipFilter(
+                        opCtx,
+                        CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup));
+        }
         auto statusWithPlanExecutor = getExecutorCount(makeExpressionContextForGetExecutor(
diff --git a/src/mongo/db/exec/upsert_stage.cpp b/src/mongo/db/exec/upsert_stage.cpp
index 4f0fa990c23..132e6aad0d1 100644
--- a/src/mongo/db/exec/upsert_stage.cpp
+++ b/src/mongo/db/exec/upsert_stage.cpp
@@ -119,10 +119,9 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
     // throw so that MongoS can target the insert to the correct shard.
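For illustration: the two count_cmd.cpp hunks above stop acquiring the ownership filter unconditionally. The filter now lives in a boost::optional and is only engaged when the collection description reports the collection as sharded. The upsert_stage.cpp hunk that continues below applies the same reordering (check isSharded() first, only then request the filter). What follows is a minimal standalone sketch of that shape; ScopedFilter and ShardingState are invented stand-ins for the real MongoDB classes, and std::optional takes the place of boost::optional.

    #include <iostream>
    #include <optional>

    // Toy stand-ins for ScopedCollectionFilter / CollectionShardingState.
    struct ScopedFilter {
        ScopedFilter() { std::cout << "ownership filter acquired; orphan cleanup deferred\n"; }
        ~ScopedFilter() { std::cout << "ownership filter released\n"; }
    };

    struct ShardingState {
        bool sharded;
        bool isSharded() const { return sharded; }
    };

    void runCount(const ShardingState& css) {
        std::optional<ScopedFilter> rangePreserver;  // nothing acquired yet
        if (css.isSharded()) {
            rangePreserver.emplace();  // only sharded collections pay for the filter
        }
        // ... build and run the executor; when engaged, the filter lives until the
        // end of this scope, mirroring the RAII usage in the hunk above.
    }

    int main() {
        runCount(ShardingState{false});  // unsharded: the filter is never constructed
        runCount(ShardingState{true});   // sharded: the filter spans the executor's lifetime
    }

The point of the optional is that the RAII object, which pins chunk ranges against orphan cleanup, is never constructed for collections that are not sharded.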
if (_isUserInitiatedWrite) { auto* const css = CollectionShardingState::get(opCtx(), collection()->ns()); - const auto collFilter = css->getOwnershipFilter( - opCtx(), CollectionShardingState::OrphanCleanupPolicy::kAllowOrphanCleanup); - - if (collFilter.isSharded()) { + if (css->getCollectionDescription(opCtx()).isSharded()) { + const auto collFilter = css->getOwnershipFilter( + opCtx(), CollectionShardingState::OrphanCleanupPolicy::kAllowOrphanCleanup); const ShardKeyPattern shardKeyPattern(collFilter.getKeyPattern()); auto newShardKey = shardKeyPattern.extractShardKeyFromDoc(newDocument); @@ -274,4 +273,5 @@ void UpsertStage::_assertDocumentToBeInsertedIsValid(const mb::Document& documen _assertPathsNotArray(document, shardKeyPaths); } } + } // namespace mongo diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp index a7416ef95e2..25414f71f69 100644 --- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp +++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp @@ -388,12 +388,14 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo const NamespaceString& nss(coll.getNss()); - if (!coll.getAllowBalance()) { + if (!coll.getAllowBalance() || !coll.getAllowMigrations()) { LOGV2_DEBUG(21851, 1, "Not balancing collection {namespace}; explicitly disabled.", "Not balancing explicitly disabled collection", - "namespace"_attr = nss); + "namespace"_attr = nss, + "allowBalance"_attr = coll.getAllowBalance(), + "allowMigrations"_attr = coll.getAllowMigrations()); continue; } diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp index ccb0a5a4050..354bab6cdec 100644 --- a/src/mongo/db/s/collection_metadata.cpp +++ b/src/mongo/db/s/collection_metadata.cpp @@ -44,6 +44,10 @@ namespace mongo { CollectionMetadata::CollectionMetadata(ChunkManager cm, const ShardId& thisShardId) : _cm(std::move(cm)), _thisShardId(thisShardId) {} +bool CollectionMetadata::allowMigrations() const { + return _cm ? _cm->allowMigrations() : true; +} + BSONObj CollectionMetadata::extractDocumentKey(const BSONObj& doc) const { BSONObj key; diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h index 36c9fa844ad..e9b202f5ecf 100644 --- a/src/mongo/db/s/collection_metadata.h +++ b/src/mongo/db/s/collection_metadata.h @@ -69,6 +69,8 @@ public: return bool(_cm); } + bool allowMigrations() const; + /** * Returns the current shard version for the collection or UNSHARDED if it is not sharded. 
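The collection_metadata.cpp/.h hunks above add an allowMigrations() accessor that defaults to true whenever there is no chunk manager (an untracked or unsharded collection), and the balancer hunk now skips any collection whose flag is cleared. A small self-contained sketch of that defaulting rule, using toy ChunkManager/CollectionMetadata structs rather than the real types:

    #include <cassert>
    #include <optional>

    struct ChunkManager {
        bool allowMigrations;
    };

    struct CollectionMetadata {
        std::optional<ChunkManager> cm;  // empty when the collection is not sharded
        bool allowMigrations() const {
            return cm ? cm->allowMigrations : true;  // unsharded collections default to "allowed"
        }
    };

    int main() {
        assert(CollectionMetadata{}.allowMigrations());                      // no routing info
        assert(CollectionMetadata{ChunkManager{true}}.allowMigrations());    // normal sharded collection
        assert(!CollectionMetadata{ChunkManager{false}}.allowMigrations());  // migrations frozen
    }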
* diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp index ddeb8f1c7ba..3d3763a5928 100644 --- a/src/mongo/db/s/collection_metadata_filtering_test.cpp +++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp @@ -71,6 +71,7 @@ protected: false, epoch, boost::none, + true, [&] { ChunkVersion version(1, 0, epoch); diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp index 0c343a5ed37..c52d13d3e50 100644 --- a/src/mongo/db/s/collection_metadata_test.cpp +++ b/src/mongo/db/s/collection_metadata_test.cpp @@ -90,6 +90,7 @@ CollectionMetadata makeCollectionMetadataImpl( false, epoch, boost::none, + true, allChunks)), kChunkManager), kThisShard); diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp index c5c264af4c7..0e95190e915 100644 --- a/src/mongo/db/s/collection_sharding_runtime.cpp +++ b/src/mongo/db/s/collection_sharding_runtime.cpp @@ -112,11 +112,22 @@ CollectionShardingRuntime* CollectionShardingRuntime::get_UNSAFE(ServiceContext* ScopedCollectionFilter CollectionShardingRuntime::getOwnershipFilter( OperationContext* opCtx, OrphanCleanupPolicy orphanCleanupPolicy) { const auto optReceivedShardVersion = getOperationReceivedVersion(opCtx, _nss); - invariant(!optReceivedShardVersion || !ChunkVersion::isIgnoredVersion(*optReceivedShardVersion), - "getOwnershipFilter called by operation that doesn't have a valid shard version"); + // TODO (SERVER-52764): No operations should be calling getOwnershipFilter without a shard + // version + // + // invariant(optReceivedShardVersion, + // "getOwnershipFilter called by operation that doesn't specify shard version"); + if (!optReceivedShardVersion) + return {kUnshardedCollection}; + + auto metadata = _getMetadataWithVersionCheckAt( + opCtx, repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime()); + invariant(!ChunkVersion::isIgnoredVersion(*optReceivedShardVersion) || + !metadata->get().allowMigrations() || !metadata->get().isSharded(), + "For sharded collections getOwnershipFilter cannot be relied on without a valid " + "shard version"); - return _getMetadataWithVersionCheckAt(opCtx, - repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime()); + return {std::move(metadata)}; } ScopedCollectionDescription CollectionShardingRuntime::getCollectionDescription( @@ -335,10 +346,8 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt( !criticalSectionSignal); } - if (ChunkVersion::isIgnoredVersion(receivedShardVersion)) - return kUnshardedCollection; - - if (receivedShardVersion.isWriteCompatibleWith(wantedShardVersion)) + if (wantedShardVersion.isWriteCompatibleWith(receivedShardVersion) || + ChunkVersion::isIgnoredVersion(receivedShardVersion)) return optCurrentMetadata; StaleConfigInfo sci( diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp index 952924bf6f9..215dd006b95 100644 --- a/src/mongo/db/s/collection_sharding_runtime_test.cpp +++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp @@ -63,6 +63,7 @@ protected: false, epoch, boost::none, + true, {std::move(chunk)})), boost::none); diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp index 977fd9640b0..7aa957717c0 100644 --- a/src/mongo/db/s/config/config_server_test_fixture.cpp +++ b/src/mongo/db/s/config/config_server_test_fixture.cpp @@ -325,10 +325,17 @@ 
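In the collection_sharding_runtime.cpp hunk above, getOwnershipFilter no longer trips an invariant when the operation carries no shard version at all; it returns an unsharded, pass-everything filter in that case (see the TODO referencing SERVER-52764) and only asserts when an IGNORED version reaches a sharded collection whose migrations are still allowed. A rough sketch of the relaxed condition follows; State and invariantHolds are invented names, and plain bools stand in for ChunkVersion and the collection description.

    #include <cassert>

    // Invented placeholder flags; the real check compares ChunkVersions and reads
    // the ScopedCollectionDescription.
    struct State {
        bool receivedVersionIsIgnored;
        bool collectionIsSharded;
        bool allowMigrations;
    };

    // Mirrors the relaxed invariant: an IGNORED shard version is only tolerated if
    // the collection is unsharded or its migrations are frozen.
    bool invariantHolds(const State& s) {
        return !s.receivedVersionIsIgnored || !s.allowMigrations || !s.collectionIsSharded;
    }

    int main() {
        assert(invariantHolds({false, true, true}));   // proper version on a sharded collection
        assert(!invariantHolds({true, true, true}));   // IGNORED while migrations are still allowed
        assert(invariantHolds({true, true, false}));   // IGNORED is fine once migrations are frozen
        assert(invariantHolds({true, false, true}));   // IGNORED on an unsharded collection
    }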
StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opC return ShardType::fromBSON(doc.getValue()); } -void ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) { - const NamespaceString chunkNS(ChunkType::ConfigNS); +void ConfigServerTestFixture::setupCollection(const NamespaceString& nss, + const KeyPattern& shardKey, + const std::vector<ChunkType>& chunks) { + CollectionType coll(nss, chunks[0].getVersion().epoch(), Date_t::now(), UUID::gen()); + coll.setKeyPattern(shardKey); + ASSERT_OK( + insertToConfigCollection(operationContext(), CollectionType::ConfigNS, coll.toBSON())); + for (const auto& chunk : chunks) { - ASSERT_OK(insertToConfigCollection(operationContext(), chunkNS, chunk.toConfigBSON())); + ASSERT_OK(insertToConfigCollection( + operationContext(), ChunkType::ConfigNS, chunk.toConfigBSON())); } } diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h index ba008a827f6..b5264d9131c 100644 --- a/src/mongo/db/s/config/config_server_test_fixture.h +++ b/src/mongo/db/s/config/config_server_test_fixture.h @@ -103,7 +103,9 @@ protected: /** * Setup the config.chunks collection to contain the given chunks. */ - void setupChunks(const std::vector<ChunkType>& chunks); + void setupCollection(const NamespaceString& nss, + const KeyPattern& shardKey, + const std::vector<ChunkType>& chunks); /** * Retrieves the chunk document from the config server. diff --git a/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp index 7df7df0e6c7..98f7dd5288c 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp @@ -44,6 +44,7 @@ namespace mongo { namespace { const NamespaceString kNss("TestDB", "TestColl"); +const KeyPattern kKeyPattern(BSON("a" << 1)); const ShardType kShard0("shard0000", "shard0000:1234"); const ShardType kShard1("shard0001", "shard0001:1234"); @@ -144,7 +145,7 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, ChunkVersion targetChunkVersion( collectionVersion.majorVersion() + 1, 0, collectionVersion.epoch()); - setupChunks({shard0Chunk0, shard1Chunk0}); + setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard1Chunk0}); auto opCtx = operationContext(); @@ -173,7 +174,7 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, ChunkVersion targetChunkVersion( collectionVersion.majorVersion() + 1, 0, collectionVersion.epoch()); - setupChunks({shard0Chunk0, shard0Chunk1, shard1Chunk0}); + setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard0Chunk1, shard1Chunk0}); auto opCtx = operationContext(); std::vector<ShardId> shardIds{kShard0.getName(), kShard1.getName()}; @@ -203,7 +204,7 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, ChunkVersion targetChunkVersion( collectionVersion.majorVersion() + 1, 0, collectionVersion.epoch()); - setupChunks({shard0Chunk0, shard0Chunk1, shard1Chunk0, shard1Chunk1}); + setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard0Chunk1, shard1Chunk0, shard1Chunk1}); auto opCtx = operationContext(); std::vector<ShardId> shardIds{kShard0.getName(), kShard1.getName()}; diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp 
b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp index d1f09247cdb..a5823b34a33 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp @@ -774,12 +774,12 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration( boost::none)); uassert(ErrorCodes::ShardNotFound, - str::stream() << "shard " << toShard << " does not exist", + str::stream() << "Shard " << toShard << " does not exist", !shardResult.docs.empty()); auto shard = uassertStatusOK(ShardType::fromBSON(shardResult.docs.front())); uassert(ErrorCodes::ShardNotFound, - str::stream() << toShard << " is draining", + str::stream() << "Shard " << toShard << " is currently draining", !shard.getDraining()); // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and @@ -798,39 +798,43 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration( return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"}; } - // Must use local read concern because we will perform subsequent writes. - auto findResponse = + auto findCollResponse = uassertStatusOK( + configShard->exhaustiveFindOnConfig(opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + repl::ReadConcernLevel::kLocalReadConcern, + CollectionType::ConfigNS, + BSON(CollectionType::kNssFieldName << nss.ns()), + {}, + 1)); + uassert(ErrorCodes::ConflictingOperationInProgress, + "Collection does not exist", + !findCollResponse.docs.empty()); + const CollectionType coll(findCollResponse.docs[0]); + uassert(ErrorCodes::ConflictingOperationInProgress, + "Collection is undergoing changes and chunks cannot be moved", + coll.getAllowMigrations()); + + auto findResponse = uassertStatusOK( configShard->exhaustiveFindOnConfig(opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, repl::ReadConcernLevel::kLocalReadConcern, ChunkType::ConfigNS, BSON("ns" << nss.ns()), BSON(ChunkType::lastmod << -1), - 1); - if (!findResponse.isOK()) { - return findResponse.getStatus(); - } - - if (MONGO_unlikely(migrationCommitVersionError.shouldFail())) { - uassert(ErrorCodes::StaleEpoch, - "failpoint 'migrationCommitVersionError' generated error", - false); - } + 1)); + uassert(ErrorCodes::IncompatibleShardingMetadata, + str::stream() << "Tried to find max chunk version for collection '" << nss.ns() + << ", but found no chunks", + !findResponse.docs.empty()); - const auto chunksVector = std::move(findResponse.getValue().docs); - if (chunksVector.empty()) { - return {ErrorCodes::IncompatibleShardingMetadata, - str::stream() << "Tried to find max chunk version for collection '" << nss.ns() - << ", but found no chunks"}; - } + const auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(findResponse.docs[0])); + const auto currentCollectionVersion = chunk.getVersion(); - const auto swChunk = ChunkType::fromConfigBSON(chunksVector.front()); - if (!swChunk.isOK()) { - return swChunk.getStatus(); + if (MONGO_unlikely(migrationCommitVersionError.shouldFail())) { + uasserted(ErrorCodes::StaleEpoch, + "Failpoint 'migrationCommitVersionError' generated error"); } - const auto currentCollectionVersion = swChunk.getValue().getVersion(); - // It is possible for a migration to end up running partly without the protection of the // distributed lock if the config primary stepped down since the start of the migration and // failed to recover the migration. 
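The commitChunkMigration changes above add two up-front checks before the config server commits a chunk move: the config.collections document must exist, and its allowMigrations flag must not be false, with both failures reported as ConflictingOperationInProgress. The hunk also rewrites the surrounding StatusWith plumbing into uassert style. Below is a compressed standalone sketch of just the guard, assuming simplified stand-ins (uassertSketch, CollectionDoc) for MongoDB's uassert and catalog types:

    #include <optional>
    #include <stdexcept>
    #include <string>

    struct CollectionDoc {
        bool allowMigrations;
    };

    // Simplified stand-in for mongo's uassert: throws instead of returning a Status.
    void uassertSketch(const std::string& msg, bool expr) {
        if (!expr) throw std::runtime_error("ConflictingOperationInProgress: " + msg);
    }

    void commitChunkMigrationGuard(const std::optional<CollectionDoc>& collDoc) {
        uassertSketch("Collection does not exist", collDoc.has_value());
        uassertSketch("Collection is undergoing changes and chunks cannot be moved",
                      collDoc->allowMigrations);
        // ... only now is the max chunk version looked up and the commit applied.
    }

    int main() {
        commitChunkMigrationGuard(CollectionDoc{true});       // proceeds
        try {
            commitChunkMigrationGuard(CollectionDoc{false});  // rejected: migrations frozen
        } catch (const std::exception&) {
        }
    }

This guard is also why the test fixtures in the following files switch from setupChunks to setupCollection: the chunks alone are no longer enough, the collection document has to be present as well.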
Check that the collection has not been dropped and recreated diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp index ca19cc84e80..0982c6fe76e 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp @@ -44,6 +44,7 @@ namespace { using unittest::assertGet; +const KeyPattern kKeyPattern(BSON("x" << 1)); class ClearJumboFlagTest : public ConfigServerTestFixture { public: @@ -73,12 +74,6 @@ protected: setupShards({shard}); - CollectionType collection(_namespace, _epoch, Date_t::now(), UUID::gen()); - collection.setKeyPattern(BSON("x" << 1)); - - ASSERT_OK(insertToConfigCollection( - operationContext(), CollectionType::ConfigNS, collection.toBSON())); - ChunkType chunk; chunk.setName(OID::gen()); chunk.setNS(_namespace); @@ -96,7 +91,7 @@ protected: otherChunk.setMin(nonJumboChunk().getMin()); otherChunk.setMax(nonJumboChunk().getMax()); - setupChunks({chunk, otherChunk}); + setupCollection(_namespace, kKeyPattern, {chunk, otherChunk}); } private: diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp index 0283b4c1cc4..754deb1bf86 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp @@ -47,6 +47,7 @@ using unittest::assertGet; using CommitChunkMigrate = ConfigServerTestFixture; const NamespaceString kNamespace("TestDB.TestColl"); +const KeyPattern kKeyPattern(BSON("x" << 1)); TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) { ShardType shard0; @@ -83,7 +84,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) { controlChunk.setJumbo(true); } - setupChunks({migratedChunk, controlChunk}); + setupCollection(kNamespace, kKeyPattern, {migratedChunk, controlChunk}); Timestamp validAfter{101, 0}; BSONObj versions = assertGet(ShardingCatalogManager::get(operationContext()) @@ -154,7 +155,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) { auto chunkMax = BSON("a" << 10); chunk0.setMax(chunkMax); - setupChunks({chunk0}); + setupCollection(kNamespace, kKeyPattern, {chunk0}); Timestamp validAfter{101, 0}; @@ -211,7 +212,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) { auto chunkMax = BSON("a" << 10); chunk0.setMax(chunkMax); - setupChunks({chunk0}); + setupCollection(kNamespace, kKeyPattern, {chunk0}); // Make the time distance between the last history element large enough. Timestamp validAfter{200, 0}; @@ -270,7 +271,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) { auto chunkMax = BSON("a" << 10); chunk0.setMax(chunkMax); - setupChunks({chunk0}); + setupCollection(kNamespace, kKeyPattern, {chunk0}); // Make the time before the last change to trigger the failure. 
Timestamp validAfter{99, 0}; @@ -288,7 +289,6 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) { } TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) { - ShardType shard0; shard0.setName("shard0"); shard0.setHost("shard0:12"); @@ -324,7 +324,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) { auto chunkMaxax = BSON("a" << 20); chunk1.setMax(chunkMaxax); - setupChunks({chunk0, chunk1}); + setupCollection(kNamespace, kKeyPattern, {chunk0, chunk1}); Timestamp validAfter{1}; @@ -341,7 +341,6 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) { } TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) { - ShardType shard0; shard0.setName("shard0"); shard0.setHost("shard0:12"); @@ -379,7 +378,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) { chunk1.setMax(chunkMaxax); // get version from the control chunk this time - setupChunks({chunk1, chunk0}); + setupCollection(kNamespace, kKeyPattern, {chunk1, chunk0}); Timestamp validAfter{1}; @@ -396,7 +395,6 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) { } TEST_F(CommitChunkMigrate, RejectChunkMissing0) { - ShardType shard0; shard0.setName("shard0"); shard0.setHost("shard0:12"); @@ -432,7 +430,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) { auto chunkMaxax = BSON("a" << 20); chunk1.setMax(chunkMaxax); - setupChunks({chunk1}); + setupCollection(kNamespace, kKeyPattern, {chunk1}); Timestamp validAfter{1}; @@ -449,7 +447,6 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) { } TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) { - ShardType shard0; shard0.setName("shard0"); shard0.setHost("shard0:12"); @@ -489,7 +486,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) Timestamp ctrlChunkValidAfter = Timestamp(50, 0); chunk1.setHistory({ChunkHistory(ctrlChunkValidAfter, shard1.getName())}); - setupChunks({chunk0, chunk1}); + setupCollection(kNamespace, kKeyPattern, {chunk0, chunk1}); Timestamp validAfter{101, 0}; StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext()) @@ -557,7 +554,7 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) { currentChunk.setMin(BSON("a" << 1)); currentChunk.setMax(BSON("a" << 10)); - setupChunks({currentChunk}); + setupCollection(kNamespace, kKeyPattern, {currentChunk}); Timestamp validAfter{101, 0}; ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext()) @@ -606,7 +603,7 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) { currentChunk.setMin(BSON("a" << 1)); currentChunk.setMax(BSON("a" << 10)); - setupChunks({currentChunk}); + setupCollection(kNamespace, kKeyPattern, {currentChunk}); Timestamp validAfter{101, 0}; auto result = ShardingCatalogManager::get(operationContext()) @@ -655,7 +652,7 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) { currentChunk.setMin(BSON("a" << 1)); currentChunk.setMax(BSON("a" << 10)); - setupChunks({currentChunk}); + setupCollection(kNamespace, kKeyPattern, {currentChunk}); Timestamp validAfter{101, 0}; auto result = ShardingCatalogManager::get(operationContext()) diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp index dfda75a049c..f2f308811d3 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp @@ -36,6 +36,7 @@ namespace 
mongo { namespace { const NamespaceString kNss("TestDB", "TestColl"); +const KeyPattern kKeyPattern(BSON("x" << 1)); using EnsureChunkVersionIsGreaterThanTest = ConfigServerTestFixture; @@ -103,7 +104,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingEpochFoundRetur ChunkType existingChunkType = requestedChunkType; // Epoch is different. existingChunkType.setVersion(ChunkVersion(10, 2, OID::gen())); - setupChunks({existingChunkType}); + setupCollection(kNss, kKeyPattern, {existingChunkType}); ShardingCatalogManager::get(operationContext()) ->ensureChunkVersionIsGreaterThan(operationContext(), @@ -125,7 +126,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundRetu ChunkType existingChunkType = requestedChunkType; // Min key is different. existingChunkType.setMin(BSON("a" << -1)); - setupChunks({existingChunkType}); + setupCollection(kNss, kKeyPattern, {existingChunkType}); ShardingCatalogManager::get(operationContext()) ->ensureChunkVersionIsGreaterThan(operationContext(), @@ -147,7 +148,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundRetu ChunkType existingChunkType = requestedChunkType; // Max key is different. existingChunkType.setMax(BSON("a" << 20)); - setupChunks({existingChunkType}); + setupCollection(kNss, kKeyPattern, {existingChunkType}); ShardingCatalogManager::get(operationContext()) ->ensureChunkVersionIsGreaterThan(operationContext(), @@ -168,7 +169,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, const auto existingChunkType = requestedChunkType; const auto highestChunkType = generateChunkType( kNss, ChunkVersion(20, 3, epoch), ShardId("shard0001"), BSON("a" << 11), BSON("a" << 20)); - setupChunks({existingChunkType, highestChunkType}); + setupCollection(kNss, kKeyPattern, {existingChunkType, highestChunkType}); ShardingCatalogManager::get(operationContext()) ->ensureChunkVersionIsGreaterThan(operationContext(), @@ -191,7 +192,7 @@ TEST_F( ChunkType existingChunkType = requestedChunkType; existingChunkType.setVersion(ChunkVersion(11, 1, epoch)); - setupChunks({existingChunkType}); + setupCollection(kNss, kKeyPattern, {existingChunkType}); ShardingCatalogManager::get(operationContext()) ->ensureChunkVersionIsGreaterThan(operationContext(), diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp index 05478c71aa6..269bb6e6253 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp @@ -38,12 +38,14 @@ namespace mongo { namespace { -using unittest::assertGet; -const NamespaceString kNamespace("TestDB.TestColl"); +using unittest::assertGet; using MergeChunkTest = ConfigServerTestFixture; +const NamespaceString kNamespace("TestDB.TestColl"); +const KeyPattern kKeyPattern(BSON("x" << 1)); + TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { ChunkType chunk; chunk.setName(OID::gen()); @@ -69,7 +71,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax}; - setupChunks({chunk, chunk2}); + setupCollection(kNamespace, kKeyPattern, {chunk, chunk2}); Timestamp validAfter{100, 0}; @@ -151,7 +153,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) { // Record chunk boundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkBound2, chunkMax}; - 
setupChunks({chunk, chunk2, chunk3}); + setupCollection(kNamespace, kKeyPattern, {chunk, chunk2, chunk3}); Timestamp validAfter{100, 0}; @@ -229,7 +231,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) { otherChunk.setMin(BSON("a" << 10)); otherChunk.setMax(BSON("a" << 20)); - setupChunks({chunk, chunk2, otherChunk}); + setupCollection(kNamespace, kKeyPattern, {chunk, chunk2, otherChunk}); Timestamp validAfter{100, 0}; @@ -303,7 +305,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) { otherChunk.setMin(BSON("a" << 10)); otherChunk.setMax(BSON("a" << 20)); - setupChunks({chunk, chunk2, otherChunk}); + setupCollection(kNamespace, kKeyPattern, {chunk, chunk2, otherChunk}); Timestamp validAfter{1}; @@ -369,7 +371,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) { // Record chunk boundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax}; - setupChunks({chunk, chunk2}); + setupCollection(kNamespace, kKeyPattern, {chunk, chunk2}); Timestamp validAfter{1}; @@ -406,7 +408,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) { // Record chunk baoundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax}; - setupChunks({chunk, chunk2}); + setupCollection(kNamespace, kKeyPattern, {chunk, chunk2}); Timestamp validAfter{1}; @@ -451,7 +453,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) { mergedChunk.setVersion(mergedVersion); mergedChunk.setMax(chunkMax); - setupChunks({mergedChunk}); + setupCollection(kNamespace, kKeyPattern, {mergedChunk}); Timestamp validAfter{1}; @@ -516,7 +518,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) { chunk.setVersion(version); originalChunks.push_back(chunk); - setupChunks(originalChunks); + setupCollection(kNamespace, kKeyPattern, originalChunks); } Timestamp validAfter{1}; @@ -557,7 +559,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) { chunk3.setMin(chunkBound2); chunk3.setMax(chunkMax); - setupChunks({chunk1, chunk2, chunk3}); + setupCollection(kNamespace, kKeyPattern, {chunk1, chunk2, chunk3}); // Record chunk boundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound1, chunkBound2, chunkMax}; diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp index 254afab7c12..f8f73c0c7c7 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp @@ -63,6 +63,8 @@ using std::string; using std::vector; using unittest::assertGet; +const KeyPattern kKeyPattern(BSON("_id" << 1)); + BSONObj getReplSecondaryOkMetadata() { BSONObjBuilder o; ReadPreferenceSetting(ReadPreference::Nearest).toContainingBSON(&o); @@ -214,7 +216,9 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) { setupShards(std::vector<ShardType>{shard1, shard2}); setupDatabase("testDB", shard1.getName(), true); - setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}); + setupCollection(NamespaceString("testDB.testColl"), + kKeyPattern, + std::vector<ChunkType>{chunk1, chunk2, chunk3}); auto startedResult = ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName()); @@ -297,7 +301,9 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) { setupShards(std::vector<ShardType>{shard1, shard2}); setupDatabase("testDB", shard2.getName(), false); 
- setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}); + setupCollection(NamespaceString("testDB.testColl"), + kKeyPattern, + std::vector<ChunkType>{chunk1, chunk2, chunk3}); auto startedResult = ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName()); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp index 4bb7ad1ec6d..24281e68389 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp @@ -40,6 +40,7 @@ namespace { using unittest::assertGet; const NamespaceString kNamespace("TestDB", "TestColl"); +const KeyPattern kKeyPattern(BSON("a" << 1)); using SplitChunkTest = ConfigServerTestFixture; @@ -62,7 +63,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) { auto chunkSplitPoint = BSON("a" << 5); std::vector<BSONObj> splitPoints{chunkSplitPoint}; - setupChunks({chunk}); + setupCollection(kNamespace, kKeyPattern, {chunk}); auto versions = assertGet(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -135,7 +136,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) { auto chunkSplitPoint2 = BSON("a" << 7); std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2}; - setupChunks({chunk}); + setupCollection(kNamespace, kKeyPattern, {chunk}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -221,7 +222,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) { chunk2.setMin(BSON("a" << 10)); chunk2.setMax(BSON("a" << 20)); - setupChunks({chunk, chunk2}); + setupCollection(kNamespace, kKeyPattern, {chunk, chunk2}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -272,7 +273,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) { auto chunkSplitPoint = BSON("a" << 5); splitPoints.push_back(chunkSplitPoint); - setupChunks({chunk}); + setupCollection(kNamespace, kKeyPattern, {chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -299,7 +300,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) { std::vector<BSONObj> splitPoints{BSON("a" << 5)}; - setupChunks({chunk}); + setupCollection(kNamespace, kKeyPattern, {chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -326,7 +327,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) { std::vector<BSONObj> splitPoints{BSON("a" << 5)}; - setupChunks({chunk}); + setupCollection(kNamespace, kKeyPattern, {chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -354,7 +355,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) { std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)}; - setupChunks({chunk}); + setupCollection(kNamespace, kKeyPattern, {chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -381,7 +382,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) { std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)}; - setupChunks({chunk}); + setupCollection(kNamespace, kKeyPattern, {chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -409,7 +410,7 
@@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) { std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)}; - setupChunks({chunk}); + setupCollection(kNamespace, kKeyPattern, {chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -433,7 +434,7 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) { auto chunkMax = BSON("a" << kMaxBSONKey); chunk.setMin(chunkMin); chunk.setMax(chunkMax); - setupChunks({chunk}); + setupCollection(kNamespace, kKeyPattern, {chunk}); ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp index 71995b20b72..4786c4a9c1f 100644 --- a/src/mongo/db/s/metadata_manager_test.cpp +++ b/src/mongo/db/s/metadata_manager_test.cpp @@ -86,6 +86,7 @@ protected: false, epoch, boost::none, + true, {ChunkType{kNss, range, ChunkVersion(1, 0, epoch), kOtherShard}}); return CollectionMetadata(ChunkManager(kThisShard, @@ -132,7 +133,7 @@ protected: splitChunks.emplace_back( kNss, ChunkRange(maxKey, chunkToSplit.getMax()), chunkVersion, kOtherShard); - auto rt = cm->getRoutingTableHistory_ForTest().makeUpdated(boost::none, splitChunks); + auto rt = cm->getRoutingTableHistory_ForTest().makeUpdated(boost::none, true, splitChunks); return CollectionMetadata(ChunkManager(cm->dbPrimary(), cm->dbVersion(), @@ -157,7 +158,9 @@ protected: chunkVersion.incMajor(); auto rt = cm->getRoutingTableHistory_ForTest().makeUpdated( - boost::none, {ChunkType(kNss, ChunkRange(minKey, maxKey), chunkVersion, kOtherShard)}); + boost::none, + true, + {ChunkType(kNss, ChunkRange(minKey, maxKey), chunkVersion, kOtherShard)}); return CollectionMetadata(ChunkManager(cm->dbPrimary(), cm->dbVersion(), diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp index 3d87d74f94b..6ceedcb45af 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp @@ -155,6 +155,7 @@ protected: false, epoch, boost::none, + true, {ChunkType{kNss, ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)}, ChunkVersion(1, 0, epoch), diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp index 570c9c3ecb5..e6951a2cf85 100644 --- a/src/mongo/db/s/migration_source_manager.cpp +++ b/src/mongo/db/s/migration_source_manager.cpp @@ -164,6 +164,9 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx, uassert(ErrorCodes::IncompatibleShardingMetadata, "Cannot move chunks for an unsharded collection", metadata.isSharded()); + uassert(ErrorCodes::ConflictingOperationInProgress, + "Collection is undergoing changes so moveChunk is not allowed.", + metadata.allowMigrations()); return std::make_tuple(std::move(metadata), std::move(collectionUUID)); }(); diff --git a/src/mongo/db/s/op_observer_sharding_test.cpp b/src/mongo/db/s/op_observer_sharding_test.cpp index f1c78c2d9e9..e95b45c99a3 100644 --- a/src/mongo/db/s/op_observer_sharding_test.cpp +++ b/src/mongo/db/s/op_observer_sharding_test.cpp @@ -73,6 +73,7 @@ protected: false, epoch, boost::none, + true, {std::move(chunk)}); return CollectionMetadata(ChunkManager(ShardId("this"), diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp index b2de302ab59..c9e10579df7 100644 --- 
a/src/mongo/db/s/range_deletion_util_test.cpp +++ b/src/mongo/db/s/range_deletion_util_test.cpp @@ -97,6 +97,7 @@ public: false, epoch, boost::none, + true, {ChunkType{kNss, ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)}, ChunkVersion(1, 0, epoch), diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp index 7d81fe332c7..d9ef0c52d6c 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp +++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp @@ -159,10 +159,11 @@ BSONObj createReshardingFieldsUpdateForOriginalNss( TypeCollectionDonorFields donorField(coordinatorDoc.getReshardingKey()); originalEntryReshardingFields.setDonorFields(donorField); - return BSON( - "$set" << BSON("reshardingFields" - << originalEntryReshardingFields.toBSON() << "lastmod" - << opCtx->getServiceContext()->getPreciseClockSource()->now())); + return BSON("$set" << BSON(CollectionType::kReshardingFieldsFieldName + << originalEntryReshardingFields.toBSON() + << CollectionType::kUpdatedAtFieldName + << opCtx->getServiceContext()->getPreciseClockSource()->now() + << CollectionType::kAllowMigrationsFieldName << false)); } case CoordinatorStateEnum::kCommitted: // Update the config.collections entry for the original nss to reflect @@ -179,10 +180,10 @@ BSONObj createReshardingFieldsUpdateForOriginalNss( case mongo::CoordinatorStateEnum::kDone: // Remove 'reshardingFields' from the config.collections entry return BSON( - "$unset" << BSON("reshardingFields" - << "") + "$unset" << BSON(CollectionType::kReshardingFieldsFieldName + << "" << CollectionType::kAllowMigrationsFieldName << "") << "$set" - << BSON("lastmod" + << BSON(CollectionType::kUpdatedAtFieldName << opCtx->getServiceContext()->getPreciseClockSource()->now())); default: // Update the 'state' field in the 'reshardingFields' section diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp index f23de8b3a08..4ffebac60fb 100644 --- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp +++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp @@ -105,6 +105,7 @@ protected: false, epoch, boost::none, + true, {std::move(chunk)})), boost::none); diff --git a/src/mongo/db/s/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding_destined_recipient_test.cpp index 7e3a1c9f3bc..164a51fda5e 100644 --- a/src/mongo/db/s/resharding_destined_recipient_test.cpp +++ b/src/mongo/db/s/resharding_destined_recipient_test.cpp @@ -156,13 +156,6 @@ public: } protected: - CollectionType createCollection(const OID& epoch) { - CollectionType coll(kNss, epoch, Date_t::now(), UUID::gen()); - coll.setKeyPattern(BSON(kShardKey << 1)); - coll.setUnique(false); - return coll; - } - std::vector<ChunkType> createChunks(const OID& epoch, const std::string& shardKey) { auto range1 = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << 5)); ChunkType chunk1(kNss, range1, ChunkVersion(1, 0, epoch), kShardList[0].getName()); @@ -201,20 +194,22 @@ protected: client.createCollection(env.tempNss.ns()); - DatabaseType db(kNss.db().toString(), kShardList[0].getName(), true, env.dbVersion); TypeCollectionReshardingFields reshardingFields; reshardingFields.setUuid(UUID::gen()); reshardingFields.setDonorFields(TypeCollectionDonorFields{BSON("y" << 1)}); - auto collType = createCollection(env.version.epoch()); + CollectionType coll(kNss, 
env.version.epoch(), Date_t::now(), UUID::gen()); + coll.setKeyPattern(BSON(kShardKey << 1)); + coll.setUnique(false); + coll.setAllowMigrations(false); _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(db); _mockCatalogCacheLoader->setCollectionRefreshValues( - kNss, collType, createChunks(env.version.epoch(), kShardKey), reshardingFields); + kNss, coll, createChunks(env.version.epoch(), kShardKey), reshardingFields); _mockCatalogCacheLoader->setCollectionRefreshValues( - env.tempNss, collType, createChunks(env.version.epoch(), "y"), boost::none); + env.tempNss, coll, createChunks(env.version.epoch(), "y"), boost::none); forceDatabaseRefresh(opCtx, kNss.db()); forceShardFilteringMetadataRefresh(opCtx, kNss); @@ -229,21 +224,9 @@ protected: const NamespaceString& nss, const BSONObj& doc, const ReshardingEnv& env) { + AutoGetCollection coll(opCtx, nss, MODE_IX); WriteUnitOfWork wuow(opCtx); - AutoGetCollection autoColl1(opCtx, nss, MODE_IX); - - // TODO(SERVER-50027): This is to temporarily make this test pass until getOwnershipFilter - // has been updated to detect frozen migrations. - if (!OperationShardingState::isOperationVersioned(opCtx)) { - OperationShardingState::get(opCtx).initializeClientRoutingVersions( - nss, env.version, env.dbVersion); - } - - auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss); - ASSERT(collection); - auto status = collection->insertDocument(opCtx, InsertStatement(doc), nullptr); - ASSERT_OK(status); - + ASSERT_OK(coll->insertDocument(opCtx, InsertStatement(doc), nullptr)); wuow.commit(); } @@ -253,14 +236,6 @@ protected: const BSONObj& update, const ReshardingEnv& env) { AutoGetCollection coll(opCtx, nss, MODE_IX); - - // TODO(SERVER-50027): This is to temporarily make this test pass until getOwnershipFilter - // has been updated to detect frozen migrations. - if (!OperationShardingState::isOperationVersioned(opCtx)) { - OperationShardingState::get(opCtx).initializeClientRoutingVersions( - kNss, env.version, env.dbVersion); - } - Helpers::update(opCtx, nss.toString(), filter, update); } @@ -270,13 +245,6 @@ protected: const ReshardingEnv& env) { AutoGetCollection coll(opCtx, nss, MODE_IX); - // TODO(SERVER-50027): This is to temporarily make this test pass until getOwnershipFilter - // has been updated to detect frozen migrations. - if (!OperationShardingState::isOperationVersioned(opCtx)) { - OperationShardingState::get(opCtx).initializeClientRoutingVersions( - kNss, env.version, env.dbVersion); - } - RecordId rid = Helpers::findOne(opCtx, coll.getCollection(), query, false); ASSERT(!rid.isNull()); @@ -303,14 +271,8 @@ TEST_F(DestinedRecipientTest, TestGetDestinedRecipient) { auto env = setupReshardingEnv(opCtx, true); AutoGetCollection coll(opCtx, kNss, MODE_IX); - - // TODO(SERVER-50027): This is to temporarily make this test pass until getOwnershipFilter has - // been updated to detect frozen migrations. 
- if (!OperationShardingState::isOperationVersioned(opCtx)) { - OperationShardingState::get(opCtx).initializeClientRoutingVersions( - kNss, env.version, env.dbVersion); - } - + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, env.dbVersion); auto destShardId = getDestinedRecipient(opCtx, kNss, BSON("x" << 2 << "y" << 10)); ASSERT(destShardId); ASSERT_EQ(*destShardId, env.destShard); @@ -322,14 +284,8 @@ TEST_F(DestinedRecipientTest, TestGetDestinedRecipientThrowsOnBlockedRefresh) { { AutoGetCollection coll(opCtx, kNss, MODE_IX); - - // TODO(SERVER-50027): This is to temporarily make this test pass until getOwnershipFilter - // has been updated to detect frozen migrations. - if (!OperationShardingState::isOperationVersioned(opCtx)) { - OperationShardingState::get(opCtx).initializeClientRoutingVersions( - kNss, env.version, env.dbVersion); - } - + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, env.dbVersion); ASSERT_THROWS(getDestinedRecipient(opCtx, kNss, BSON("x" << 2 << "y" << 10)), ExceptionFor<ErrorCodes::ShardInvalidatedForTargeting>); } @@ -341,6 +297,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnInserts) { auto opCtx = operationContext(); auto env = setupReshardingEnv(opCtx, true); + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, env.dbVersion); writeDoc(opCtx, kNss, BSON("_id" << 0 << "x" << 2 << "y" << 10), env); auto entry = getLastOplogEntry(opCtx); @@ -354,6 +312,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnInsertsInTran auto opCtx = operationContext(); auto env = setupReshardingEnv(opCtx, true); + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, env.dbVersion); runInTransaction( opCtx, [&]() { writeDoc(opCtx, kNss, BSON("_id" << 0 << "x" << 2 << "y" << 10), env); }); @@ -380,6 +340,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdates) { auto env = setupReshardingEnv(opCtx, true); + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, env.dbVersion); updateDoc(opCtx, kNss, BSON("_id" << 0), BSON("$set" << BSON("z" << 50)), env); auto entry = getLastOplogEntry(opCtx); @@ -389,6 +351,30 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdates) { ASSERT_EQ(*recipShard, env.destShard); } +TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnMultiUpdates) { + auto opCtx = operationContext(); + + DBDirectClient client(opCtx); + client.insert(kNss.toString(), BSON("x" << 0 << "y" << 10 << "z" << 4)); + client.insert(kNss.toString(), BSON("x" << 0 << "y" << 10 << "z" << 4)); + + auto env = setupReshardingEnv(opCtx, true); + + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, ChunkVersion::IGNORED(), env.dbVersion); + client.update(kNss.ns(), + Query{BSON("x" << 0)}, + BSON("$set" << BSON("z" << 5)), + false /*upsert*/, + true /*multi*/); + + auto entry = getLastOplogEntry(opCtx); + auto recipShard = entry.getDestinedRecipient(); + + ASSERT(recipShard); + ASSERT_EQ(*recipShard, env.destShard); +} + TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdatesOutOfPlace) { auto opCtx = operationContext(); @@ -397,6 +383,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdatesOutOfP auto env = setupReshardingEnv(opCtx, true); + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, 
env.dbVersion); updateDoc(opCtx, kNss, BSON("_id" << 0), BSON("$set" << BSON("z" << 50)), env); auto entry = getLastOplogEntry(opCtx); @@ -414,6 +402,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdatesInTran auto env = setupReshardingEnv(opCtx, true); + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, env.dbVersion); runInTransaction(opCtx, [&]() { updateDoc(opCtx, kNss, BSON("_id" << 0), BSON("$set" << BSON("z" << 50)), env); }); @@ -441,6 +431,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnDeletes) { auto env = setupReshardingEnv(opCtx, true); + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, env.dbVersion); deleteDoc(opCtx, kNss, BSON("_id" << 0), env); auto entry = getLastOplogEntry(opCtx); @@ -458,6 +450,8 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnDeletesInTran auto env = setupReshardingEnv(opCtx, true); + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, env.dbVersion); runInTransaction(opCtx, [&]() { deleteDoc(opCtx, kNss, BSON("_id" << 0), env); }); // Look for destined recipient in latest oplog entry. Since this write was done in a @@ -483,6 +477,8 @@ TEST_F(DestinedRecipientTest, TestUpdateChangesOwningShardThrows) { auto env = setupReshardingEnv(opCtx, true); + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, env.dbVersion); ASSERT_THROWS(runInTransaction( opCtx, [&]() { @@ -500,6 +496,8 @@ TEST_F(DestinedRecipientTest, TestUpdateSameOwningShard) { auto env = setupReshardingEnv(opCtx, true); + OperationShardingState::get(opCtx).initializeClientRoutingVersions( + kNss, env.version, env.dbVersion); runInTransaction(opCtx, [&]() { updateDoc(opCtx, kNss, BSON("_id" << 0), BSON("$set" << BSON("y" << 3)), env); }); diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp index 385ab96afc8..77704557cb9 100644 --- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp +++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp @@ -94,20 +94,14 @@ Status persistCollectionAndChangedChunks(OperationContext* opCtx, const CollectionAndChangedChunks& collAndChunks, const ChunkVersion& maxLoaderVersion) { // Update the collections collection entry for 'nss' in case there are any new updates. - ShardCollectionType update = ShardCollectionType(nss, - collAndChunks.epoch, - *collAndChunks.uuid, - collAndChunks.shardKeyPattern, - collAndChunks.shardKeyIsUnique); - - update.setUuid(*collAndChunks.uuid); - if (!collAndChunks.defaultCollation.isEmpty()) { - update.setDefaultCollation(collAndChunks.defaultCollation.getOwned()); - } - - if (collAndChunks.reshardingFields) { - update.setReshardingFields(collAndChunks.reshardingFields.get()); - } + ShardCollectionType update(nss, + collAndChunks.epoch, + *collAndChunks.uuid, + collAndChunks.shardKeyPattern, + collAndChunks.shardKeyIsUnique); + update.setDefaultCollation(collAndChunks.defaultCollation); + update.setReshardingFields(collAndChunks.reshardingFields); + update.setAllowMigrations(collAndChunks.allowMigrations); // Mark the chunk metadata as refreshing, so that secondaries are aware of refresh. 
update.setRefreshing(true); @@ -242,6 +236,7 @@ CollectionAndChangedChunks getPersistedMetadataSinceVersion(OperationContext* op shardCollectionEntry.getDefaultCollation(), shardCollectionEntry.getUnique(), shardCollectionEntry.getReshardingFields(), + shardCollectionEntry.getAllowMigrations(), std::move(changedChunks)}; } diff --git a/src/mongo/db/s/type_shard_collection.cpp b/src/mongo/db/s/type_shard_collection.cpp index 297d06471dc..a4720650021 100644 --- a/src/mongo/db/s/type_shard_collection.cpp +++ b/src/mongo/db/s/type_shard_collection.cpp @@ -68,4 +68,11 @@ BSONObj ShardCollectionType::toBSON() const { return obj; } +void ShardCollectionType::setAllowMigrations(bool allowMigrations) { + if (allowMigrations) + setPre50CompatibleAllowMigrations(boost::none); + else + setPre50CompatibleAllowMigrations(false); +} + } // namespace mongo diff --git a/src/mongo/db/s/type_shard_collection.h b/src/mongo/db/s/type_shard_collection.h index 9a313183ef0..de71f2aab37 100644 --- a/src/mongo/db/s/type_shard_collection.h +++ b/src/mongo/db/s/type_shard_collection.h @@ -42,6 +42,7 @@ public: using ShardCollectionTypeBase::kKeyPatternFieldName; using ShardCollectionTypeBase::kLastRefreshedCollectionVersionFieldName; using ShardCollectionTypeBase::kNssFieldName; + using ShardCollectionTypeBase::kPre50CompatibleAllowMigrationsFieldName; using ShardCollectionTypeBase::kRefreshingFieldName; using ShardCollectionTypeBase::kReshardingFieldsFieldName; using ShardCollectionTypeBase::kUniqueFieldName; @@ -79,6 +80,11 @@ public: // A wrapper around the IDL generated 'ShardCollectionTypeBase::toBSON' to ensure backwards // compatibility. BSONObj toBSON() const; + + bool getAllowMigrations() const { + return getPre50CompatibleAllowMigrations().get_value_or(true); + } + void setAllowMigrations(bool allowMigrations); }; } // namespace mongo diff --git a/src/mongo/db/s/type_shard_collection.idl b/src/mongo/db/s/type_shard_collection.idl index 60a572e04fc..2ee5ffc7481 100644 --- a/src/mongo/db/s/type_shard_collection.idl +++ b/src/mongo/db/s/type_shard_collection.idl @@ -134,3 +134,14 @@ structs: collection is the temporary resharding collection." type: TypeCollectionReshardingFields optional: true + allowMigrations: # TODO (SERVER-51880): This field must never be 'false' on downgrade + # to FCV 4.4 and must be cleared + cpp_name: pre50CompatibleAllowMigrations + type: bool + description: "Whether this collection allows chunks to move. It is required by + almost all DDL operations in order to guarantee that the set of + shards, which comprise a collection will not change. + + It must be optional and not present when running in FCV 4.4, because + binaries prior to 5.0 use strict parsing and will fail." 
+            optional: true
diff --git a/src/mongo/db/s/type_shard_collection_test.cpp b/src/mongo/db/s/type_shard_collection_test.cpp
index 57ef1140eeb..a9e8eba50bd 100644
--- a/src/mongo/db/s/type_shard_collection_test.cpp
+++ b/src/mongo/db/s/type_shard_collection_test.cpp
@@ -109,5 +109,18 @@ TEST(ShardCollectionType, ReshardingFieldsIncluded) {
     ASSERT_EQ(reshardingUuid, shardCollType.getReshardingFields()->getUuid());
 }
+TEST(ShardCollectionType, AllowMigrationsFieldBackwardsCompatibility) {
+    ShardCollectionType shardCollType(kNss, OID::gen(), UUID::gen(), kKeyPattern, true);
+    shardCollType.setAllowMigrations(false);
+    ASSERT_EQ(
+        false,
+        shardCollType.toBSON()[ShardCollectionTypeBase::kPre50CompatibleAllowMigrationsFieldName]
+            .Bool());
+
+    shardCollType.setAllowMigrations(true);
+    ASSERT(shardCollType.toBSON()[ShardCollectionTypeBase::kPre50CompatibleAllowMigrationsFieldName]
+               .eoo());
+}
+
 } // namespace
 } // namespace mongo
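The type_shard_collection changes and the test above encode allowMigrations in a backwards-compatible way: true is represented by omitting the field entirely, so 4.4 binaries, which parse the shard collection document strictly, never see an unknown field, and only false is ever written out. A standalone sketch of that mapping, with a plain std::optional<bool> standing in for the IDL-generated pre50CompatibleAllowMigrations accessors:

    #include <cassert>
    #include <optional>

    // Models the "pre50CompatibleAllowMigrations" field: absent means true.
    class ShardCollectionSketch {
    public:
        void setAllowMigrations(bool allowMigrations) {
            // Only persist the field when it carries information (false); otherwise clear
            // it so that strict pre-5.0 parsers never encounter an unknown field.
            _pre50CompatibleAllowMigrations =
                allowMigrations ? std::nullopt : std::optional<bool>(false);
        }

        bool getAllowMigrations() const {
            return _pre50CompatibleAllowMigrations.value_or(true);
        }

        bool fieldIsSerialized() const {
            return _pre50CompatibleAllowMigrations.has_value();
        }

    private:
        std::optional<bool> _pre50CompatibleAllowMigrations;
    };

    int main() {
        ShardCollectionSketch coll;
        assert(coll.getAllowMigrations() && !coll.fieldIsSerialized());  // default: allowed, omitted

        coll.setAllowMigrations(false);
        assert(!coll.getAllowMigrations() && coll.fieldIsSerialized());  // frozen: field written as false

        coll.setAllowMigrations(true);
        assert(coll.getAllowMigrations() && !coll.fieldIsSerialized());  // allowed again: field removed
    }

Because only an explicit false is ever serialized, a downgrade only has to clear the field where it was set, which is what the TODO referencing SERVER-51880 in the IDL file tracks.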