Diffstat (limited to 'src/mongo/db/s')
5 files changed, 130 insertions, 9 deletions
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index ed54d9d24f9..ed0eaf1d357 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -300,6 +300,16 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToSpli
         const NamespaceString& nss(coll.getNss());
 
+        if (coll.getTimeseriesFields()) {
+            LOGV2_DEBUG(5559200,
+                        1,
+                        "Not splitting collection {namespace}; explicitly disabled.",
+                        "Not splitting explicitly disabled collection",
+                        "namespace"_attr = nss,
+                        "timeseriesFields"_attr = coll.getTimeseriesFields());
+            continue;
+        }
+
         auto candidatesStatus = _getSplitCandidatesForCollection(opCtx, nss, shardStats);
         if (candidatesStatus == ErrorCodes::NamespaceNotFound) {
             // Namespace got dropped before we managed to get to it, so just skip it
@@ -373,14 +383,15 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
         const NamespaceString& nss(coll.getNss());
 
-        if (!coll.getAllowBalance() || !coll.getAllowMigrations()) {
+        if (!coll.getAllowBalance() || !coll.getAllowMigrations() || coll.getTimeseriesFields()) {
             LOGV2_DEBUG(21851,
                         1,
                         "Not balancing collection {namespace}; explicitly disabled.",
                         "Not balancing explicitly disabled collection",
                         "namespace"_attr = nss,
                         "allowBalance"_attr = coll.getAllowBalance(),
-                        "allowMigrations"_attr = coll.getAllowMigrations());
+                        "allowMigrations"_attr = coll.getAllowMigrations(),
+                        "timeseriesFields"_attr = coll.getTimeseriesFields());
             continue;
         }
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index 46be0f11fd9..0df1e8a4338 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -34,6 +34,7 @@
 #include "mongo/db/s/balancer/cluster_statistics_impl.h"
 #include "mongo/db/s/balancer/migration_test_fixture.h"
 #include "mongo/platform/random.h"
+#include "mongo/s/type_collection_timeseries_fields_gen.h"
 
 namespace mongo {
 namespace {
@@ -230,5 +231,97 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
                {BSON(kPattern << -15), kKeyPattern.globalMax()}});
 }
 
+TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCannotBeAutoSplitted) {
+    // Set up two shards in the metadata, each one with its own tag
+    ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+                                                    ShardType::ConfigNS,
+                                                    appendTags(kShard0, {"A"}),
+                                                    kMajorityWriteConcern));
+    ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+                                                    ShardType::ConfigNS,
+                                                    appendTags(kShard1, {"B"}),
+                                                    kMajorityWriteConcern));
+
+    // Set up a database and a sharded collection in the metadata.
+    const auto collUUID = UUID::gen();
+    ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+    setUpDatabase(kDbName, kShardId0);
+    setUpCollection(kNamespace, collUUID, version, TypeCollectionTimeseriesFields("fieldName"));
+
+    // Set up two zones
+    setUpTags(kNamespace,
+              {
+                  {"A", {kKeyPattern.globalMin(), BSON(kPattern << 0)}},
+                  {"B", {BSON(kPattern << 0), kKeyPattern.globalMax()}},
+              });
+
+    // Create just one chunk covering the whole space
+    setUpChunk(
+        kNamespace, collUUID, kKeyPattern.globalMin(), kKeyPattern.globalMax(), kShardId0, version);
+
+    auto future = launchAsync([this] {
+        ThreadClient tc(getServiceContext());
+        auto opCtx = Client::getCurrent()->makeOperationContext();
+
+        // Requests chunks to be relocated requires running commands on each shard to
+        // get shard statistics. Set up dummy hosts for the source shards.
+        shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+        shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1);
+
+        auto candidateChunksStatus = _chunkSelectionPolicy.get()->selectChunksToSplit(opCtx.get());
+        ASSERT_OK(candidateChunksStatus.getStatus());
+
+        // No chunks to split since the coll is a sharded time-series collection
+        ASSERT_EQUALS(0U, candidateChunksStatus.getValue().size());
+    });
+
+    expectGetStatsCommands(2);
+    future.default_timed_get();
+}
+
+TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCannotBeBalanced) {
+    // Set up two shards in the metadata.
+    ASSERT_OK(catalogClient()->insertConfigDocument(
+        operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
+    ASSERT_OK(catalogClient()->insertConfigDocument(
+        operationContext(), ShardType::ConfigNS, kShard1, kMajorityWriteConcern));
+
+    // Set up a database and a sharded collection in the metadata.
+    const auto collUUID = UUID::gen();
+    ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+    setUpDatabase(kDbName, kShardId0);
+    setUpCollection(kNamespace, collUUID, version, TypeCollectionTimeseriesFields("fieldName"));
+
+    auto addChunk = [&](const BSONObj& min, const BSONObj& max) {
+        setUpChunk(kNamespace, collUUID, min, max, kShardId0, version);
+        version.incMinor();
+    };
+
+    addChunk(kKeyPattern.globalMin(), BSON(kPattern << 0));
+    for (int i = 1; i <= 100; ++i) {
+        addChunk(BSON(kPattern << (i - 1)), BSON(kPattern << i));
+    }
+    addChunk(BSON(kPattern << 100), kKeyPattern.globalMax());
+
+    auto future = launchAsync([this] {
+        ThreadClient tc(getServiceContext());
+        auto opCtx = Client::getCurrent()->makeOperationContext();
+
+        // Requests chunks to be relocated requires running commands on each shard to
+        // get shard statistics. Set up dummy hosts for the source shards.
+        shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+        shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1);
+
+        auto candidateChunksStatus = _chunkSelectionPolicy.get()->selectChunksToMove(opCtx.get());
+        ASSERT_OK(candidateChunksStatus.getStatus());
+
+        // No chunks to move since the coll is a sharded time-series collection
+        ASSERT_EQUALS(0, candidateChunksStatus.getValue().size());
+    });
+
+    expectGetStatsCommands(2);
+    future.default_timed_get();
+}
+
 }  // namespace
 }  // namespace mongo
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.cpp b/src/mongo/db/s/balancer/migration_test_fixture.cpp
index 1c1e546d30b..3320a43142d 100644
--- a/src/mongo/db/s/balancer/migration_test_fixture.cpp
+++ b/src/mongo/db/s/balancer/migration_test_fixture.cpp
@@ -53,12 +53,15 @@ void MigrationTestFixture::setUpDatabase(const std::string& dbName, const ShardI
         operationContext(), DatabaseType::ConfigNS, db.toBSON(), kMajorityWriteConcern));
 }
 
-void MigrationTestFixture::setUpCollection(const NamespaceString& collName,
-                                           const UUID& collUUID,
-                                           ChunkVersion version) {
-    CollectionType coll(collName, version.epoch(), Date_t::now(), collUUID);
+void MigrationTestFixture::setUpCollection(
+    const NamespaceString& collName,
+    const UUID& collUUID,
+    const ChunkVersion& version,
+    boost::optional<TypeCollectionTimeseriesFields> timeseriesFields) {
+    CollectionType coll(collName, version.epoch(), version.getTimestamp(), Date_t::now(), collUUID);
     coll.setKeyPattern(kKeyPattern);
     coll.setUnique(false);
+    coll.setTimeseriesFields(std::move(timeseriesFields));
 
     ASSERT_OK(catalogClient()->insertConfigDocument(
         operationContext(), CollectionType::ConfigNS, coll.toBSON(), kMajorityWriteConcern));
 }
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.h b/src/mongo/db/s/balancer/migration_test_fixture.h
index 3f54b9a3ce5..a0cd6397bb9 100644
--- a/src/mongo/db/s/balancer/migration_test_fixture.h
+++ b/src/mongo/db/s/balancer/migration_test_fixture.h
@@ -41,6 +41,7 @@
 #include "mongo/s/catalog/type_database.h"
 #include "mongo/s/catalog/type_shard.h"
 #include "mongo/s/catalog/type_tags.h"
+#include "mongo/s/type_collection_timeseries_fields_gen.h"
 
 namespace mongo {
 
@@ -72,9 +73,11 @@ protected:
      * Inserts a document into the config.collections collection to indicate that "collName" is
      * sharded with version "version". The shard key pattern defaults to "_id".
      */
-    void setUpCollection(const NamespaceString& collName,
-                         const UUID& collUUID,
-                         ChunkVersion version);
+    void setUpCollection(
+        const NamespaceString& collName,
+        const UUID& collUUID,
+        const ChunkVersion& version,
+        boost::optional<TypeCollectionTimeseriesFields> timeseriesFields = boost::none);
 
     /**
      * Inserts a document into the config.chunks collection so that the chunk defined by the
diff --git a/src/mongo/db/s/shardsvr_drop_collection_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_command.cpp
index a7578e01870..6507b8d8d95 100644
--- a/src/mongo/db/s/shardsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_collection_command.cpp
@@ -40,6 +40,7 @@
 #include "mongo/db/s/sharding_ddl_coordinator_service.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/logv2/log.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/request_types/sharded_ddl_commands_gen.h"
 
@@ -76,6 +77,16 @@ public:
                               << opCtx->getWriteConcern().wMode,
                 opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
 
+        try {
+            const auto coll = Grid::get(opCtx)->catalogClient()->getCollection(opCtx, ns());
+
+            uassert(ErrorCodes::NotImplemented,
+                    "drop collection of a sharded time-series collection is not supported",
+                    !coll.getTimeseriesFields());
+        } catch (ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+            // The collection is not sharded or doesn't exist.
+        }
+
         FixedFCVRegion fcvRegion(opCtx);
 
         bool useNewPath = feature_flags::gShardingFullDDLSupport.isEnabled(*fcvRegion);
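
Taken together, the change treats the presence of timeseriesFields on a config.collections entry as a hard skip in both balancer selection passes (split and move), and makes shardsvr dropCollection refuse sharded time-series collections with NotImplemented. The snippet below is a minimal standalone sketch of that skip predicate, assuming illustrative stand-in types; CollectionEntry, TimeseriesFields and isBalancingCandidate are hypothetical names for this sketch, not the real CollectionType or selection-policy API.

// Illustrative sketch only: models the skip condition added in
// selectChunksToMove()/selectChunksToSplit() with stand-in types.
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct TimeseriesFields {
    std::string timeField;  // stand-in for TypeCollectionTimeseriesFields
};

struct CollectionEntry {
    std::string nss;
    bool allowBalance = true;
    bool allowMigrations = true;
    std::optional<TimeseriesFields> timeseriesFields;  // set for sharded time-series collections
};

// A collection is skipped when balancing is disabled, migrations are
// disallowed, or the collection is a sharded time-series collection.
bool isBalancingCandidate(const CollectionEntry& coll) {
    return coll.allowBalance && coll.allowMigrations && !coll.timeseriesFields;
}

int main() {
    std::vector<CollectionEntry> colls{
        {"test.regular"},
        {"test.system.buckets.weather", true, true, TimeseriesFields{"ts"}},
    };
    for (const auto& coll : colls) {
        std::cout << coll.nss << (isBalancingCandidate(coll) ? ": balanced" : ": skipped") << "\n";
    }
    return 0;
}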