Diffstat (limited to 'src/mongo/s')
 src/mongo/s/SConscript                                           |   1 +
 src/mongo/s/catalog/type_collection.cpp                          |   6 +
 src/mongo/s/catalog/type_collection.h                            |  10 +
 src/mongo/s/catalog/type_collection.idl                          |  10 +-
 src/mongo/s/catalog_cache.cpp                                    |  16 +
 src/mongo/s/catalog_cache_loader.cpp                             |   4 +
 src/mongo/s/catalog_cache_loader.h                               |   6 +
 src/mongo/s/catalog_cache_loader_mock.cpp                        |   2 +
 src/mongo/s/chunk_manager.cpp                                    |  28 +-
 src/mongo/s/chunk_manager.h                                      |  14 +
 src/mongo/s/chunk_manager_query_test.cpp                         |  11 +-
 src/mongo/s/chunk_manager_refresh_bm.cpp                         |   4 +-
 src/mongo/s/commands/SConscript                                  |   1 +
 src/mongo/s/commands/cluster_configure_collection_auto_split.cpp | 117 +
 src/mongo/s/config_server_catalog_cache_loader.cpp               |   2 +
 src/mongo/s/request_types/configure_collection_auto_split.idl    |  77 +
 src/mongo/s/routing_table_history_test.cpp                       |  24 +-
17 files changed, 315 insertions(+), 18 deletions(-)
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 155085c64cc..fbc03252a1d 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -160,6 +160,7 @@ env.Library(
         'request_types/clone_collection_options_from_primary_shard.idl',
         'request_types/commit_chunk_migration_request_type.cpp',
         'request_types/commit_reshard_collection.idl',
+        'request_types/configure_collection_auto_split.idl',
         'request_types/ensure_chunk_version_is_greater_than.idl',
         'request_types/flush_database_cache_updates.idl',
         'request_types/flush_resharding_state_change.idl',
diff --git a/src/mongo/s/catalog/type_collection.cpp b/src/mongo/s/catalog/type_collection.cpp
index c518a135aaa..349bda5a43a 100644
--- a/src/mongo/s/catalog/type_collection.cpp
+++ b/src/mongo/s/catalog/type_collection.cpp
@@ -36,6 +36,7 @@
 #include "mongo/bson/bsonobjbuilder.h"
 #include "mongo/bson/simple_bsonobj_comparator.h"
 #include "mongo/bson/util/bson_extract.h"
+#include "mongo/s/balancer_configuration.h"
 #include "mongo/util/assert_util.h"
 
 namespace mongo {
@@ -93,4 +94,9 @@ void CollectionType::setDefaultCollation(const BSONObj& defaultCollation) {
     setPre50CompatibleDefaultCollation(defaultCollation);
 }
 
+void CollectionType::setMaxChunkSizeBytes(int64_t value) {
+    uassert(ErrorCodes::BadValue, "Default chunk size is out of range", value > 0);
+    CollectionTypeBase::setMaxChunkSizeBytes(value);
+}
+
 }  // namespace mongo
diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h
index a5710d08f76..ffdc31a4a64 100644
--- a/src/mongo/s/catalog/type_collection.h
+++ b/src/mongo/s/catalog/type_collection.h
@@ -84,6 +84,9 @@ public:
     static constexpr auto kKeyPatternFieldName = kPre50CompatibleKeyPatternFieldName;
     static constexpr auto kUuidFieldName = kPre50CompatibleUuidFieldName;
     static constexpr auto kAllowMigrationsFieldName = kPre50CompatibleAllowMigrationsFieldName;
+
+    using CollectionTypeBase::kMaxChunkSizeBytesFieldName;
+    using CollectionTypeBase::kNoAutoSplitFieldName;
     using CollectionTypeBase::kNssFieldName;
     using CollectionTypeBase::kReshardingFieldsFieldName;
     using CollectionTypeBase::kTimeseriesFieldsFieldName;
@@ -92,6 +95,7 @@ public:
     using CollectionTypeBase::kUpdatedAtFieldName;
 
     // Make getters and setters accessible.
+    using CollectionTypeBase::getMaxChunkSizeBytes;
     using CollectionTypeBase::getNss;
     using CollectionTypeBase::getReshardingFields;
     using CollectionTypeBase::getTimeseriesFields;
@@ -147,6 +151,12 @@ public:
     }
 
     void setDefaultCollation(const BSONObj& defaultCollation);
+    void setMaxChunkSizeBytes(int64_t value);
+
+    bool getAllowAutoSplit() const {
+        return !getNoAutoSplit();
+    }
+
     bool getAllowBalance() const {
         return !getNoBalance();
     }
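The new setter is worth pausing on: unlike the IDL-generated CollectionTypeBase::setMaxChunkSizeBytes it wraps, it validates its argument. A minimal sketch of the contract (the helper function and the 64 MiB value are illustrative, not part of the patch):

    // Hypothetical helper: persist a per-collection chunk size on a CollectionType.
    void setPerCollectionChunkSize(CollectionType& coll, int64_t bytes) {
        // Throws ErrorCodes::BadValue ("Default chunk size is out of range")
        // for any value <= 0; the IDL-generated base setter does no such check.
        coll.setMaxChunkSizeBytes(bytes);
    }

    // setPerCollectionChunkSize(coll, 64 * 1024 * 1024);  // ok
    // setPerCollectionChunkSize(coll, 0);                 // uasserts

getAllowAutoSplit() deliberately mirrors the existing getAllowBalance(): the persisted flag is negative (noAutoSplit, default false), so catalog documents written before this change keep auto-splitting enabled.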
diff --git a/src/mongo/s/catalog/type_collection.idl b/src/mongo/s/catalog/type_collection.idl
index 5917c2faad5..3f91bbc0bee 100644
--- a/src/mongo/s/catalog/type_collection.idl
+++ b/src/mongo/s/catalog/type_collection.idl
@@ -117,6 +117,14 @@ structs:
                 type: bool
                 description: "Uniqueness of the sharding key."
                 default: false
+            maxChunkSizeBytes:
+                type: safeInt64
+                description: "Max chunk size in bytes."
+                optional: true
+            noAutoSplit:
+                type: bool
+                description: "Specifies whether the auto-splitter should be running or not for this collection."
+                default: false
             noBalance:
                 type: bool
                 description: "Consulted by the Balancer only and indicates whether this collection
@@ -144,4 +152,4 @@ structs:
                 type: TypeCollectionTimeseriesFields
                 description: "Time-series collection fields. Only set when this is a time-series
                               buckets collection."
-                optional: true
+                optional: true
\ No newline at end of file
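Putting the two new IDL fields together, a config.collections entry for a collection with a pinned 128 MiB chunk size and auto-splitting disabled would carry them roughly as below (a hand-built fragment for illustration; the remaining required CollectionType fields are elided):

    BSONObj entry = BSON("_id"
                         << "test.foo"
                         << "maxChunkSizeBytes" << static_cast<long long>(128 * 1024 * 1024)
                         << "noAutoSplit" << true
                         /* ... uuid, key, unique, lastmodEpoch, updatedAt ... */);

Both fields are backward compatible: maxChunkSizeBytes is optional and noAutoSplit defaults to false, so existing catalog documents parse unchanged.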
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 3710ac43bdc..9957a4e3a32 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -606,6 +606,19 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look
 
     auto collectionAndChunks = _catalogCacheLoader.getChunksSince(nss, lookupVersion).get();
 
+    const auto maxChunkSize = [&]() -> boost::optional<uint64_t> {
+        if (!collectionAndChunks.allowAutoSplit) {
+            // maxChunkSize = 0 is an invalid chunkSize so we use it to detect noAutoSplit
+            // on the steady-state path in incrementChunkOnInsertOrUpdate(...)
+            return 0;
+        }
+        if (collectionAndChunks.maxChunkSizeBytes) {
+            invariant(collectionAndChunks.maxChunkSizeBytes.get() > 0);
+            return uint64_t(*collectionAndChunks.maxChunkSizeBytes);
+        }
+        return boost::none;
+    }();
+
     auto newRoutingHistory = [&] {
         // If we have routing info already and it's for the same collection epoch, we're
         // updating. Otherwise, we're making a whole new routing table.
@@ -616,10 +629,12 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look
             return existingHistory->optRt
                 ->makeUpdatedReplacingTimestamp(collectionAndChunks.creationTime)
                 .makeUpdated(collectionAndChunks.reshardingFields,
+                             maxChunkSize,
                              collectionAndChunks.allowMigrations,
                              collectionAndChunks.changedChunks);
         } else {
             return existingHistory->optRt->makeUpdated(collectionAndChunks.reshardingFields,
+                                                       maxChunkSize,
                                                        collectionAndChunks.allowMigrations,
                                                        collectionAndChunks.changedChunks);
         }
@@ -644,6 +659,7 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look
             collectionAndChunks.creationTime,
             collectionAndChunks.timeseriesFields,
             std::move(collectionAndChunks.reshardingFields),
+            maxChunkSize,
             collectionAndChunks.allowMigrations,
             collectionAndChunks.changedChunks);
     }();
diff --git a/src/mongo/s/catalog_cache_loader.cpp b/src/mongo/s/catalog_cache_loader.cpp
index a25aa552150..265ee967bc0 100644
--- a/src/mongo/s/catalog_cache_loader.cpp
+++ b/src/mongo/s/catalog_cache_loader.cpp
@@ -50,6 +50,8 @@ CatalogCacheLoader::CollectionAndChangedChunks::CollectionAndChangedChunks(
     bool collShardKeyIsUnique,
     boost::optional<TypeCollectionTimeseriesFields> collTimeseriesFields,
     boost::optional<TypeCollectionReshardingFields> collReshardingFields,
+    boost::optional<int64_t> maxChunkSizeBytes,
+    bool allowAutoSplit,
     bool allowMigrations,
     std::vector<ChunkType> chunks)
     : epoch(std::move(collEpoch)),
@@ -60,6 +62,8 @@ CatalogCacheLoader::CollectionAndChangedChunks::CollectionAndChangedChunks(
       shardKeyIsUnique(collShardKeyIsUnique),
       timeseriesFields(std::move(collTimeseriesFields)),
       reshardingFields(std::move(collReshardingFields)),
+      maxChunkSizeBytes(std::move(maxChunkSizeBytes)),
+      allowAutoSplit(allowAutoSplit),
       allowMigrations(allowMigrations),
       changedChunks(std::move(chunks)) {}
diff --git a/src/mongo/s/catalog_cache_loader.h b/src/mongo/s/catalog_cache_loader.h
index a4afe4e7277..e16a8a3786a 100644
--- a/src/mongo/s/catalog_cache_loader.h
+++ b/src/mongo/s/catalog_cache_loader.h
@@ -75,6 +75,8 @@ public:
             bool collShardKeyIsUnique,
             boost::optional<TypeCollectionTimeseriesFields> collTimeseriesFields,
             boost::optional<TypeCollectionReshardingFields> collReshardingFields,
+            boost::optional<int64_t> maxChunkSizeBytes,
+            bool allowAutoSplit,
             bool allowMigrations,
             std::vector<ChunkType> chunks);
 
@@ -94,6 +96,10 @@ public:
         // populated.
         boost::optional<TypeCollectionReshardingFields> reshardingFields;
 
+        boost::optional<int64_t> maxChunkSizeBytes;
+
+        bool allowAutoSplit;
+
         bool allowMigrations;
 
         // The chunks which have changed sorted by their chunkVersion. This list might potentially
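Note the encoding chosen by the lambda above: the cache folds the pair (allowAutoSplit, maxChunkSizeBytes) into a single boost::optional<uint64_t>, reserving 0 — a value setMaxChunkSizeBytes can never persist — to mean "auto-split disabled". A hypothetical decoder (not part of the patch) makes the three states explicit:

    struct AutoSplitSettings {
        bool allowAutoSplit;
        boost::optional<uint64_t> maxChunkSizeBytes;  // none => balancer-wide default
    };

    AutoSplitSettings decodeMaxChunkSize(boost::optional<uint64_t> encoded) {
        if (!encoded)
            return {true, boost::none};   // no per-collection override
        if (*encoded == 0)
            return {false, boost::none};  // sentinel: noAutoSplit was set
        return {true, encoded};           // per-collection max chunk size
    }

This is exactly the inverse of ChunkManager::allowAutoSplit() introduced in chunk_manager.cpp below.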
diff --git a/src/mongo/s/catalog_cache_loader_mock.cpp b/src/mongo/s/catalog_cache_loader_mock.cpp
index 96bdd409054..f8dadc1e667 100644
--- a/src/mongo/s/catalog_cache_loader_mock.cpp
+++ b/src/mongo/s/catalog_cache_loader_mock.cpp
@@ -97,6 +97,8 @@ CollectionAndChangedChunks getCollectionRefresh(
         swCollectionReturnValue.getValue().getUnique(),
         swCollectionReturnValue.getValue().getTimeseriesFields(),
         reshardingFields,
+        swCollectionReturnValue.getValue().getMaxChunkSizeBytes(),
+        swCollectionReturnValue.getValue().getAllowAutoSplit(),
        swCollectionReturnValue.getValue().getAllowMigrations(),
        std::move(chunks)};
 }
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index f565d29f7f5..3dc98fbbf2d 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -320,6 +320,7 @@ RoutingTableHistory::RoutingTableHistory(
     bool unique,
     boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
     boost::optional<TypeCollectionReshardingFields> reshardingFields,
+    boost::optional<uint64_t> maxChunkSizeBytes,
     bool allowMigrations,
     ChunkMap chunkMap)
     : _nss(std::move(nss)),
@@ -329,6 +330,7 @@ RoutingTableHistory::RoutingTableHistory(
       _unique(unique),
       _timeseriesFields(std::move(timeseriesFields)),
       _reshardingFields(std::move(reshardingFields)),
+      _maxChunkSizeBytes(maxChunkSizeBytes),
       _allowMigrations(allowMigrations),
       _chunkMap(std::move(chunkMap)),
       _shardVersions(_chunkMap.constructShardVersionMap()) {}
@@ -691,6 +693,20 @@ bool ChunkManager::allowMigrations() const {
     return _rt->optRt->allowMigrations();
 }
 
+bool ChunkManager::allowAutoSplit() const {
+    const auto maxChunkSize = maxChunkSizeBytes();
+    if (!maxChunkSize)
+        return true;
+
+    return *maxChunkSize != 0;
+}
+
+boost::optional<uint64_t> ChunkManager::maxChunkSizeBytes() const {
+    if (!_rt->optRt)
+        return boost::none;
+    return _rt->optRt->maxChunkSizeBytes();
+}
+
 std::string ChunkManager::toString() const {
     return _rt->optRt ? _rt->optRt->toString() : "UNSHARDED";
 }
@@ -757,18 +773,21 @@ RoutingTableHistory RoutingTableHistory::makeNew(
     const boost::optional<Timestamp>& timestamp,
     boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
     boost::optional<TypeCollectionReshardingFields> reshardingFields,
+    boost::optional<uint64_t> maxChunkSizeBytes,
     bool allowMigrations,
     const std::vector<ChunkType>& chunks) {
+
+    auto changedChunkInfos = flatten(chunks);
     return RoutingTableHistory(std::move(nss),
                                std::move(uuid),
                                std::move(shardKeyPattern),
                                std::move(defaultCollator),
                                std::move(unique),
                                std::move(timeseriesFields),
-                               boost::none,
+                               std::move(reshardingFields),
+                               maxChunkSizeBytes,
                                allowMigrations,
-                               ChunkMap{epoch, timestamp})
-        .makeUpdated(std::move(reshardingFields), allowMigrations, chunks);
+                               ChunkMap{epoch, timestamp}.createMerged(changedChunkInfos));
 }
 
 // Note that any new parameters added to RoutingTableHistory::makeUpdated() must also be added to
@@ -776,6 +795,7 @@ RoutingTableHistory RoutingTableHistory::makeNew(
 // it may overlap with the enqueued metadata.
 RoutingTableHistory RoutingTableHistory::makeUpdated(
     boost::optional<TypeCollectionReshardingFields> reshardingFields,
+    boost::optional<uint64_t> maxChunkSizeBytes,
     bool allowMigrations,
     const std::vector<ChunkType>& changedChunks) const {
     auto changedChunkInfos = flatten(changedChunks);
@@ -791,6 +811,7 @@ RoutingTableHistory RoutingTableHistory::makeUpdated(
         isUnique(),
         _timeseriesFields,
         std::move(reshardingFields),
+        maxChunkSizeBytes,
         allowMigrations,
         std::move(chunkMap));
 }
@@ -822,6 +843,7 @@ RoutingTableHistory RoutingTableHistory::makeUpdatedReplacingTimestamp(
                                _unique,
                                _timeseriesFields,
                                _reshardingFields,
+                               _maxChunkSizeBytes,
                                _allowMigrations,
                                std::move(newMap));
 }
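With the signature change, every maker of a RoutingTableHistory now threads the optional chunk size between reshardingFields and allowMigrations. The call shape, condensed from the updated tests further down (identifiers such as kNss, uuid, epoch and chunks are assumed in scope, as they are there):

    auto rt = RoutingTableHistory::makeNew(kNss,
                                           uuid,
                                           shardKeyPattern,
                                           nullptr /* defaultCollator */,
                                           false /* unique */,
                                           epoch,
                                           boost::none /* timestamp */,
                                           boost::none /* timeseriesFields */,
                                           boost::none /* reshardingFields */,
                                           boost::none /* maxChunkSizeBytes */,
                                           true /* allowMigrations */,
                                           chunks);

As a side effect, makeNew no longer routes through makeUpdated: it merges the initial chunks directly via ChunkMap::createMerged, so reshardingFields is passed straight to the constructor instead of the boost::none placeholder it used before.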
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index 64f306626a0..970173ea07b 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -180,6 +180,7 @@ public:
         const boost::optional<Timestamp>& timestamp,
         boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
         boost::optional<TypeCollectionReshardingFields> reshardingFields,
+        boost::optional<uint64_t> maxChunkSizeBytes,
         bool allowMigrations,
         const std::vector<ChunkType>& chunks);
 
@@ -197,6 +198,7 @@ public:
      */
     RoutingTableHistory makeUpdated(
         boost::optional<TypeCollectionReshardingFields> reshardingFields,
+        boost::optional<uint64_t> maxChunkSizeBytes,
         bool allowMigrations,
         const std::vector<ChunkType>& changedChunks) const;
 
@@ -323,6 +325,11 @@ public:
         return _allowMigrations;
     }
 
+    // Collection-specific max chunk size in bytes; 0 means auto-splitting is
+    // disabled, boost::none means the balancer-wide default applies
+    boost::optional<uint64_t> maxChunkSizeBytes() const {
+        return _maxChunkSizeBytes;
+    }
+
 private:
     friend class ChunkManager;
@@ -333,6 +340,7 @@ private:
         bool unique,
         boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
         boost::optional<TypeCollectionReshardingFields> reshardingFields,
+        boost::optional<uint64_t> maxChunkSizeBytes,
         bool allowMigrations,
         ChunkMap chunkMap);
@@ -362,6 +370,8 @@ private:
     // for this collection.
     boost::optional<TypeCollectionReshardingFields> _reshardingFields;
 
+    boost::optional<uint64_t> _maxChunkSizeBytes;
+
     bool _allowMigrations;
 
     // Map from the max for each chunk to an entry describing the chunk. The union of all chunks'
@@ -531,6 +541,10 @@ public:
      */
     bool allowMigrations() const;
 
+    bool allowAutoSplit() const;
+
+    boost::optional<uint64_t> maxChunkSizeBytes() const;
+
     const ShardId& dbPrimary() const {
         return _dbPrimary;
     }
diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp
index b51886d4089..47b325d5414 100644
--- a/src/mongo/s/chunk_manager_query_test.cpp
+++ b/src/mongo/s/chunk_manager_query_test.cpp
@@ -521,6 +521,7 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
                                              boost::none /* timestamp */,
                                              boost::none /* timeseriesFields */,
                                              boost::none,
+                                             boost::none /* chunkSizeBytes */,
                                              true,
                                              {chunk0, chunk1});
 
@@ -531,11 +532,11 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
     chunk1.setHistory({ChunkHistory(Timestamp(20, 0), ShardId("0")),
                        ChunkHistory(Timestamp(1, 0), ShardId("1"))});
 
-    ChunkManager chunkManager(
-        ShardId("0"),
-        DatabaseVersion(UUID::gen()),
-        makeStandaloneRoutingTableHistory(oldRoutingTable.makeUpdated(boost::none, true, {chunk1})),
-        Timestamp(5, 0));
+    ChunkManager chunkManager(ShardId("0"),
+                              DatabaseVersion(UUID::gen()),
+                              makeStandaloneRoutingTableHistory(oldRoutingTable.makeUpdated(
+                                  boost::none, boost::none, true, {chunk1})),
+                              Timestamp(5, 0));
 
     std::set<ShardId> shardIds;
     chunkManager.getShardIdsForRange(BSON("x" << MINKEY), BSON("x" << MAXKEY), &shardIds);
diff --git a/src/mongo/s/chunk_manager_refresh_bm.cpp b/src/mongo/s/chunk_manager_refresh_bm.cpp
index c936ce9b85e..dd1012548dc 100644
--- a/src/mongo/s/chunk_manager_refresh_bm.cpp
+++ b/src/mongo/s/chunk_manager_refresh_bm.cpp
@@ -88,6 +88,7 @@ CollectionMetadata makeChunkManagerWithShardSelector(int nShards,
                                              boost::none /* timestamp */,
                                              boost::none /* timeseriesFields */,
                                              boost::none,
+                                             boost::none /* chunkSizeBytes */,
                                              true,
                                              chunks);
     return CollectionMetadata(ChunkManager(ShardId("Shard0"),
@@ -120,7 +121,7 @@ MONGO_COMPILER_NOINLINE auto makeChunkManagerWithOptimalBalancedDistribution(int
 MONGO_COMPILER_NOINLINE auto runIncrementalUpdate(const CollectionMetadata& cm,
                                                   const std::vector<ChunkType>& newChunks) {
     auto rt = cm.getChunkManager()->getRoutingTableHistory_ForTest().makeUpdated(
-        boost::none, true, newChunks);
+        boost::none, boost::none, true, newChunks);
     return CollectionMetadata(ChunkManager(ShardId("shard0"),
                                            DatabaseVersion(UUID::gen()),
                                            makeStandaloneRoutingTableHistory(std::move(rt)),
@@ -178,6 +179,7 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS
         boost::none /* timestamp */,
         boost::none /* timeseriesFields */,
         boost::none,
+        boost::none /* chunkSizeBytes */,
         true,
         chunks);
     benchmark::DoNotOptimize(
diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript
index cda5515584f..ae001743249 100644
--- a/src/mongo/s/commands/SConscript
+++ b/src/mongo/s/commands/SConscript
@@ -36,6 +36,7 @@ env.Library(
         'cluster_commit_transaction_cmd.cpp',
         'cluster_commit_reshard_collection_cmd.cpp',
         'cluster_compact_cmd.cpp',
+        'cluster_configure_collection_auto_split.cpp',
         'cluster_control_balancer_cmd.cpp',
         'cluster_count_cmd.cpp',
         'cluster_create_cmd.cpp',
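A plausible consumer of the new accessor (this helper is not part of the patch): resolve the chunk size the auto-splitter should act on, falling back to the cluster-wide balancer setting when no per-collection value is cached:

    // Hypothetical: 0 is the "never split" sentinel, so callers should check
    // ChunkManager::allowAutoSplit() before acting on the returned size.
    uint64_t effectiveMaxChunkSizeBytes(OperationContext* opCtx, const ChunkManager& cm) {
        if (auto perCollection = cm.maxChunkSizeBytes())
            return *perCollection;
        return Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes();
    }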
diff --git a/src/mongo/s/commands/cluster_configure_collection_auto_split.cpp b/src/mongo/s/commands/cluster_configure_collection_auto_split.cpp
new file mode 100644
index 00000000000..3982eadab7e
--- /dev/null
+++ b/src/mongo/s/commands/cluster_configure_collection_auto_split.cpp
@@ -0,0 +1,117 @@
+/**
+ *    Copyright (C) 2021-present MongoDB, Inc.
+ *
+ *    This program is free software: you can redistribute it and/or modify
+ *    it under the terms of the Server Side Public License, version 1,
+ *    as published by MongoDB, Inc.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *    Server Side Public License for more details.
+ *
+ *    You should have received a copy of the Server Side Public License
+ *    along with this program. If not, see
+ *    <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ *    As a special exception, the copyright holders give permission to link the
+ *    code of portions of this program with the OpenSSL library under certain
+ *    conditions as described in each individual source file and distribute
+ *    linked combinations including the program with the OpenSSL library. You
+ *    must comply with the Server Side Public License in all respects for
+ *    all of the code used other than as permitted herein. If you modify file(s)
+ *    with this exception, you may extend this exception to your version of the
+ *    file(s), but you are not obligated to do so. If you do not wish to do so,
+ *    delete this exception statement from your version. If you delete this
+ *    exception statement from all source files in the program, then also delete
+ *    it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/auth/action_set.h"
+#include "mongo/db/auth/action_type.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/privilege.h"
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/client.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/idl/idl_parser.h"
+#include "mongo/s/catalog_cache_loader.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/request_types/configure_collection_auto_split_gen.h"
+
+namespace mongo {
+namespace {
+
+class ConfigCollAutoSplitCmd final : public TypedCommand<ConfigCollAutoSplitCmd> {
+public:
+    using Request = ConfigureCollAutoSplit;
+
+    class Invocation final : public InvocationBase {
+    public:
+        using InvocationBase::InvocationBase;
+
+        StringData kStatusField = "status"_sd;
+
+        void typedRun(OperationContext* opCtx) {
+            opCtx->setAlwaysInterruptAtStepDownOrUp();
+            const NamespaceString& nss = ns();
+
+            ConfigsvrConfigureCollAutoSplit configsvrRequest(nss);
+            configsvrRequest.setConfigureCollAutoSplit(request().getConfigureCollAutoSplit());
+            configsvrRequest.setDbName(request().getDbName());
+
+            auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+            auto cmdResponse = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
+                opCtx,
+                ReadPreferenceSetting(ReadPreference::PrimaryOnly),
+                NamespaceString::kAdminDb.toString(),
+                configsvrRequest.toBSON({}),
+                Shard::RetryPolicy::kIdempotent));
+
+            uassertStatusOK(cmdResponse.commandStatus);
+        }
+
+    private:
+        NamespaceString ns() const override {
+            return request().getCommandParameter();
+        }
+
+        bool supportsWriteConcern() const override {
+            return false;
+        }
+
+        void doCheckAuthorization(OperationContext* opCtx) const override {
+            ActionSet actions({ActionType::splitChunk});
+            // TODO: SERVER-58908 add balancer merge parameter
+            uassert(ErrorCodes::Unauthorized,
+                    "Unauthorized",
+                    AuthorizationSession::get(opCtx->getClient())
+                        ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns()),
+                                                           actions));
+        }
+    };
+
+    std::string help() const override {
+        return "command to configure the auto-split settings of a given collection, i.e. "
+               "whether the auto-splitter may split the collection's chunks and which "
+               "default chunk size it should apply";
+    }
+
+    bool adminOnly() const override {
+        return true;
+    }
+
+    AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
+        return AllowedOnSecondary::kNever;
+    }
+
+} configureCollectionAutoSplitCmd;
+
+}  // namespace
+}  // namespace mongo
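The mongos command above does no work of its own: it wraps the parameters in the internal _configsvr request and forwards it to the config server primary with an idempotent retry policy. From a client, the public command would be sent against the admin database roughly as follows (namespace and values are illustrative):

    // Field names come from configure_collection_auto_split.idl below.
    const BSONObj cmd = BSON("configureCollectionAutoSplitter"
                             << "test.foo"
                             << "defaultChunkSize" << 256  // megabytes, per the IDL
                             << "enableAutoSplitter" << false);
    // e.g. client.runCommand(OpMsgRequest::fromDBAndBody("admin", cmd));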
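One unit mismatch to keep in mind: the request carries defaultChunkSize in megabytes (cpp_name defaultChunkSizeMB), while CollectionType persists maxChunkSizeBytes. The config-server handler is not part of this diff, but under that caveat the conversion it would have to perform looks roughly like this:

    // Assumed config-server-side handling of the forwarded request.
    if (auto chunkSizeMB = request.getDefaultChunkSizeMB()) {
        // setMaxChunkSizeBytes uasserts on values <= 0, so a zero or negative
        // megabyte count from the client would be rejected here.
        coll.setMaxChunkSizeBytes(static_cast<int64_t>(*chunkSizeMB) * 1024 * 1024);
    }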
diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp
index 18a2d7592c6..0065acfb04a 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.cpp
+++ b/src/mongo/s/config_server_catalog_cache_loader.cpp
@@ -77,6 +77,8 @@ CollectionAndChangedChunks getChangedChunks(OperationContext* opCtx,
             coll.getUnique(),
             coll.getTimeseriesFields(),
             coll.getReshardingFields(),
+            coll.getMaxChunkSizeBytes(),
+            coll.getAllowAutoSplit(),
             coll.getAllowMigrations(),
             std::move(collAndChunks.second)};
 }
diff --git a/src/mongo/s/request_types/configure_collection_auto_split.idl b/src/mongo/s/request_types/configure_collection_auto_split.idl
new file mode 100644
index 00000000000..0a3121d8256
--- /dev/null
+++ b/src/mongo/s/request_types/configure_collection_auto_split.idl
@@ -0,0 +1,77 @@
+# Copyright (C) 2021-present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+# _configsvrConfigureCollectionAutoSplitter and configureCollectionAutoSplitter IDL File
+
+global:
+    cpp_namespace: "mongo"
+
+imports:
+    - "mongo/idl/basic_types.idl"
+
+structs:
+    configure_auto_split_params:
+        description: "Parameters for configureCollectionAutoSplitter command"
+        fields:
+            defaultChunkSize:
+                type: safeInt
+                cpp_name: defaultChunkSizeMB
+                description: "New default chunk size in MB."
+                optional: true
+            enableAutoSplitter:
+                type: bool
+                description: "Specifies whether the auto-splitter should be running or not for this collection."
+                optional: true
+
+
+commands:
+    configureAutoSplit:
+        command_name: configureCollectionAutoSplitter
+        cpp_name: ConfigureCollAutoSplit
+        description: "Public configureCollectionAutoSplitter command on mongos"
+        strict: true
+        namespace: type
+        api_version: ""
+        type: namespacestring
+        inline_chained_structs: true
+        chained_structs:
+            configure_auto_split_params:
+                cpp_name: ConfigureCollAutoSplit
+
+    _configsvrConfigureAutoSplit:
+        command_name: _configsvrConfigureAutoSplit
+        cpp_name: ConfigsvrConfigureCollAutoSplit
+        description: "Internal configureCollectionAutoSplitter command on the config server"
+        strict: true
+        namespace: type
+        api_version: ""
+        type: namespacestring
+        inline_chained_structs: true
+        chained_structs:
+            configure_auto_split_params:
+                cpp_name: ConfigureCollAutoSplit
diff --git a/src/mongo/s/routing_table_history_test.cpp b/src/mongo/s/routing_table_history_test.cpp
index 479c3610b08..5b1dd71ee15 100644
--- a/src/mongo/s/routing_table_history_test.cpp
+++ b/src/mongo/s/routing_table_history_test.cpp
@@ -71,7 +71,7 @@ RoutingTableHistory splitChunk(const RoutingTableHistory& rt,
         newChunks.emplace_back(kNss, range, curVersion, kThisShard);
     }
 
-    return rt.makeUpdated(boost::none, true, newChunks);
+    return rt.makeUpdated(boost::none, boost::none, true, newChunks);
 }
 
 /**
@@ -169,6 +169,7 @@ public:
                                              boost::none /* timestamp */,
                                              boost::none /* timeseriesFields */,
                                              boost::none,
+                                             boost::none /* chunkSizeBytes */,
                                              true,
                                              {initChunk}));
         ASSERT_EQ(_rt->numChunks(), 1ull);
@@ -344,6 +345,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
                                            boost::none /* timestamp */,
                                            boost::none /* timeseriesFields */,
                                            boost::none,
+                                           boost::none /* chunkSizeBytes */,
                                            true,
                                            {chunkAll});
 
@@ -357,7 +359,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
                           ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
                           kThisShard}};
 
-    auto rt1 = rt.makeUpdated(boost::none, true, chunks1);
+    auto rt1 = rt.makeUpdated(boost::none, boost::none, true, chunks1);
     auto v1 = ChunkVersion{2, 2, epoch, boost::none /* timestamp */};
     ASSERT_EQ(v1, rt1.getVersion(kThisShard));
 
@@ -375,7 +377,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
                           ChunkVersion{3, 2, epoch, boost::none /* timestamp */},
                           kThisShard}};
 
-    auto rt2 = rt1.makeUpdated(boost::none, true, chunks2);
+    auto rt2 = rt1.makeUpdated(boost::none, boost::none, true, chunks2);
     auto v2 = ChunkVersion{3, 2, epoch, boost::none /* timestamp */};
     ASSERT_EQ(v2, rt2.getVersion(kThisShard));
 }
@@ -398,6 +400,7 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
                                            boost::none /* timestamp */,
                                            boost::none /* timeseriesFields */,
                                            boost::none,
+                                           boost::none /* chunkSizeBytes */,
                                            true,
                                            initialChunks);
     ASSERT_EQ(rt.numChunks(), 1);
@@ -412,7 +415,7 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
                           ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
                           kThisShard}};
 
-    auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
+    auto rt1 = rt.makeUpdated(boost::none, boost::none, true, changedChunks);
     auto v1 = ChunkVersion{2, 2, epoch, boost::none /* timestamp */};
     ASSERT_EQ(v1, rt1.getVersion(kThisShard));
     ASSERT_EQ(rt1.numChunks(), 2);
@@ -449,6 +452,7 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
                                            boost::none /* timestamp */,
                                            boost::none /* timeseriesFields */,
                                            boost::none,
+                                           boost::none /* chunkSizeBytes */,
                                            true,
                                            initialChunks);
     ASSERT_EQ(rt.numChunks(), 1);
@@ -467,7 +471,7 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
                           ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
                           kThisShard}};
 
-    auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
+    auto rt1 = rt.makeUpdated(boost::none, boost::none, true, changedChunks);
     auto v1 = ChunkVersion{2, 2, epoch, boost::none /* timestamp */};
     ASSERT_EQ(v1, rt1.getVersion(kThisShard));
     ASSERT_EQ(rt1.numChunks(), 2);
@@ -495,6 +499,7 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
                                            boost::none /* timestamp */,
                                            boost::none /* timeseriesFields */,
                                            boost::none,
+                                           boost::none /* chunkSizeBytes */,
                                            true,
                                            initialChunks);
     ASSERT_EQ(rt.numChunks(), 2);
@@ -509,7 +514,7 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
                           ChunkVersion{3, 1, epoch, boost::none /* timestamp */},
                           kThisShard}};
 
-    auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
+    auto rt1 = rt.makeUpdated(boost::none, boost::none, true, changedChunks);
     auto v1 = ChunkVersion{3, 1, epoch, boost::none /* timestamp */};
     ASSERT_EQ(v1, rt1.getVersion(kThisShard));
     ASSERT_EQ(rt1.numChunks(), 2);
@@ -546,6 +551,7 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
                                            boost::none,
                                            boost::none /* timestamp */,
                                            boost::none /* timeseriesFields */,
+                                           boost::none /* chunkSizeBytes */,
                                            true,
                                            initialChunks);
     ASSERT_EQ(rt.numChunks(), 3);
@@ -561,7 +567,7 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
                           ChunkVersion{3, 1, epoch, boost::none /* timestamp */},
                           kThisShard}};
 
-    auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
+    auto rt1 = rt.makeUpdated(boost::none, boost::none, true, changedChunks);
     auto v1 = ChunkVersion{3, 1, epoch, boost::none /* timestamp */};
     ASSERT_EQ(v1, rt1.getVersion(kThisShard));
     ASSERT_EQ(rt1.numChunks(), 2);
@@ -593,6 +599,7 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
                                            boost::none /* timestamp */,
                                            boost::none /* timeseriesFields */,
                                            boost::none,
+                                           boost::none /* chunkSizeBytes */,
                                            true,
                                            initialChunks);
     ASSERT_EQ(rt.numChunks(), 3);
@@ -608,7 +615,7 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
                           ChunkVersion{3, 1, epoch, boost::none /* timestamp */},
                           kThisShard}};
 
-    auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
+    auto rt1 = rt.makeUpdated(boost::none, boost::none, true, changedChunks);
     auto v1 = ChunkVersion{3, 1, epoch, boost::none /* timestamp */};
     ASSERT_EQ(v1, rt1.getVersion(kThisShard));
     ASSERT_EQ(rt1.numChunks(), 2);
@@ -658,6 +665,7 @@ TEST_F(RoutingTableHistoryTest, TestFlatten) {
                                            boost::none /* timestamp */,
                                            boost::none /* timeseriesFields */,
                                            boost::none,
+                                           boost::none /* chunkSizeBytes */,
                                            true,
                                            initialChunks);
     ASSERT_EQ(rt.numChunks(), 2);
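The updated tests only thread boost::none through the new parameter; a natural follow-on (not in this diff) would pin a real value and assert it survives an incremental update. A sketch in the style of these fixtures, assuming a hypothetical accessor for the fixture's initial routing table:

    TEST_F(RoutingTableHistoryTest, MakeUpdatedPreservesMaxChunkSize) {
        const uint64_t kChunkSize = 64 * 1024 * 1024;  // arbitrary 64 MiB
        // initialRoutingTable() is assumed, standing in for the fixture's _rt.
        auto rt1 = initialRoutingTable().makeUpdated(
            boost::none /* reshardingFields */, kChunkSize, true /* allowMigrations */, {});
        ASSERT_EQ(kChunkSize, *rt1.maxChunkSizeBytes());
    }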