author    Simon Graetzer <simon.gratzer@mongodb.com>  2021-07-30 11:41:40 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-07-30 12:09:37 +0000
commit    66708080ab5db302558f7bd33293167d181f3858 (patch)
tree      91dc9abafabbc7456b1db6d422087d2e04ba5bf2
parent    a05959f3e16b909dd76f7935b7fb529d02942dff (diff)
download  mongo-66708080ab5db302558f7bd33293167d181f3858.tar.gz
SERVER-58270 Create configureCollectionAutoSplitter command
 jstests/core/views/views_all_commands.js | 2
 jstests/replsets/db_reads_while_recovering_all_commands.js | 2
 jstests/sharding/autosplit_configure_collection.js | 142
 jstests/sharding/database_versioning_all_commands.js | 1
 jstests/sharding/read_write_concern_defaults_application.js | 2
 jstests/sharding/safe_secondary_reads_drop_recreate.js | 2
 jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js | 2
 jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js | 2
 src/mongo/db/s/SConscript | 1
 src/mongo/db/s/balancer/balance_stats_test.cpp | 1
 src/mongo/db/s/balancer/balancer_chunk_selection_policy.h | 2
 src/mongo/db/s/chunk_splitter.cpp | 53
 src/mongo/db/s/collection_metadata_filtering_test.cpp | 1
 src/mongo/db/s/collection_metadata_test.cpp | 1
 src/mongo/db/s/collection_sharding_runtime_test.cpp | 1
 src/mongo/db/s/config/configsvr_configure_collection_auto_split.cpp | 119
 src/mongo/db/s/config/sharding_catalog_manager.h | 13
 src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 6
 src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp | 82
 src/mongo/db/s/metadata_manager_test.cpp | 5
 src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp | 3
 src/mongo/db/s/op_observer_sharding_test.cpp | 1
 src/mongo/db/s/range_deletion_util_test.cpp | 1
 src/mongo/db/s/resharding/resharding_data_replication_test.cpp | 1
 src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h | 1
 src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp | 1
 src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp | 1
 src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp | 1
 src/mongo/db/s/resharding/resharding_recipient_service_test.cpp | 1
 src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 8
 src/mongo/db/s/shard_server_op_observer.cpp | 12
 src/mongo/db/s/type_shard_collection.h | 4
 src/mongo/db/s/type_shard_collection.idl | 8
 src/mongo/s/SConscript | 1
 src/mongo/s/catalog/type_collection.cpp | 6
 src/mongo/s/catalog/type_collection.h | 10
 src/mongo/s/catalog/type_collection.idl | 10
 src/mongo/s/catalog_cache.cpp | 16
 src/mongo/s/catalog_cache_loader.cpp | 4
 src/mongo/s/catalog_cache_loader.h | 6
 src/mongo/s/catalog_cache_loader_mock.cpp | 2
 src/mongo/s/chunk_manager.cpp | 28
 src/mongo/s/chunk_manager.h | 14
 src/mongo/s/chunk_manager_query_test.cpp | 11
 src/mongo/s/chunk_manager_refresh_bm.cpp | 4
 src/mongo/s/commands/SConscript | 1
 src/mongo/s/commands/cluster_configure_collection_auto_split.cpp | 117
 src/mongo/s/config_server_catalog_cache_loader.cpp | 2
 src/mongo/s/request_types/configure_collection_auto_split.idl | 77
 src/mongo/s/routing_table_history_test.cpp | 24
 src/mongo/shell/utils_sh.js | 11
 51 files changed, 780 insertions(+), 47 deletions(-)
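
The change introduces a user-facing mongos command, configureCollectionAutoSplitter, backed by an internal config server command, _configsvrConfigureAutoSplit. A minimal shell sketch of the user-facing form, with parameter names taken from the new jstest below (the mongos address is an assumption):

```js
// Sketch only: parameter names follow jstests/sharding/autosplit_configure_collection.js.
const mongos = new Mongo("localhost:27017");  // assumption: points at a mongos
const admin = mongos.getDB("admin");

// Keep the auto-splitter on for test.foo, but cap its chunks at 10MB.
assert.commandWorked(admin.runCommand({
    configureCollectionAutoSplitter: "test.foo",
    enableAutoSplitter: true,
    defaultChunkSize: 10,  // in MB; persisted as maxChunkSizeBytes = 10 * 1024 * 1024
}));

// Disable the auto-splitter for the collection entirely.
assert.commandWorked(admin.runCommand(
    {configureCollectionAutoSplitter: "test.foo", enableAutoSplitter: false}));
```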
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 31a315730b3..defb55f4125 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -91,6 +91,7 @@ let viewsCommandTests = {
_configsvrCommitMovePrimary:
{skip: isAnInternalCommand}, // Can be removed once 6.0 is last LTS
_configsvrCommitReshardCollection: {skip: isAnInternalCommand},
+ _configsvrConfigureAutoSplit: {skip: isAnInternalCommand},
_configsvrCreateDatabase: {skip: isAnInternalCommand},
_configsvrDropCollection:
{skip: isAnInternalCommand}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
@@ -206,6 +207,7 @@ let viewsCommandTests = {
commitTransaction: {skip: isUnrelated},
compact: {command: {compact: "view", force: true}, expectFailure: true, skipSharded: true},
+ configureCollectionAutoSplitter: {skip: isUnrelated},
configureFailPoint: {skip: isUnrelated},
connPoolStats: {skip: isUnrelated},
connPoolSync: {skip: isUnrelated},
connectionStatus: {skip: isUnrelated},
diff --git a/jstests/replsets/db_reads_while_recovering_all_commands.js b/jstests/replsets/db_reads_while_recovering_all_commands.js
index 691d476cb07..d108877e9d4 100644
--- a/jstests/replsets/db_reads_while_recovering_all_commands.js
+++ b/jstests/replsets/db_reads_while_recovering_all_commands.js
@@ -39,6 +39,7 @@ const allCommands = {
_configsvrCommitChunkMigration: {skip: isPrimaryOnly},
_configsvrCommitChunkSplit: {skip: isPrimaryOnly},
_configsvrCommitReshardCollection: {skip: isPrimaryOnly},
+ _configsvrConfigureAutoSplit: {skip: isPrimaryOnly},
_configsvrCreateDatabase: {skip: isPrimaryOnly},
_configsvrEnsureChunkVersionIsGreaterThan: {skip: isPrimaryOnly},
_configsvrMoveChunk: {skip: isPrimaryOnly},
@@ -115,6 +116,7 @@ const allCommands = {
commitTransaction: {skip: isPrimaryOnly},
compact: {skip: isNotAUserDataRead},
+ configureCollectionAutoSplitter: {skip: isPrimaryOnly},
configureFailPoint: {skip: isNotAUserDataRead},
connPoolStats: {skip: isNotAUserDataRead},
connPoolSync: {skip: isNotAUserDataRead},
connectionStatus: {skip: isNotAUserDataRead},
diff --git a/jstests/sharding/autosplit_configure_collection.js b/jstests/sharding/autosplit_configure_collection.js
new file mode 100644
index 00000000000..bd252c5e0b4
--- /dev/null
+++ b/jstests/sharding/autosplit_configure_collection.js
@@ -0,0 +1,142 @@
+/**
+ * This test confirms that chunks get split according to a collection specific setting as they grow
+ * due to data insertion.
+ *
+ * @tags: [
+ * requires_fcv_51
+ * ]
+ */
+(function() {
+'use strict';
+load('jstests/sharding/autosplit_include.js');
+load("jstests/sharding/libs/find_chunks_util.js");
+
+let st = new ShardingTest({
+ name: "auto1",
+ shards: 2,
+ mongos: 1,
+ other: {enableAutoSplit: true},
+});
+
+const fullNS = "test.foo";
+const bigString = "X".repeat(1024 * 1024 / 16); // 64 KiB per document
+
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({shardcollection: fullNS, key: {num: 1}}));
+
+let db = st.getDB("test");
+let coll = db.foo;
+
+let i = 0;
+
+// Inserts numDocs documents into the collection, waits for any ongoing
+// splits to finish, and then prints some information about the
+// collection's chunks
+function insertDocsAndWaitForSplit(numDocs) {
+ let bulk = coll.initializeUnorderedBulkOp();
+ let curMaxKey = i;
+ // Increment the global 'i' variable to keep 'num' unique across all
+ // documents
+ for (; i < curMaxKey + numDocs; i++) {
+ bulk.insert({num: i, s: bigString});
+ }
+ assert.commandWorked(bulk.execute());
+
+ waitForOngoingChunkSplits(st);
+
+ st.printChunks();
+ st.printChangeLog();
+}
+
+let configDB = db.getSiblingDB('config');
+
+jsTest.log("Testing enableAutoSplitter == false, defaultChunkSize=unset ...");
+{
+ assert.commandWorked(
+ st.s0.adminCommand({configureCollectionAutoSplitter: fullNS, enableAutoSplitter: false}));
+
+ let configColl = configDB.collections.findOne({_id: fullNS});
+
+ // Check that noAutoSplit has been set to 'true' on the configsvr config.collections
+ assert.eq(true, configColl.noAutoSplit);
+ assert.eq(configColl.maxChunkSizeBytes, undefined);
+
+ // Accumulate ~1MB of documents
+ insertDocsAndWaitForSplit(16);
+
+ // No split should have been performed
+ assert.eq(
+ findChunksUtil.countChunksForNs(st.config, fullNS), 1, "Number of chunks is more than one");
+}
+
+jsTest.log("Testing enableAutoSplitter == true, defaultChunkSize=unset ...");
+{
+ assert.commandWorked(
+ st.s0.adminCommand({configureCollectionAutoSplitter: fullNS, enableAutoSplitter: true}));
+
+ let configColl = configDB.collections.findOne({_id: fullNS});
+
+ // Check that noAutoSplit has been set to 'false' on the configsvr config.collections
+ assert.eq(false, configColl.noAutoSplit);
+ assert.eq(configColl.maxChunkSizeBytes, undefined);
+
+ // Add ~1MB of documents
+ insertDocsAndWaitForSplit(16);
+
+ // No split should have been performed: ~2MB of data is far below the 64MB default chunk size
+ assert.eq(
+ findChunksUtil.countChunksForNs(st.config, fullNS), 1, "Number of chunks is more than one");
+}
+
+jsTest.log("Testing enableAutoSplitter == false, defaultChunkSize=1 ...");
+{
+ assert.commandWorked(st.s0.adminCommand(
+ {configureCollectionAutoSplitter: fullNS, enableAutoSplitter: false, defaultChunkSize: 1}));
+
+ let configColl = configDB.collections.findOne({_id: fullNS});
+
+ // Check that noAutoSplit has been set to 'true' and maxChunkSizeBytes to 1MB on the configsvr config.collections
+ assert.eq(true, configColl.noAutoSplit);
+ assert.eq(configColl.maxChunkSizeBytes, 1024 * 1024);
+
+ // Reach ~3MB of documents total
+ insertDocsAndWaitForSplit(16);
+
+ assert.eq(16 * 3, db.foo.find().itcount());
+
+ // No split should have been performed: the auto-splitter is disabled for this collection
+ assert.eq(
+ findChunksUtil.countChunksForNs(st.config, fullNS), 1, "Number of chunks is more than one");
+}
+
+jsTest.log("Testing enableAutoSplitter == true, defaultChunkSize=10 ...");
+{
+ assert.commandWorked(st.s0.adminCommand(
+ {configureCollectionAutoSplitter: fullNS, enableAutoSplitter: true, defaultChunkSize: 10}));
+
+ let configColl = configDB.collections.findOne({_id: fullNS});
+
+ // Check that noAutoSplit has been set to 'false' and maxChunkSizeBytes to 10MB.
+ assert.eq(configColl.noAutoSplit, false);
+ assert.eq(configColl.maxChunkSizeBytes, 10 * 1024 * 1024);
+
+ // Add ~10MB of documents
+ insertDocsAndWaitForSplit(16 * 10);
+ assert.gte(findChunksUtil.countChunksForNs(st.config, fullNS),
+ 2,
+ "Number of chunks is less then 2, no split have been perfomed");
+ assert.eq(16 * (2 + 1 + 10), db.foo.find().itcount());
+
+ // Add ~10MB of documents
+ insertDocsAndWaitForSplit(16 * 10);
+ assert.gte(findChunksUtil.countChunksForNs(st.config, fullNS),
+ 3,
+ "Number of chunks is less then 3, no split have been perfomed");
+ assert.eq(16 * (2 + 1 + 10 + 10), db.foo.find().itcount());
+}
+
+printjson(db.stats());
+
+st.stop();
+})();
diff --git a/jstests/sharding/database_versioning_all_commands.js b/jstests/sharding/database_versioning_all_commands.js
index b95be6a9da3..d056fe1fada 100644
--- a/jstests/sharding/database_versioning_all_commands.js
+++ b/jstests/sharding/database_versioning_all_commands.js
@@ -289,6 +289,7 @@ let testCases = {
commitReshardCollection: {skip: "always targets the config server"},
commitTransaction: {skip: "unversioned and uses special targetting rules"},
compact: {skip: "not allowed through mongos"},
+ configureCollectionAutoSplitter: {skip: "does not forward command to primary shard"},
configureFailPoint: {skip: "executes locally on mongos (not sent to any remote node)"},
connPoolStats: {skip: "executes locally on mongos (not sent to any remote node)"},
connPoolSync: {skip: "executes locally on mongos (not sent to any remote node)"},
diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js
index b0928d011ec..d8b00f672bf 100644
--- a/jstests/sharding/read_write_concern_defaults_application.js
+++ b/jstests/sharding/read_write_concern_defaults_application.js
@@ -89,6 +89,7 @@ let testCases = {
_configsvrCommitChunkSplit: {skip: "internal command"},
_configsvrCommitMovePrimary: {skip: "internal command"}, // Can be removed once 6.0 is last LTS
_configsvrCommitReshardCollection: {skip: "internal command"},
+ _configsvrConfigureAutoSplit: {skip: "internal command"},
_configsvrCreateDatabase: {skip: "internal command"},
_configsvrDropCollection:
{skip: "internal command"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
@@ -254,6 +255,7 @@ let testCases = {
useLogs: true,
},
compact: {skip: "does not accept read or write concern"},
+ configureCollectionAutoSplitter: {skip: "does not accept read or write concern"},
configureFailPoint: {skip: "does not accept read or write concern"},
connPoolStats: {skip: "does not accept read or write concern"},
connPoolSync: {skip: "internal command"},
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index 590285864c7..4dbcd814530 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -56,6 +56,7 @@ let testCases = {
_configsvrCommitChunkSplit: {skip: "primary only"},
_configsvrCommitMovePrimary:
{skip: "primary only"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
+ _configsvrConfigureAutoSplit: {skip: "primary only"},
_configsvrDropCollection:
{skip: "primary only"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_configsvrDropDatabase:
@@ -122,6 +123,7 @@ let testCases = {
commitReshardCollection: {skip: "primary only"},
commitTransaction: {skip: "primary only"},
compact: {skip: "does not return user data"},
+ configureCollectionAutoSplitter: {skip: "does not return user data"},
configureFailPoint: {skip: "does not return user data"},
connPoolStats: {skip: "does not return user data"},
connPoolSync: {skip: "does not return user data"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index ad14e40e30c..6a2b6a7054f 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -62,6 +62,7 @@ let testCases = {
_configsvrCommitChunkSplit: {skip: "primary only"},
_configsvrCommitMovePrimary:
{skip: "primary only"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
+ _configsvrConfigureAutoSplit: {skip: "primary only"},
_configsvrDropCollection:
{skip: "primary only"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_configsvrDropDatabase:
@@ -134,6 +135,7 @@ let testCases = {
cloneCollectionAsCapped: {skip: "primary only"},
commitReshardCollection: {skip: "primary only"},
commitTransaction: {skip: "primary only"},
collMod: {skip: "primary only"},
collStats: {skip: "does not return user data"},
compact: {skip: "does not return user data"},
+ configureCollectionAutoSplitter: {skip: "does not return user data"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index 0f644bd7702..6f22ec61ab8 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -52,6 +52,7 @@ let testCases = {
_configsvrCommitChunkSplit: {skip: "primary only"},
_configsvrCommitMovePrimary:
{skip: "primary only"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
+ _configsvrConfigureAutoSplit: {skip: "primary only"},
_configsvrDropCollection:
{skip: "primary only"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_configsvrDropDatabase:
@@ -122,6 +123,7 @@ let testCases = {
commitReshardCollection: {skip: "primary only"},
commitTransaction: {skip: "primary only"},
compact: {skip: "does not return user data"},
+ configureCollectionAutoSplitter: {skip: "does not return user data"},
configureFailPoint: {skip: "does not return user data"},
connPoolStats: {skip: "does not return user data"},
connPoolSync: {skip: "does not return user data"},
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 3da2fcea030..eafed18ef19 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -301,6 +301,7 @@ env.Library(
'config/configsvr_clear_jumbo_flag_command.cpp',
'config/configsvr_commit_chunk_migration_command.cpp',
'config/configsvr_commit_reshard_collection_command.cpp',
+ 'config/configsvr_configure_collection_auto_split.cpp',
'config/configsvr_control_balancer_command.cpp',
'config/configsvr_create_database_command.cpp',
'config/configsvr_ensure_chunk_version_is_greater_than_command.cpp',
diff --git a/src/mongo/db/s/balancer/balance_stats_test.cpp b/src/mongo/db/s/balancer/balance_stats_test.cpp
index 3c825b8f317..0bae93f1693 100644
--- a/src/mongo/db/s/balancer/balance_stats_test.cpp
+++ b/src/mongo/db/s/balancer/balance_stats_test.cpp
@@ -61,6 +61,7 @@ public:
boost::none, // timestamp
boost::none, // time series fields
boost::none, // resharding fields
+ boost::none, // chunk size bytes
true, // allowMigration
chunks);
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
index 79a539c3599..4851a323118 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
@@ -91,7 +91,7 @@ public:
virtual StatusWith<SplitInfoVector> selectChunksToSplit(OperationContext* opCtx) = 0;
/**
- * Given a valid namespace returns all the Migrations the balancer would need to perform
+ * Given a valid namespace, returns all the splits the balancer would need to perform
* with the current state
*/
virtual StatusWith<SplitInfoVector> selectChunksToSplit(OperationContext* opCtx,
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index 155b46a59ae..e3c3f69fe50 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -195,27 +195,6 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
return shardKeyPattern.extractShardKeyFromDoc(end);
}
-/**
- * Checks if autobalance is enabled on the current sharded collection.
- */
-bool isAutoBalanceEnabled(OperationContext* opCtx,
- const NamespaceString& nss,
- BalancerConfiguration* balancerConfig) {
- if (!balancerConfig->shouldBalanceForAutoSplit())
- return false;
-
- try {
- return Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss).getAllowBalance();
- } catch (const DBException& ex) {
- LOGV2(21903,
- "Auto-split for {namespace} failed to load collection metadata: {error}",
- "Auto-split failed to load collection metadata",
- "namespace"_attr = nss,
- "error"_attr = redact(ex));
- return false;
- }
-}
-
const auto getChunkSplitter = ServiceContext::declareDecoration<ChunkSplitter>();
} // namespace
@@ -285,6 +264,28 @@ void ChunkSplitter::trySplitting(std::shared_ptr<ChunkSplitStateDriver> chunkSpl
});
}
+/**
+ * Checks if autobalance is enabled on the current sharded collection.
+ */
+bool isAutoBalanceEnabled(OperationContext* opCtx,
+ const NamespaceString& nss,
+ BalancerConfiguration* balancerConfig) {
+ if (!balancerConfig->shouldBalanceForAutoSplit()) {
+ return false;
+ }
+
+ try {
+ return Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss).getAllowBalance();
+ } catch (const DBException& ex) {
+ LOGV2(21903,
+ "Auto-split for {namespace} failed to load collection metadata: {error}",
+ "Auto-split failed to load collection metadata",
+ "namespace"_attr = nss,
+ "error"_attr = redact(ex));
+ return false;
+ }
+}
+
void ChunkSplitter::_runAutosplit(std::shared_ptr<ChunkSplitStateDriver> chunkSplitStateDriver,
const NamespaceString& nss,
const BSONObj& min,
@@ -320,11 +321,17 @@ void ChunkSplitter::_runAutosplit(std::shared_ptr<ChunkSplitStateDriver> chunkSp
// Ensure we have the most up-to-date balancer configuration
uassertStatusOK(balancerConfig->refreshAndCheck(opCtx.get()));
- if (!balancerConfig->getShouldAutoSplit()) {
+ if (!balancerConfig->getShouldAutoSplit() || !cm.allowAutoSplit()) {
return;
}
- const uint64_t maxChunkSizeBytes = balancerConfig->getMaxChunkSizeBytes();
+ const uint64_t maxChunkSizeBytes = [&] {
+ if (cm.maxChunkSizeBytes()) {
+ return *cm.maxChunkSizeBytes();
+ }
+ return balancerConfig->getMaxChunkSizeBytes();
+ }();
+ invariant(ChunkSizeSettingsType::checkMaxChunkSizeValid(maxChunkSizeBytes));
LOGV2_DEBUG(21906,
1,
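
The chunk splitter now consults the per-collection maxChunkSizeBytes from the ChunkManager before falling back to the cluster-wide balancer setting. A shell-side sketch of that precedence (hypothetical helper; 64MB is the historical cluster-wide default stored under config.settings):

```js
// Sketch of the fallback order applied in ChunkSplitter::_runAutosplit above.
function effectiveMaxChunkSizeBytes(ns) {
    const configDB = db.getSiblingDB("config");
    const coll = configDB.collections.findOne({_id: ns});
    if (coll && coll.maxChunkSizeBytes > 0) {
        return coll.maxChunkSizeBytes;  // per-collection override wins
    }
    const setting = configDB.settings.findOne({_id: "chunksize"});  // value in MB
    return (setting ? setting.value : 64) * 1024 * 1024;            // cluster-wide default
}
```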
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index 0b3a9585c27..9d6d68a1fcf 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -75,6 +75,7 @@ protected:
boost::none /* timestamp */,
timeseriesFields,
boost::none,
+ boost::none,
true,
[&] {
ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 0b8bd1a2bbf..b4e8e1231b5 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -96,6 +96,7 @@ CollectionMetadata makeCollectionMetadataImpl(
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
std::move(reshardingFields),
+ boost::none /* chunkSizeBytes */,
true,
allChunks)),
kChunkManager),
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index fbac57d611a..52022ffd6a9 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -73,6 +73,7 @@ protected:
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
{std::move(chunk)})),
boost::none);
diff --git a/src/mongo/db/s/config/configsvr_configure_collection_auto_split.cpp b/src/mongo/db/s/config/configsvr_configure_collection_auto_split.cpp
new file mode 100644
index 00000000000..921e2d52347
--- /dev/null
+++ b/src/mongo/db/s/config/configsvr_configure_collection_auto_split.cpp
@@ -0,0 +1,119 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/auth/action_set.h"
+#include "mongo/db/auth/action_type.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/privilege.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/db/s/balancer/balancer.h"
+#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/operation_sharding_state.h"
+#include "mongo/db/s/shard_filtering_metadata_refresh.h"
+#include "mongo/s/balancer_configuration.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/request_types/configure_collection_auto_split_gen.h"
+#include "mongo/s/write_ops/batched_command_request.h"
+
+namespace mongo {
+namespace {
+
+class ConfigsvrConfigureAutoSplitCommand final
+ : public TypedCommand<ConfigsvrConfigureAutoSplitCommand> {
+public:
+ using Request = ConfigsvrConfigureCollAutoSplit;
+
+ class Invocation final : public InvocationBase {
+ public:
+ using InvocationBase::InvocationBase;
+
+ void typedRun(OperationContext* opCtx) {
+ uassert(ErrorCodes::IllegalOperation,
+ str::stream() << Request::kCommandName << " can only be run on config servers",
+ serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+
+ const NamespaceString& nss = ns();
+
+ uassert(ErrorCodes::InvalidNamespace,
+ str::stream() << "Invalid namespace specified '" << nss.ns() << "'",
+ nss.isValid());
+
+ const auto maxChunkSizeBytes = [&]() -> boost::optional<int64_t> {
+ if (request().getDefaultChunkSizeMB()) {
+ return *request().getDefaultChunkSizeMB() * 1024 * 1024;
+ }
+ return boost::none;
+ }();
+
+ // throws if collection does not exist or parameters are invalid
+ ShardingCatalogManager::get(opCtx)->configureCollectionAutoSplit(
+ opCtx, nss, maxChunkSizeBytes, request().getEnableAutoSplitter());
+ }
+
+ private:
+ NamespaceString ns() const override {
+ return request().getCommandParameter();
+ }
+
+ bool supportsWriteConcern() const override {
+ return false;
+ }
+
+ void doCheckAuthorization(OperationContext* opCtx) const override {
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::internal));
+ }
+ };
+
+ std::string help() const override {
+ return "Internal command, which is exported by the sharding config server. Do not call "
+ "directly.";
+ }
+
+ bool adminOnly() const override {
+ return true;
+ }
+
+ AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
+ return AllowedOnSecondary::kNever;
+ }
+
+} configsvrConfigureAutoSplitCmd;
+
+} // namespace
+} // namespace mongo
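
On the wire, mongos forwards the request to the config server primary as _configsvrConfigureAutoSplit (the command name registered above). A sketch of that internal invocation; the defaultChunkSizeMB field name is an assumption inferred from the getDefaultChunkSizeMB() getter:

```js
// Internal form handled by ConfigsvrConfigureAutoSplitCommand (illustration only;
// it requires the 'internal' action and is rejected on non-config-server nodes).
db.getSiblingDB("admin").runCommand({
    _configsvrConfigureAutoSplit: "test.foo",  // namespace command parameter
    enableAutoSplitter: false,
    defaultChunkSizeMB: 1,                     // assumption: converted to bytes server-side
});
```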
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 15142fc8b46..d897868740d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -332,6 +332,13 @@ public:
const boost::optional<UUID>& collectionUUID,
bool allowMigrations);
+ /**
+ * Bump the minor version of the newest chunk on each shard
+ */
+ void bumpCollectionMinorVersionInTxn(OperationContext* opCtx,
+ const NamespaceString& nss,
+ TxnNumber txnNumber) const;
+
//
// Database Operations
//
@@ -372,6 +379,12 @@ public:
const bool upsert,
TxnNumber txnNumber);
+
+ void configureCollectionAutoSplit(OperationContext* opCtx,
+ const NamespaceString& nss,
+ boost::optional<int64_t> maxChunkSizeBytes,
+ boost::optional<bool> enableAutoSplitter);
+
//
// Shard Operations
//
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index b1b0bb459e2..c042602d190 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -1805,4 +1805,10 @@ void ShardingCatalogManager::setAllowMigrationsAndBumpOneChunk(
executor);
}
+void ShardingCatalogManager::bumpCollectionMinorVersionInTxn(OperationContext* opCtx,
+ const NamespaceString& nss,
+ TxnNumber txnNumber) const {
+ bumpCollectionMinorVersion(opCtx, nss, txnNumber);
+}
+
} // namespace mongo
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 8aec1356618..84d951d1cd2 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -55,6 +55,7 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/s/sharding_logging.h"
+#include "mongo/db/s/sharding_util.h"
#include "mongo/db/vector_clock.h"
#include "mongo/executor/network_interface.h"
#include "mongo/executor/task_executor.h"
@@ -460,6 +461,86 @@ void ShardingCatalogManager::updateShardingCatalogEntryForCollectionInTxn(
}
}
+
+void ShardingCatalogManager::configureCollectionAutoSplit(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ boost::optional<int64_t> maxChunkSizeBytes,
+ boost::optional<bool> enableAutoSplitter) {
+
+ uassert(ErrorCodes::InvalidOptions,
+ "invalid collection auto splitter config update",
+ maxChunkSizeBytes || enableAutoSplitter);
+
+ const auto coll = Grid::get(opCtx)->catalogClient()->getCollection(
+ opCtx, nss, repl::ReadConcernLevel::kLocalReadConcern);
+
+ short updatedFields = 0;
+ BSONObjBuilder updateCmd;
+ {
+ BSONObjBuilder setBuilder(updateCmd.subobjStart("$set"));
+ if (maxChunkSizeBytes) {
+ // verify we got a positive integer in range [1MB, 1GB]
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "Chunk size '" << *maxChunkSizeBytes
+ << "' out of range [1MB, 1GB]",
+ *maxChunkSizeBytes > 0 &&
+ ChunkSizeSettingsType::checkMaxChunkSizeValid(*maxChunkSizeBytes));
+
+ setBuilder.append(CollectionType::kMaxChunkSizeBytesFieldName, *maxChunkSizeBytes);
+ updatedFields++;
+
+ // TODO: SERVER-58908 add defragmentation getter / setter logic
+ }
+ if (enableAutoSplitter) {
+ const bool doSplit = enableAutoSplitter.get();
+ setBuilder.append(CollectionType::kNoAutoSplitFieldName, !doSplit);
+ updatedFields++;
+ }
+ }
+
+ if (updatedFields == 0) {
+ return;
+ }
+
+ const auto cm = Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfo(opCtx, nss);
+ std::set<ShardId> shardsIds;
+ cm.getAllShardIds(&shardsIds);
+
+ withTransaction(
+ opCtx, CollectionType::ConfigNS, [&](OperationContext* opCtx, TxnNumber txnNumber) {
+ const auto update = updateCmd.obj();
+
+ const auto query =
+ BSON(CollectionType::kNssFieldName << nss.ns() << CollectionType::kUuidFieldName
+ << coll.getUuid());
+ const auto res = writeToConfigDocumentInTxn(
+ opCtx,
+ CollectionType::ConfigNS,
+ BatchedCommandRequest::buildUpdateOp(CollectionType::ConfigNS,
+ query,
+ update /* update */,
+ false /* upsert */,
+ false /* multi */),
+ txnNumber);
+ const auto numDocsModified = UpdateOp::parseResponse(res).getN();
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ str::stream() << "Expected to match one doc for query " << query
+ << " but matched " << numDocsModified,
+ numDocsModified == 1);
+
+ bumpCollectionMinorVersionInTxn(opCtx, nss, txnNumber);
+ });
+
+
+ const auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor();
+ sharding_util::tellShardsToRefreshCollection(
+ opCtx,
+ {std::make_move_iterator(shardsIds.begin()), std::make_move_iterator(shardsIds.end())},
+ nss,
+ executor);
+}
+
void ShardingCatalogManager::renameShardedMetadata(
OperationContext* opCtx,
const NamespaceString& from,
@@ -500,5 +581,4 @@ void ShardingCatalogManager::renameShardedMetadata(
}
}
-
} // namespace mongo
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index b11ace72e72..dc59cd318e4 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -88,6 +88,7 @@ protected:
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
{ChunkType{
kNss, range, ChunkVersion(1, 0, epoch, boost::none /* timestamp */), kOtherShard}});
@@ -136,7 +137,8 @@ protected:
splitChunks.emplace_back(
kNss, ChunkRange(maxKey, chunkToSplit.getMax()), chunkVersion, kOtherShard);
- auto rt = cm->getRoutingTableHistory_ForTest().makeUpdated(boost::none, true, splitChunks);
+ auto rt = cm->getRoutingTableHistory_ForTest().makeUpdated(
+ boost::none, boost::none, true, splitChunks);
return CollectionMetadata(ChunkManager(cm->dbPrimary(),
cm->dbVersion(),
@@ -162,6 +164,7 @@ protected:
auto rt = cm->getRoutingTableHistory_ForTest().makeUpdated(
boost::none,
+ boost::none,
true,
{ChunkType(kNss, ChunkRange(minKey, maxKey), chunkVersion, kOtherShard)});
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index 1248ded418e..ce33397e38d 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -177,7 +177,8 @@ protected:
epoch,
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
- boost::none,
+ boost::none /* resharding Fields */,
+ boost::none /* chunkSizeBytes */,
true,
{ChunkType{kNss,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
diff --git a/src/mongo/db/s/op_observer_sharding_test.cpp b/src/mongo/db/s/op_observer_sharding_test.cpp
index 93b0a5b58f9..9f67cb2c930 100644
--- a/src/mongo/db/s/op_observer_sharding_test.cpp
+++ b/src/mongo/db/s/op_observer_sharding_test.cpp
@@ -77,6 +77,7 @@ protected:
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
{std::move(chunk)});
diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp
index cd380ada81c..f4300ae3a2c 100644
--- a/src/mongo/db/s/range_deletion_util_test.cpp
+++ b/src/mongo/db/s/range_deletion_util_test.cpp
@@ -108,6 +108,7 @@ public:
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
{ChunkType{kNss,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
diff --git a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
index dcbe12200b0..a911af349f7 100644
--- a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
@@ -87,6 +87,7 @@ public:
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
+ boost::none /* chunkSizeBytes */,
true /* allowMigrations */,
chunks);
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
index bc0e4888e43..499ff3a5301 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
@@ -117,6 +117,7 @@ protected:
boost::none,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
{std::move(chunk)})),
boost::none);
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index 504ef1eb442..ef205fbd7fe 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -192,6 +192,7 @@ public:
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
false,
chunks);
diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
index a54e6dde5fc..88160624032 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
@@ -301,6 +301,7 @@ private:
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
+ boost::none /* chunkSizeBytes */,
true /* allowMigrations */,
chunks);
diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
index 2bc7b48771a..b155330628d 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
@@ -263,6 +263,7 @@ private:
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
+ boost::none /* chunkSizeBytes */,
true /* allowMigrations */,
chunks);
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index 20ae401c284..9b5d972f39a 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -92,6 +92,7 @@ public:
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
+ boost::none /* chunkSizeBytes */,
true /* allowMigrations */,
chunks);
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index b05c1649562..d4ba0d7628c 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -123,6 +123,8 @@ Status persistCollectionAndChangedChunks(OperationContext* opCtx,
update.setDefaultCollation(collAndChunks.defaultCollation);
update.setTimeseriesFields(collAndChunks.timeseriesFields);
update.setReshardingFields(collAndChunks.reshardingFields);
+ update.setMaxChunkSizeBytes(collAndChunks.maxChunkSizeBytes);
+ update.setAllowAutoSplit(collAndChunks.allowAutoSplit);
update.setAllowMigrations(collAndChunks.allowMigrations);
// Mark the chunk metadata as refreshing, so that secondaries are aware of refresh.
@@ -285,6 +287,8 @@ CollectionAndChangedChunks getPersistedMetadataSinceVersion(OperationContext* op
shardCollectionEntry.getUnique(),
shardCollectionEntry.getTimeseriesFields(),
shardCollectionEntry.getReshardingFields(),
+ shardCollectionEntry.getMaxChunkSizeBytes(),
+ shardCollectionEntry.getAllowAutoSplit(),
shardCollectionEntry.getAllowMigrations(),
std::move(changedChunks)};
}
@@ -1029,6 +1033,8 @@ StatusWith<CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_getLoader
// The collection info in enqueued metadata may be more recent than the persisted metadata
persisted.creationTime = enqueued.creationTime;
persisted.reshardingFields = std::move(enqueued.reshardingFields);
+ persisted.maxChunkSizeBytes = enqueued.maxChunkSizeBytes;
+ persisted.allowAutoSplit = enqueued.allowAutoSplit;
persisted.allowMigrations = enqueued.allowMigrations;
return persisted;
@@ -1609,6 +1615,8 @@ ShardServerCatalogCacheLoader::CollAndChunkTaskList::getEnqueuedMetadataForTerm(
// Keep the most recent version of these fields
collAndChunks.allowMigrations = task.collectionAndChangedChunks->allowMigrations;
+ collAndChunks.maxChunkSizeBytes = task.collectionAndChangedChunks->maxChunkSizeBytes;
+ collAndChunks.allowAutoSplit = task.collectionAndChangedChunks->allowAutoSplit;
collAndChunks.reshardingFields = task.collectionAndChangedChunks->reshardingFields;
}
}
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index 1569538bc77..2fe0145fd53 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -203,8 +203,16 @@ void incrementChunkOnInsertOrUpdate(OperationContext* opCtx,
if (!fromMigrate) {
const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
- if (balancerConfig->getShouldAutoSplit() &&
- chunkWritesTracker->shouldSplit(balancerConfig->getMaxChunkSizeBytes())) {
+ const uint64_t maxChunkSizeBytes = [&] {
+ const boost::optional<uint64_t> csb = chunkManager.maxChunkSizeBytes();
+ if (csb) {
+ return *csb;
+ }
+ return balancerConfig->getMaxChunkSizeBytes();
+ }();
+
+ if (balancerConfig->getShouldAutoSplit() && chunkManager.allowAutoSplit() &&
+ chunkWritesTracker->shouldSplit(maxChunkSizeBytes)) {
auto chunkSplitStateDriver =
ChunkSplitStateDriver::tryInitiateSplit(chunkWritesTracker);
if (chunkSplitStateDriver) {
diff --git a/src/mongo/db/s/type_shard_collection.h b/src/mongo/db/s/type_shard_collection.h
index 224194e5abd..a7b5f45792e 100644
--- a/src/mongo/db/s/type_shard_collection.h
+++ b/src/mongo/db/s/type_shard_collection.h
@@ -51,11 +51,13 @@ public:
using ShardCollectionTypeBase::kUuidFieldName;
// Make getters and setters accessible.
+ using ShardCollectionTypeBase::getAllowAutoSplit;
using ShardCollectionTypeBase::getDefaultCollation;
using ShardCollectionTypeBase::getEnterCriticalSectionCounter;
using ShardCollectionTypeBase::getEpoch;
using ShardCollectionTypeBase::getKeyPattern;
using ShardCollectionTypeBase::getLastRefreshedCollectionVersion;
+ using ShardCollectionTypeBase::getMaxChunkSizeBytes;
using ShardCollectionTypeBase::getNss;
using ShardCollectionTypeBase::getRefreshing;
using ShardCollectionTypeBase::getReshardingFields;
@@ -63,11 +65,13 @@ public:
using ShardCollectionTypeBase::getTimestamp;
using ShardCollectionTypeBase::getUnique;
using ShardCollectionTypeBase::getUuid;
+ using ShardCollectionTypeBase::setAllowAutoSplit;
using ShardCollectionTypeBase::setDefaultCollation;
using ShardCollectionTypeBase::setEnterCriticalSectionCounter;
using ShardCollectionTypeBase::setEpoch;
using ShardCollectionTypeBase::setKeyPattern;
using ShardCollectionTypeBase::setLastRefreshedCollectionVersion;
+ using ShardCollectionTypeBase::setMaxChunkSizeBytes;
using ShardCollectionTypeBase::setNss;
using ShardCollectionTypeBase::setRefreshing;
using ShardCollectionTypeBase::setReshardingFields;
diff --git a/src/mongo/db/s/type_shard_collection.idl b/src/mongo/db/s/type_shard_collection.idl
index 31cc7493c7d..3b86d7c1ca7 100644
--- a/src/mongo/db/s/type_shard_collection.idl
+++ b/src/mongo/db/s/type_shard_collection.idl
@@ -153,6 +153,14 @@ structs:
collection is the temporary resharding collection."
type: TypeCollectionReshardingFields
optional: true
+ maxChunkSizeBytes:
+ type: safeInt64
+ description: "Max chunk size in bytes."
+ optional: true
+ allowAutoSplit:
+ type: bool
+ description: "Specifies whether the auto-splitter should be running or not for this collection."
+ default: true
allowMigrations:
cpp_name: pre50CompatibleAllowMigrations
type: bool
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 155085c64cc..fbc03252a1d 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -160,6 +160,7 @@ env.Library(
'request_types/clone_collection_options_from_primary_shard.idl',
'request_types/commit_chunk_migration_request_type.cpp',
'request_types/commit_reshard_collection.idl',
+ 'request_types/configure_collection_auto_split.idl',
'request_types/ensure_chunk_version_is_greater_than.idl',
'request_types/flush_database_cache_updates.idl',
'request_types/flush_resharding_state_change.idl',
diff --git a/src/mongo/s/catalog/type_collection.cpp b/src/mongo/s/catalog/type_collection.cpp
index c518a135aaa..349bda5a43a 100644
--- a/src/mongo/s/catalog/type_collection.cpp
+++ b/src/mongo/s/catalog/type_collection.cpp
@@ -36,6 +36,7 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/util/bson_extract.h"
+#include "mongo/s/balancer_configuration.h"
#include "mongo/util/assert_util.h"
namespace mongo {
@@ -93,4 +94,9 @@ void CollectionType::setDefaultCollation(const BSONObj& defaultCollation) {
setPre50CompatibleDefaultCollation(defaultCollation);
}
+void CollectionType::setMaxChunkSizeBytes(int64_t value) {
+ uassert(ErrorCodes::BadValue, "Default chunk size is out of range", value > 0);
+ CollectionTypeBase::setMaxChunkSizeBytes(value);
+}
+
} // namespace mongo
diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h
index a5710d08f76..ffdc31a4a64 100644
--- a/src/mongo/s/catalog/type_collection.h
+++ b/src/mongo/s/catalog/type_collection.h
@@ -84,6 +84,9 @@ public:
static constexpr auto kKeyPatternFieldName = kPre50CompatibleKeyPatternFieldName;
static constexpr auto kUuidFieldName = kPre50CompatibleUuidFieldName;
static constexpr auto kAllowMigrationsFieldName = kPre50CompatibleAllowMigrationsFieldName;
+
+ using CollectionTypeBase::kMaxChunkSizeBytesFieldName;
+ using CollectionTypeBase::kNoAutoSplitFieldName;
using CollectionTypeBase::kNssFieldName;
using CollectionTypeBase::kReshardingFieldsFieldName;
using CollectionTypeBase::kTimeseriesFieldsFieldName;
@@ -92,6 +95,7 @@ public:
using CollectionTypeBase::kUpdatedAtFieldName;
// Make getters and setters accessible.
+ using CollectionTypeBase::getMaxChunkSizeBytes;
using CollectionTypeBase::getNss;
using CollectionTypeBase::getReshardingFields;
using CollectionTypeBase::getTimeseriesFields;
@@ -147,6 +151,12 @@ public:
}
void setDefaultCollation(const BSONObj& defaultCollation);
+ void setMaxChunkSizeBytes(int64_t value);
+
+ bool getAllowAutoSplit() const {
+ return !getNoAutoSplit();
+ }
+
bool getAllowBalance() const {
return !getNoBalance();
}
diff --git a/src/mongo/s/catalog/type_collection.idl b/src/mongo/s/catalog/type_collection.idl
index 5917c2faad5..3f91bbc0bee 100644
--- a/src/mongo/s/catalog/type_collection.idl
+++ b/src/mongo/s/catalog/type_collection.idl
@@ -117,6 +117,14 @@ structs:
type: bool
description: "Uniqueness of the sharding key."
default: false
+ maxChunkSizeBytes:
+ type: safeInt64
+ description: "Max chunk size in bytes."
+ optional: true
+ noAutoSplit:
+ type: bool
+ description: "Specifies whether the auto-splitter should be running or not for this collection."
+ default: false
noBalance:
type: bool
description: "Consulted by the Balancer only and indicates whether this collection
@@ -144,4 +152,4 @@ structs:
type: TypeCollectionTimeseriesFields
description: "Time-series collection fields. Only set when this is a time-series
buckets collection."
- optional: true
+ optional: true
\ No newline at end of file
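
With the two new fields from this IDL change, a config.collections entry for a configured collection would look roughly like this (illustrative values only; unrelated fields elided):

```js
// Hypothetical config.collections document after setting a 10MB per-collection
// chunk size with the auto-splitter enabled.
const exampleEntry = {
    _id: "test.foo",
    key: {num: 1},
    unique: false,
    maxChunkSizeBytes: NumberLong(10485760),  // 10 * 1024 * 1024
    noAutoSplit: false,                       // IDL default; true disables the splitter
    noBalance: false,
};
printjson(exampleEntry);
```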
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 3710ac43bdc..9957a4e3a32 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -606,6 +606,19 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look
auto collectionAndChunks = _catalogCacheLoader.getChunksSince(nss, lookupVersion).get();
+ const auto maxChunkSize = [&]() -> boost::optional<uint64_t> {
+ if (!collectionAndChunks.allowAutoSplit) {
+ // maxChunkSize = 0 is an invalid chunkSize so we use it to detect noAutoSplit
+ // on the steady-state path in incrementChunkOnInsertOrUpdate(...)
+ return 0;
+ }
+ if (collectionAndChunks.maxChunkSizeBytes) {
+ invariant(collectionAndChunks.maxChunkSizeBytes.get() > 0);
+ return uint64_t(*collectionAndChunks.maxChunkSizeBytes);
+ }
+ return boost::none;
+ }();
+
auto newRoutingHistory = [&] {
// If we have routing info already and it's for the same collection epoch, we're
// updating. Otherwise, we're making a whole new routing table.
@@ -616,10 +629,12 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look
return existingHistory->optRt
->makeUpdatedReplacingTimestamp(collectionAndChunks.creationTime)
.makeUpdated(collectionAndChunks.reshardingFields,
+ maxChunkSize,
collectionAndChunks.allowMigrations,
collectionAndChunks.changedChunks);
} else {
return existingHistory->optRt->makeUpdated(collectionAndChunks.reshardingFields,
+ maxChunkSize,
collectionAndChunks.allowMigrations,
collectionAndChunks.changedChunks);
}
@@ -644,6 +659,7 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look
collectionAndChunks.creationTime,
collectionAndChunks.timeseriesFields,
std::move(collectionAndChunks.reshardingFields),
+ maxChunkSize,
collectionAndChunks.allowMigrations,
collectionAndChunks.changedChunks);
}();
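
The maxChunkSize lambda above folds the two persisted fields into a single sentinel: a disabled auto-splitter becomes maxChunkSize = 0, an explicit size is passed through, and "unset" means "use the balancer default". ChunkManager::allowAutoSplit() later decodes that convention; a small sketch of the decoding:

```js
// Decoding of the sentinel, mirroring ChunkManager::allowAutoSplit() (sketch).
function allowAutoSplit(maxChunkSizeBytes) {
    if (maxChunkSizeBytes === undefined) {
        return true;  // unset: defer to the cluster-wide autosplit setting
    }
    return maxChunkSizeBytes !== 0;  // 0 encodes "auto-split disabled"
}

assert(allowAutoSplit(undefined));
assert(allowAutoSplit(10 * 1024 * 1024));
assert(!allowAutoSplit(0));
```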
diff --git a/src/mongo/s/catalog_cache_loader.cpp b/src/mongo/s/catalog_cache_loader.cpp
index a25aa552150..265ee967bc0 100644
--- a/src/mongo/s/catalog_cache_loader.cpp
+++ b/src/mongo/s/catalog_cache_loader.cpp
@@ -50,6 +50,8 @@ CatalogCacheLoader::CollectionAndChangedChunks::CollectionAndChangedChunks(
bool collShardKeyIsUnique,
boost::optional<TypeCollectionTimeseriesFields> collTimeseriesFields,
boost::optional<TypeCollectionReshardingFields> collReshardingFields,
+ boost::optional<int64_t> maxChunkSizeBytes,
+ bool allowAutoSplit,
bool allowMigrations,
std::vector<ChunkType> chunks)
: epoch(std::move(collEpoch)),
@@ -60,6 +62,8 @@ CatalogCacheLoader::CollectionAndChangedChunks::CollectionAndChangedChunks(
shardKeyIsUnique(collShardKeyIsUnique),
timeseriesFields(std::move(collTimeseriesFields)),
reshardingFields(std::move(collReshardingFields)),
+ maxChunkSizeBytes(std::move(maxChunkSizeBytes)),
+ allowAutoSplit(allowAutoSplit),
allowMigrations(allowMigrations),
changedChunks(std::move(chunks)) {}
diff --git a/src/mongo/s/catalog_cache_loader.h b/src/mongo/s/catalog_cache_loader.h
index a4afe4e7277..e16a8a3786a 100644
--- a/src/mongo/s/catalog_cache_loader.h
+++ b/src/mongo/s/catalog_cache_loader.h
@@ -75,6 +75,8 @@ public:
bool collShardKeyIsUnique,
boost::optional<TypeCollectionTimeseriesFields> collTimeseriesFields,
boost::optional<TypeCollectionReshardingFields> collReshardingFields,
+ boost::optional<int64_t> maxChunkSizeBytes,
+ bool allowAutoSplit,
bool allowMigrations,
std::vector<ChunkType> chunks);
@@ -94,6 +96,10 @@ public:
// populated.
boost::optional<TypeCollectionReshardingFields> reshardingFields;
+ boost::optional<int64_t> maxChunkSizeBytes;
+
+ bool allowAutoSplit;
+
bool allowMigrations;
// The chunks which have changed sorted by their chunkVersion. This list might potentially
diff --git a/src/mongo/s/catalog_cache_loader_mock.cpp b/src/mongo/s/catalog_cache_loader_mock.cpp
index 96bdd409054..f8dadc1e667 100644
--- a/src/mongo/s/catalog_cache_loader_mock.cpp
+++ b/src/mongo/s/catalog_cache_loader_mock.cpp
@@ -97,6 +97,8 @@ CollectionAndChangedChunks getCollectionRefresh(
swCollectionReturnValue.getValue().getUnique(),
swCollectionReturnValue.getValue().getTimeseriesFields(),
reshardingFields,
+ swCollectionReturnValue.getValue().getMaxChunkSizeBytes(),
+ swCollectionReturnValue.getValue().getAllowAutoSplit(),
swCollectionReturnValue.getValue().getAllowMigrations(),
std::move(chunks)};
}
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index f565d29f7f5..3dc98fbbf2d 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -320,6 +320,7 @@ RoutingTableHistory::RoutingTableHistory(
bool unique,
boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ boost::optional<uint64_t> maxChunkSizeBytes,
bool allowMigrations,
ChunkMap chunkMap)
: _nss(std::move(nss)),
@@ -329,6 +330,7 @@ RoutingTableHistory::RoutingTableHistory(
_unique(unique),
_timeseriesFields(std::move(timeseriesFields)),
_reshardingFields(std::move(reshardingFields)),
+ _maxChunkSizeBytes(maxChunkSizeBytes),
_allowMigrations(allowMigrations),
_chunkMap(std::move(chunkMap)),
_shardVersions(_chunkMap.constructShardVersionMap()) {}
@@ -691,6 +693,20 @@ bool ChunkManager::allowMigrations() const {
return _rt->optRt->allowMigrations();
}
+bool ChunkManager::allowAutoSplit() const {
+ const auto maxChunkSize = maxChunkSizeBytes();
+ if (!maxChunkSize)
+ return true;
+
+ return *maxChunkSize != 0;
+}
+
+boost::optional<uint64_t> ChunkManager::maxChunkSizeBytes() const {
+ if (!_rt->optRt)
+ return boost::none;
+ return _rt->optRt->maxChunkSizeBytes();
+}
+
std::string ChunkManager::toString() const {
return _rt->optRt ? _rt->optRt->toString() : "UNSHARDED";
}
@@ -757,18 +773,21 @@ RoutingTableHistory RoutingTableHistory::makeNew(
const boost::optional<Timestamp>& timestamp,
boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ boost::optional<uint64_t> maxChunkSizeBytes,
bool allowMigrations,
const std::vector<ChunkType>& chunks) {
+
+ auto changedChunkInfos = flatten(chunks);
return RoutingTableHistory(std::move(nss),
std::move(uuid),
std::move(shardKeyPattern),
std::move(defaultCollator),
std::move(unique),
std::move(timeseriesFields),
- boost::none,
+ std::move(reshardingFields),
+ maxChunkSizeBytes,
allowMigrations,
- ChunkMap{epoch, timestamp})
- .makeUpdated(std::move(reshardingFields), allowMigrations, chunks);
+ ChunkMap{epoch, timestamp}.createMerged(changedChunkInfos));
}
// Note that any new parameters added to RoutingTableHistory::makeUpdated() must also be added to
@@ -776,6 +795,7 @@ RoutingTableHistory RoutingTableHistory::makeNew(
// it may overlap with the enqueued metadata.
RoutingTableHistory RoutingTableHistory::makeUpdated(
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ boost::optional<uint64_t> maxChunkSizeBytes,
bool allowMigrations,
const std::vector<ChunkType>& changedChunks) const {
auto changedChunkInfos = flatten(changedChunks);
@@ -791,6 +811,7 @@ RoutingTableHistory RoutingTableHistory::makeUpdated(
isUnique(),
_timeseriesFields,
std::move(reshardingFields),
+ maxChunkSizeBytes,
allowMigrations,
std::move(chunkMap));
}
@@ -822,6 +843,7 @@ RoutingTableHistory RoutingTableHistory::makeUpdatedReplacingTimestamp(
_unique,
_timeseriesFields,
_reshardingFields,
+ _maxChunkSizeBytes,
_allowMigrations,
std::move(newMap));
}
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index 64f306626a0..970173ea07b 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -180,6 +180,7 @@ public:
const boost::optional<Timestamp>& timestamp,
boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ boost::optional<uint64_t> maxChunkSizeBytes,
bool allowMigrations,
const std::vector<ChunkType>& chunks);
@@ -197,6 +198,7 @@ public:
*/
RoutingTableHistory makeUpdated(
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ boost::optional<uint64_t> maxChunkSizeBytes,
bool allowMigrations,
const std::vector<ChunkType>& changedChunks) const;
@@ -323,6 +325,11 @@ public:
return _allowMigrations;
}
+ // Per-collection max chunk size in bytes, if configured; 0 encodes that no auto-splits should happen
+ boost::optional<uint64_t> maxChunkSizeBytes() const {
+ return _maxChunkSizeBytes;
+ }
+
private:
friend class ChunkManager;
@@ -333,6 +340,7 @@ private:
bool unique,
boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
+ boost::optional<uint64_t> maxChunkSizeBytes,
bool allowMigrations,
ChunkMap chunkMap);
@@ -362,6 +370,8 @@ private:
// for this collection.
boost::optional<TypeCollectionReshardingFields> _reshardingFields;
+ boost::optional<uint64_t> _maxChunkSizeBytes;
+
bool _allowMigrations;
// Map from the max for each chunk to an entry describing the chunk. The union of all chunks'
@@ -531,6 +541,10 @@ public:
*/
bool allowMigrations() const;
+ bool allowAutoSplit() const;
+
+ boost::optional<uint64_t> maxChunkSizeBytes() const;
+
const ShardId& dbPrimary() const {
return _dbPrimary;
}
diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp
index b51886d4089..47b325d5414 100644
--- a/src/mongo/s/chunk_manager_query_test.cpp
+++ b/src/mongo/s/chunk_manager_query_test.cpp
@@ -521,6 +521,7 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
{chunk0, chunk1});
@@ -531,11 +532,11 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
chunk1.setHistory({ChunkHistory(Timestamp(20, 0), ShardId("0")),
ChunkHistory(Timestamp(1, 0), ShardId("1"))});
- ChunkManager chunkManager(
- ShardId("0"),
- DatabaseVersion(UUID::gen()),
- makeStandaloneRoutingTableHistory(oldRoutingTable.makeUpdated(boost::none, true, {chunk1})),
- Timestamp(5, 0));
+ ChunkManager chunkManager(ShardId("0"),
+ DatabaseVersion(UUID::gen()),
+ makeStandaloneRoutingTableHistory(oldRoutingTable.makeUpdated(
+ boost::none, boost::none, true, {chunk1})),
+ Timestamp(5, 0));
std::set<ShardId> shardIds;
chunkManager.getShardIdsForRange(BSON("x" << MINKEY), BSON("x" << MAXKEY), &shardIds);
diff --git a/src/mongo/s/chunk_manager_refresh_bm.cpp b/src/mongo/s/chunk_manager_refresh_bm.cpp
index c936ce9b85e..dd1012548dc 100644
--- a/src/mongo/s/chunk_manager_refresh_bm.cpp
+++ b/src/mongo/s/chunk_manager_refresh_bm.cpp
@@ -88,6 +88,7 @@ CollectionMetadata makeChunkManagerWithShardSelector(int nShards,
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
chunks);
return CollectionMetadata(ChunkManager(ShardId("Shard0"),
@@ -120,7 +121,7 @@ MONGO_COMPILER_NOINLINE auto makeChunkManagerWithOptimalBalancedDistribution(int
MONGO_COMPILER_NOINLINE auto runIncrementalUpdate(const CollectionMetadata& cm,
const std::vector<ChunkType>& newChunks) {
auto rt = cm.getChunkManager()->getRoutingTableHistory_ForTest().makeUpdated(
- boost::none, true, newChunks);
+ boost::none, boost::none, true, newChunks);
return CollectionMetadata(ChunkManager(ShardId("shard0"),
DatabaseVersion(UUID::gen()),
makeStandaloneRoutingTableHistory(std::move(rt)),
@@ -178,6 +179,7 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
chunks);
benchmark::DoNotOptimize(
diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript
index cda5515584f..ae001743249 100644
--- a/src/mongo/s/commands/SConscript
+++ b/src/mongo/s/commands/SConscript
@@ -36,6 +36,7 @@ env.Library(
'cluster_commit_transaction_cmd.cpp',
'cluster_commit_reshard_collection_cmd.cpp',
'cluster_compact_cmd.cpp',
+ 'cluster_configure_collection_auto_split.cpp',
'cluster_control_balancer_cmd.cpp',
'cluster_count_cmd.cpp',
'cluster_create_cmd.cpp',
diff --git a/src/mongo/s/commands/cluster_configure_collection_auto_split.cpp b/src/mongo/s/commands/cluster_configure_collection_auto_split.cpp
new file mode 100644
index 00000000000..3982eadab7e
--- /dev/null
+++ b/src/mongo/s/commands/cluster_configure_collection_auto_split.cpp
@@ -0,0 +1,117 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/auth/action_set.h"
+#include "mongo/db/auth/action_type.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/privilege.h"
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/client.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/idl/idl_parser.h"
+#include "mongo/s/catalog_cache_loader.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/request_types/configure_collection_auto_split_gen.h"
+
+namespace mongo {
+namespace {
+
+class ConfigCollAutoSplitCmd final : public TypedCommand<ConfigCollAutoSplitCmd> {
+public:
+ using Request = ConfigureCollAutoSplit;
+
+ class Invocation final : public InvocationBase {
+ public:
+ using InvocationBase::InvocationBase;
+
+ StringData kStatusField = "status"_sd;
+
+ void typedRun(OperationContext* opCtx) {
+ opCtx->setAlwaysInterruptAtStepDownOrUp();
+ const NamespaceString& nss = ns();
+
+ ConfigsvrConfigureCollAutoSplit configsvrRequest(nss);
+ configsvrRequest.setConfigureCollAutoSplit(request().getConfigureCollAutoSplit());
+ configsvrRequest.setDbName(request().getDbName());
+
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto cmdResponse = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting(ReadPreference::PrimaryOnly),
+ NamespaceString::kAdminDb.toString(),
+ configsvrRequest.toBSON({}),
+ Shard::RetryPolicy::kIdempotent));
+
+ uassertStatusOK(cmdResponse.commandStatus);
+ }
+
+ private:
+ NamespaceString ns() const override {
+ return request().getCommandParameter();
+ }
+
+ bool supportsWriteConcern() const override {
+ return false;
+ }
+
+ void doCheckAuthorization(OperationContext* opCtx) const override {
+ ActionSet actions({ActionType::splitChunk});
+ // TODO: SERVER-58908 add balancer merge parameter
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns()),
+ actions));
+ }
+ };
+
+ std::string help() const override {
+ return "command to check whether the chunks of a given collection are in a quiesced state "
+ "or there are any which need to be moved because of (1) draining shards, (2) zone "
+ "violation or (3) imbalance between shards";
+ }
+
+ bool adminOnly() const override {
+ return true;
+ }
+
+ AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
+ return AllowedOnSecondary::kNever;
+ }
+
+} configureCollectionAutoSplitCmd;
+
+} // namespace
+} // namespace mongo
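The command above runs on mongos and forwards the request to the config server primary. A hedged usage sketch; "test.foo" and the option values are illustrative, while the parameter names come from the IDL file added below:

    // Against a mongos: set a 64 MB default chunk size and disable the
    // auto-splitter for a single sharded collection.
    assert.commandWorked(db.adminCommand({
        configureCollectionAutoSplitter: "test.foo",
        defaultChunkSize: 64,  // in MB, per the IDL definition below
        enableAutoSplitter: false
    }));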
diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp
index 18a2d7592c6..0065acfb04a 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.cpp
+++ b/src/mongo/s/config_server_catalog_cache_loader.cpp
@@ -77,6 +77,8 @@ CollectionAndChangedChunks getChangedChunks(OperationContext* opCtx,
coll.getUnique(),
coll.getTimeseriesFields(),
coll.getReshardingFields(),
+ coll.getMaxChunkSizeBytes(),
+ coll.getAllowAutoSplit(),
coll.getAllowMigrations(),
std::move(collAndChunks.second)};
}
diff --git a/src/mongo/s/request_types/configure_collection_auto_split.idl b/src/mongo/s/request_types/configure_collection_auto_split.idl
new file mode 100644
index 00000000000..0a3121d8256
--- /dev/null
+++ b/src/mongo/s/request_types/configure_collection_auto_split.idl
@@ -0,0 +1,77 @@
+# Copyright (C) 2021-present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+# configureCollectionAutoSplitter and _configsvrConfigureAutoSplit IDL file
+
+global:
+ cpp_namespace: "mongo"
+
+imports:
+ - "mongo/idl/basic_types.idl"
+
+structs:
+ configure_auto_split_params:
+ description: "Parameters for configureCollectionAutoSplitter command"
+ fields:
+ defaultChunkSize:
+ type: safeInt
+ cpp_name: defaultChunkSizeMB
+ description: "New default chunk size in MB."
+ optional: true
+ enableAutoSplitter:
+ type: bool
+ description: "Specifies whether the auto-splitter should be running or not for this collection."
+ optional: true
+
+
+commands:
+ configureAutoSplit:
+ command_name: configureCollectionAutoSplitter
+ cpp_name: ConfigureCollAutoSplit
+ description: "Public configureCollectionAutoSplitter command on mongos"
+ strict: true
+ namespace: type
+ api_version: ""
+ type: namespacestring
+ inline_chained_structs: true
+ chained_structs:
+ configure_auto_split_params:
+ cpp_name: ConfigureCollAutoSplit
+
+ _configsvrConfigureAutoSplit:
+ command_name: _configsvrConfigureAutoSplit
+ cpp_name: ConfigsvrConfigureCollAutoSplit
+ description: "Internal configureCollectionAutoSplitter command on the config server"
+ strict: true
+ namespace: type
+ api_version: ""
+ type: namespacestring
+ inline_chained_structs: true
+ chained_structs:
+ configure_auto_split_params:
+ cpp_name: ConfigureCollAutoSplit
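Both commands defined above chain the same parameter struct, so their wire-format documents differ only in the command name. Illustrative shapes (the namespace and values are examples):

    // Public form, accepted by mongos:
    // {configureCollectionAutoSplitter: "test.foo", defaultChunkSize: 128, enableAutoSplitter: true}
    // Internal form, forwarded to the config server:
    // {_configsvrConfigureAutoSplit: "test.foo", defaultChunkSize: 128, enableAutoSplitter: true}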
diff --git a/src/mongo/s/routing_table_history_test.cpp b/src/mongo/s/routing_table_history_test.cpp
index 479c3610b08..5b1dd71ee15 100644
--- a/src/mongo/s/routing_table_history_test.cpp
+++ b/src/mongo/s/routing_table_history_test.cpp
@@ -71,7 +71,7 @@ RoutingTableHistory splitChunk(const RoutingTableHistory& rt,
newChunks.emplace_back(kNss, range, curVersion, kThisShard);
}
- return rt.makeUpdated(boost::none, true, newChunks);
+ return rt.makeUpdated(boost::none, boost::none, true, newChunks);
}
/**
@@ -169,6 +169,7 @@ public:
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
{initChunk}));
ASSERT_EQ(_rt->numChunks(), 1ull);
@@ -344,6 +345,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
{chunkAll});
@@ -357,7 +359,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, true, chunks1);
+ auto rt1 = rt.makeUpdated(boost::none, boost::none, true, chunks1);
auto v1 = ChunkVersion{2, 2, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
@@ -375,7 +377,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
ChunkVersion{3, 2, epoch, boost::none /* timestamp */},
kThisShard}};
- auto rt2 = rt1.makeUpdated(boost::none, true, chunks2);
+ auto rt2 = rt1.makeUpdated(boost::none, boost::none, true, chunks2);
auto v2 = ChunkVersion{3, 2, epoch, boost::none /* timestamp */};
ASSERT_EQ(v2, rt2.getVersion(kThisShard));
}
@@ -398,6 +400,7 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 1);
@@ -412,7 +415,7 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
+ auto rt1 = rt.makeUpdated(boost::none, boost::none, true, changedChunks);
auto v1 = ChunkVersion{2, 2, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -449,6 +452,7 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 1);
@@ -467,7 +471,7 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
+ auto rt1 = rt.makeUpdated(boost::none, boost::none, true, changedChunks);
auto v1 = ChunkVersion{2, 2, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -495,6 +499,7 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 2);
@@ -509,7 +514,7 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
ChunkVersion{3, 1, epoch, boost::none /* timestamp */},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
+ auto rt1 = rt.makeUpdated(boost::none, boost::none, true, changedChunks);
auto v1 = ChunkVersion{3, 1, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -546,6 +551,7 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
boost::none,
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
+ boost::none /* chunkSizeBytes */,
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 3);
@@ -561,7 +567,7 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
ChunkVersion{3, 1, epoch, boost::none /* timestamp */},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
+ auto rt1 = rt.makeUpdated(boost::none, boost::none, true, changedChunks);
auto v1 = ChunkVersion{3, 1, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -593,6 +599,7 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 3);
@@ -608,7 +615,7 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
ChunkVersion{3, 1, epoch, boost::none /* timestamp */},
kThisShard}};
- auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
+ auto rt1 = rt.makeUpdated(boost::none, boost::none, true, changedChunks);
auto v1 = ChunkVersion{3, 1, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -658,6 +665,7 @@ TEST_F(RoutingTableHistoryTest, TestFlatten) {
boost::none /* timestamp */,
boost::none /* timeseriesFields */,
boost::none,
+ boost::none /* chunkSizeBytes */,
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 2);
diff --git a/src/mongo/shell/utils_sh.js b/src/mongo/shell/utils_sh.js
index ff8f20a1043..c549278e9e5 100644
--- a/src/mongo/shell/utils_sh.js
+++ b/src/mongo/shell/utils_sh.js
@@ -94,6 +94,9 @@ sh.help = function() {
print(
"\tsh.balancerCollectionStatus(fullName) " +
"returns wheter the specified collection is balanced or the balancer needs to take more actions on it");
+ print(
+ "\tsh.configureCollectionAutoSplitter(fullName, params) " +
+ "configure both the default chunk size and the auto-splitting behaviour for a collection");
};
sh.status = function(verbose, configDB) {
@@ -553,6 +556,14 @@ sh.balancerCollectionStatus = function(coll) {
return sh._adminCommand({balancerCollectionStatus: coll}, true);
};
+sh.configureCollectionAutoSplitter = function(coll, opts) {
+ let cmd = {configureCollectionAutoSplitter: coll};
+ if (opts) {
+ cmd = Object.assign(cmd, opts);
+ }
+ return sh._adminCommand(cmd, true);
+};
+
function printShardingStatus(configDB, verbose) {
// configDB is a DB object that contains the sharding metadata of interest.
// Defaults to the db named "config" on the current connection.
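The helper wraps the admin command shown earlier; a typical call, with an illustrative collection name and option values:

    // Same effect as the raw adminCommand form: 64 MB default chunk size,
    // auto-splitter disabled for test.foo.
    sh.configureCollectionAutoSplitter("test.foo",
                                       {defaultChunkSize: 64, enableAutoSplitter: false});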