commit    6078864280613cf9abf901855f09ba03d18a5953
tree      43282a1dac75e3a2dcf7a4c20d41501f486bffa1
parent    53078d274792e569f03c6df044edf1d525163452
author    Tommaso Tocci <tommaso.tocci@mongodb.com>  2021-07-27 14:02:56 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-07-27 14:25:04 +0000

SERVER-58652 Remove legacy ConfigsvrShardCollection command
-rw-r--r--  jstests/auth/list_all_sessions.js | 9
-rw-r--r--  jstests/core/views/views_all_commands.js | 6
-rw-r--r--  jstests/libs/override_methods/network_error_and_txn_override.js | 1
-rw-r--r--  jstests/libs/override_methods/read_and_write_concern_helpers.js | 1
-rw-r--r--  jstests/noPassthrough/verify_session_cache_updates.js | 35
-rw-r--r--  jstests/replsets/db_reads_while_recovering_all_commands.js | 2
-rw-r--r--  jstests/replsets/tenant_migration_concurrent_writes_on_donor.js | 4
-rw-r--r--  jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js | 3
-rw-r--r--  jstests/sharding/read_write_concern_defaults_application.js | 6
-rw-r--r--  jstests/sharding/safe_secondary_reads_drop_recreate.js | 3
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js | 3
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js | 3
-rw-r--r--  jstests/sharding/sessions_collection_auto_healing.js | 37
-rw-r--r--  src/mongo/db/s/README.md | 7
-rw-r--r--  src/mongo/db/s/SConscript | 5
-rw-r--r--  src/mongo/db/s/active_shard_collection_registry.cpp | 186
-rw-r--r--  src/mongo/db/s/active_shard_collection_registry.h | 181
-rw-r--r--  src/mongo/db/s/active_shard_collection_registry_test.cpp | 236
-rw-r--r--  src/mongo/db/s/config/configsvr_shard_collection_command.cpp | 370
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.h | 7
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp | 5
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h | 6
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp | 135
-rw-r--r--  src/mongo/db/s/create_collection_coordinator.cpp | 5
-rw-r--r--  src/mongo/db/s/sessions_collection_config_server.cpp | 29
-rw-r--r--  src/mongo/db/s/shard_collection_legacy.cpp | 711
-rw-r--r--  src/mongo/db/s/shard_collection_legacy.h | 49
-rw-r--r--  src/mongo/db/s/shardsvr_create_collection_command.cpp | 234
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection_command.cpp | 111
-rw-r--r--  src/mongo/s/SConscript | 1
-rw-r--r--  src/mongo/s/request_types/shard_collection.idl | 132
31 files changed, 174 insertions(+), 2349 deletions(-)
diff --git a/jstests/auth/list_all_sessions.js b/jstests/auth/list_all_sessions.js
index 9ba823564a7..52b48825577 100644
--- a/jstests/auth/list_all_sessions.js
+++ b/jstests/auth/list_all_sessions.js
@@ -15,9 +15,6 @@ function runListAllSessionsTest(mongod) {
const config = mongod.getDB("config");
const pipeline = [{'$listSessions': {allUsers: true}}];
- function listSessions() {
- return config.system.sessions.aggregate(pipeline);
- }
admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
assert(admin.auth('admin', 'pass'));
@@ -42,7 +39,11 @@ function runListAllSessionsTest(mongod) {
// Ensure that the cache now contains the session and is visible by admin
assert(admin.auth('admin', 'pass'));
- const resultArray = listSessions().toArray();
+ const resultArray =
+ config.system.sessions
+ .aggregate(
+ [{'$listSessions': {allUsers: true}}, {'$sort': {lastUse: -1}}, {'$limit': 1}])
+ .toArray();
assert.eq(resultArray.length, 1);
const cacheid = resultArray[0]._id.id;
assert(cacheid !== undefined);
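For context on the change above: the test no longer assumes config.system.sessions holds exactly one entry; it sorts by lastUse and takes the newest session so that pre-existing entries cannot break the assertion. A minimal sketch of that idiom, assuming a connection `conn` with an authenticated admin user and test commands enabled (refreshLogicalSessionCacheNow is a test-only command):

```javascript
// Flush the in-memory session cache to config.system.sessions, then read
// back only the most recently used session entry.
const config = conn.getDB("config");
assert.commandWorked(conn.adminCommand({refreshLogicalSessionCacheNow: 1}));
const newest = config.system.sessions
                   .aggregate([
                       {'$listSessions': {allUsers: true}},  // sessions for all users
                       {'$sort': {lastUse: -1}},             // newest first
                       {'$limit': 1}
                   ])
                   .toArray();
assert.eq(1, newest.length);
```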
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 3f23daaab15..24246ddabb4 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -109,7 +109,8 @@ let viewsCommandTests = {
_configsvrRemoveTags: {skip: isAnInternalCommand},
_configsvrReshardCollection: {skip: isAnInternalCommand},
_configsvrSetAllowMigrations: {skip: isAnInternalCommand},
- _configsvrShardCollection: {skip: isAnInternalCommand},
+ _configsvrShardCollection:
+ {skip: isAnInternalCommand}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_configsvrUpdateZoneKeyRange: {skip: isAnInternalCommand},
_flushDatabaseCacheUpdates: {skip: isUnrelated},
_flushDatabaseCacheUpdatesWithWriteConcern: {skip: isUnrelated},
@@ -146,7 +147,8 @@ let viewsCommandTests = {
_shardsvrRenameCollectionParticipantUnblock: {skip: isAnInternalCommand},
_shardsvrReshardCollection: {skip: isAnInternalCommand},
_shardsvrReshardingOperationTime: {skip: isAnInternalCommand},
- _shardsvrShardCollection: {skip: isAnInternalCommand},
+ _shardsvrShardCollection:
+ {skip: isAnInternalCommand}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_transferMods: {skip: isAnInternalCommand},
_vectorClockPersist: {skip: isAnInternalCommand},
abortReshardCollection: {skip: isUnrelated},
diff --git a/jstests/libs/override_methods/network_error_and_txn_override.js b/jstests/libs/override_methods/network_error_and_txn_override.js
index 6ecb28a82dc..d42e267ec7c 100644
--- a/jstests/libs/override_methods/network_error_and_txn_override.js
+++ b/jstests/libs/override_methods/network_error_and_txn_override.js
@@ -83,7 +83,6 @@ const kNonRetryableCommands = new Set([
"_configsvrMovePrimary",
"_configsvrRemoveShard",
"_configsvrRemoveShardFromZone",
- "_configsvrShardCollection",
"_configsvrUpdateZoneKeyRange",
"_mergeAuthzCollections",
"_recvChunkStart",
diff --git a/jstests/libs/override_methods/read_and_write_concern_helpers.js b/jstests/libs/override_methods/read_and_write_concern_helpers.js
index 9ebe41858e7..0e83b39d6b3 100644
--- a/jstests/libs/override_methods/read_and_write_concern_helpers.js
+++ b/jstests/libs/override_methods/read_and_write_concern_helpers.js
@@ -40,7 +40,6 @@ var kCommandsSupportingWriteConcern = new Set([
"_configsvrMovePrimary",
"_configsvrRemoveShard",
"_configsvrRemoveShardFromZone",
- "_configsvrShardCollection",
"_configsvrUpdateZoneKeyRange",
"_mergeAuthzCollections",
"_recvChunkStart",
diff --git a/jstests/noPassthrough/verify_session_cache_updates.js b/jstests/noPassthrough/verify_session_cache_updates.js
index e0c15dac3d6..6303931ef60 100644
--- a/jstests/noPassthrough/verify_session_cache_updates.js
+++ b/jstests/noPassthrough/verify_session_cache_updates.js
@@ -12,49 +12,54 @@ function runTest(conn) {
conn.getDB("test").test.save({a: i});
}
- function verify(conn, nRecords) {
- conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
- assert.eq(nRecords, conn.getDB("config").system.sessions.find({}).count());
+ function countSessions(conn, since) {
+ conn.adminCommand({refreshLogicalSessionCacheNow: 1});
+ return conn.getDB("config").system.sessions.countDocuments({lastUse: {"$gt": since}});
}
- function getLastUse(conn) {
+ function getLatestSessionTime(conn) {
conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
- return conn.getDB("config").system.sessions.findOne({}).lastUse;
+ let lastSession = conn.getDB("config")
+ .system.sessions.aggregate([{"$sort": {lastUse: -1}}, {$limit: 1}])
+ .toArray();
+ return (lastSession.length ? lastSession[0].lastUse : new Date(0));
}
+ let origSessTime = getLatestSessionTime(conn);
+
// initially we have no sessions
- verify(conn, 0);
+ assert.eq(0, countSessions(conn, origSessTime));
// Calling startSession in the shell doesn't initiate the session
var session = conn.startSession();
- verify(conn, 0);
+ assert.eq(0, countSessions(conn, origSessTime));
// running a command that doesn't require auth does touch
session.getDatabase("admin").runCommand("hello");
- verify(conn, 1);
+ assert.eq(1, countSessions(conn, origSessTime));
// running a session updating command does touch
session.getDatabase("admin").runCommand({serverStatus: 1});
- verify(conn, 1);
+ assert.eq(1, countSessions(conn, origSessTime));
// running a session updating command updates last use
{
- var lastUse = getLastUse(conn);
+ var lastUse = getLatestSessionTime(conn);
sleep(200);
session.getDatabase("admin").runCommand({serverStatus: 1});
- verify(conn, 1);
- assert.gt(getLastUse(conn), lastUse);
+ assert.eq(1, countSessions(conn, origSessTime));
+ assert.gt(getLatestSessionTime(conn), lastUse);
}
// verify that reading from a cursor updates last use
{
var cursor = session.getDatabase("test").test.find({}).batchSize(1);
cursor.next();
- var lastUse = getLastUse(conn);
+ var lastUse = getLatestSessionTime(conn);
sleep(200);
- verify(conn, 1);
+ assert.eq(1, countSessions(conn, origSessTime));
cursor.next();
- assert.gt(getLastUse(conn), lastUse);
+ assert.gt(getLatestSessionTime(conn), lastUse);
}
session.endSession();
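The rewrite above replaces absolute counts with a baseline: record the newest lastUse before the test begins and count only sessions used after it, so sessions left behind by other machinery no longer fail the test. A sketch of the idiom under the same assumptions as the test (a mongod connection `conn`, test commands enabled):

```javascript
function latestSessionTime(conn) {
    // Flush cached sessions so config.system.sessions reflects reality.
    conn.adminCommand({refreshLogicalSessionCacheNow: 1});
    const last = conn.getDB("config")
                     .system.sessions.aggregate([{"$sort": {lastUse: -1}}, {$limit: 1}])
                     .toArray();
    return last.length ? last[0].lastUse : new Date(0);
}

const baseline = latestSessionTime(conn);
const session = conn.startSession();
session.getDatabase("admin").runCommand({hello: 1});  // first command records the session
conn.adminCommand({refreshLogicalSessionCacheNow: 1});
// Only sessions touched after the baseline are counted.
assert.eq(1, conn.getDB("config").system.sessions.countDocuments({lastUse: {"$gt": baseline}}));
session.endSession();
```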
diff --git a/jstests/replsets/db_reads_while_recovering_all_commands.js b/jstests/replsets/db_reads_while_recovering_all_commands.js
index 45f4ff90475..74155c28dc1 100644
--- a/jstests/replsets/db_reads_while_recovering_all_commands.js
+++ b/jstests/replsets/db_reads_while_recovering_all_commands.js
@@ -52,7 +52,6 @@ const allCommands = {
_configsvrRenameCollectionMetadata: {skip: isAnInternalCommand},
_configsvrReshardCollection: {skip: isPrimaryOnly},
_configsvrSetAllowMigrations: {skip: isPrimaryOnly},
- _configsvrShardCollection: {skip: isPrimaryOnly},
_configsvrUpdateZoneKeyRange: {skip: isPrimaryOnly},
_flushDatabaseCacheUpdates: {skip: isPrimaryOnly},
_flushDatabaseCacheUpdatesWithWriteConcern: {skip: isPrimaryOnly},
@@ -85,7 +84,6 @@ const allCommands = {
_shardsvrRenameCollectionParticipantUnblock: {skip: isAnInternalCommand},
_shardsvrDropDatabase: {skip: isPrimaryOnly},
_shardsvrDropDatabaseParticipant: {skip: isPrimaryOnly},
- _shardsvrShardCollection: {skip: isPrimaryOnly},
_shardsvrReshardCollection: {skip: isPrimaryOnly},
_shardsvrReshardingOperationTime: {skip: isPrimaryOnly},
_shardsvrRefineCollectionShardKey: {skip: isPrimaryOnly},
diff --git a/jstests/replsets/tenant_migration_concurrent_writes_on_donor.js b/jstests/replsets/tenant_migration_concurrent_writes_on_donor.js
index c8d33ccb03c..d6677e85b4c 100644
--- a/jstests/replsets/tenant_migration_concurrent_writes_on_donor.js
+++ b/jstests/replsets/tenant_migration_concurrent_writes_on_donor.js
@@ -469,7 +469,6 @@ const testCases = {
_configsvrRefineCollectionShardKey: {skip: isNotRunOnUserDatabase},
_configsvrRemoveShard: {skip: isNotRunOnUserDatabase},
_configsvrRemoveShardFromZone: {skip: isNotRunOnUserDatabase},
- _configsvrShardCollection: {skip: isNotRunOnUserDatabase},
_configsvrUpdateZoneKeyRange: {skip: isNotRunOnUserDatabase},
_flushDatabaseCacheUpdates: {skip: isNotRunOnUserDatabase},
_flushDatabaseCacheUpdatesWithWriteConcern: {skip: isNotRunOnUserDatabase},
@@ -491,7 +490,8 @@ const testCases = {
_shardsvrCreateCollection: {skip: isOnlySupportedOnShardedCluster},
_shardsvrCreateCollectionParticipant: {skip: isOnlySupportedOnShardedCluster},
_shardsvrMovePrimary: {skip: isNotRunOnUserDatabase},
- _shardsvrShardCollection: {skip: isNotRunOnUserDatabase},
+ _shardsvrShardCollection:
+ {skip: isNotRunOnUserDatabase}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_shardsvrRenameCollection: {skip: isOnlySupportedOnShardedCluster},
_transferMods: {skip: isNotRunOnUserDatabase},
abortTransaction: {
diff --git a/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js b/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
index bd64717007e..24efb253106 100644
--- a/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
+++ b/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
@@ -142,9 +142,6 @@ checkCommandConfigSvr({_configsvrMovePrimary: dbName, to: st.shard0.name},
// shardCollection
checkCommandMongos(
{shardCollection: ns, key: {_id: 1}}, setupFuncs.enableSharding, cleanupFuncs.dropDatabase);
-checkCommandConfigSvr({_configsvrShardCollection: ns, key: {_id: 1}},
- setupFuncs.enableSharding,
- cleanupFuncs.dropDatabase);
// createDatabase
// Don't check createDatabase against mongos: there is no createDatabase command exposed on
diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js
index 1b3f58c268c..c14c8430dd1 100644
--- a/jstests/sharding/read_write_concern_defaults_application.js
+++ b/jstests/sharding/read_write_concern_defaults_application.js
@@ -109,7 +109,8 @@ let testCases = {
_configsvrRenameCollectionMetadata: {skip: "internal command"},
_configsvrReshardCollection: {skip: "internal command"},
_configsvrSetAllowMigrations: {skip: "internal command"},
- _configsvrShardCollection: {skip: "internal command"},
+ _configsvrShardCollection:
+ {skip: "internal command"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_configsvrUpdateZoneKeyRange: {skip: "internal command"},
_flushDatabaseCacheUpdates: {skip: "internal command"},
_flushDatabaseCacheUpdatesWithWriteConcern: {skip: "internal command"},
@@ -145,7 +146,8 @@ let testCases = {
_shardsvrRenameCollectionParticipantUnblock: {skip: "internal command"},
_shardsvrReshardCollection: {skip: "internal command"},
_shardsvrReshardingOperationTime: {skip: "internal command"},
- _shardsvrShardCollection: {skip: "internal command"},
+ _shardsvrShardCollection:
+ {skip: "internal command"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_transferMods: {skip: "internal command"},
_vectorClockPersist: {skip: "internal command"},
abortReshardCollection: {skip: "does not accept read or write concern"},
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index 49d1c3741ea..dcf463650f8 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -64,7 +64,8 @@ let testCases = {
_configsvrMovePrimary: {skip: "primary only"},
_configsvrRemoveShardFromZone: {skip: "primary only"},
_configsvrReshardCollection: {skip: "primary only"},
- _configsvrShardCollection: {skip: "primary only"},
+ _configsvrShardCollection:
+ {skip: "primary only"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_configsvrUpdateZoneKeyRange: {skip: "primary only"},
_flushReshardingStateChange: {skip: "does not return user data"},
_flushRoutingTableCacheUpdates: {skip: "does not return user data"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index ca74970b273..86b80c6924f 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -75,7 +75,8 @@ let testCases = {
_configsvrRemoveTags: {skip: "primary only"},
_configsvrReshardCollection: {skip: "primary only"},
_configsvrSetAllowMigrations: {skip: "primary only"},
- _configsvrShardCollection: {skip: "primary only"},
+ _configsvrShardCollection:
+ {skip: "primary only"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_configsvrUpdateZoneKeyRange: {skip: "primary only"},
_flushReshardingStateChange: {skip: "does not return user data"},
_flushRoutingTableCacheUpdates: {skip: "does not return user data"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index 6c460316727..1036b7df678 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -65,7 +65,8 @@ let testCases = {
_configsvrRemoveTags: {skip: "primary only"},
_configsvrReshardCollection: {skip: "primary only"},
_configsvrSetAllowMigrations: {skip: "primary only"},
- _configsvrShardCollection: {skip: "primary only"},
+ _configsvrShardCollection:
+ {skip: "primary only"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_configsvrUpdateZoneKeyRange: {skip: "primary only"},
_flushReshardingStateChange: {skip: "does not return user data"},
_flushRoutingTableCacheUpdates: {skip: "does not return user data"},
diff --git a/jstests/sharding/sessions_collection_auto_healing.js b/jstests/sharding/sessions_collection_auto_healing.js
index 9d80d0dcc14..70d5a9cc0a1 100644
--- a/jstests/sharding/sessions_collection_auto_healing.js
+++ b/jstests/sharding/sessions_collection_auto_healing.js
@@ -15,10 +15,8 @@ TestData.disableImplicitSessions = true;
var st = new ShardingTest({shards: 0});
var configSvr = st.configRS.getPrimary();
-var configAdmin = configSvr.getDB("admin");
var mongos = st.s;
-var mongosAdmin = mongos.getDB("admin");
var mongosConfig = mongos.getDB("config");
// Test that we can use sessions on the config server before we add any shards.
@@ -26,7 +24,7 @@ var mongosConfig = mongos.getDB("config");
validateSessionsCollection(configSvr, false, false);
validateSessionsCollection(mongos, false, false);
- assert.commandWorked(configAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(configSvr.adminCommand({startSession: 1}));
validateSessionsCollection(configSvr, false, false);
validateSessionsCollection(mongos, false, false);
@@ -37,7 +35,7 @@ var mongosConfig = mongos.getDB("config");
validateSessionsCollection(configSvr, false, false);
validateSessionsCollection(mongos, false, false);
- assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(mongos.adminCommand({startSession: 1}));
validateSessionsCollection(configSvr, false, false);
validateSessionsCollection(mongos, false, false);
@@ -46,9 +44,9 @@ var mongosConfig = mongos.getDB("config");
// Test that the config server does not create the sessions collection
// if there are not any shards.
{
- assert.eq(mongosConfig.shards.count(), 0);
+ assert.eq(mongosConfig.shards.countDocuments({}), 0);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(configSvr.adminCommand({refreshLogicalSessionCacheNow: 1}));
validateSessionsCollection(configSvr, false, false);
}
@@ -59,7 +57,6 @@ rs.startSet({shardsvr: ""});
rs.initiate();
var shard = rs.getPrimary();
-var shardAdmin = shard.getDB("admin");
var shardConfig = shard.getDB("config");
// Test that we can add this shard, even with a local config.system.sessions collection,
@@ -68,8 +65,8 @@ var shardConfig = shard.getDB("config");
shardConfig.system.sessions.insert({"hey": "you"});
validateSessionsCollection(shard, true, false);
- assert.commandWorked(mongosAdmin.runCommand({addShard: rs.getURL()}));
- assert.eq(mongosConfig.shards.count(), 1);
+ assert.commandWorked(mongos.adminCommand({addShard: rs.getURL()}));
+ assert.eq(mongosConfig.shards.countDocuments({}), 1);
validateSessionsCollection(shard, false, false);
}
@@ -79,7 +76,7 @@ var shardConfig = shard.getDB("config");
validateSessionsCollection(configSvr, false, false);
validateSessionsCollection(shard, false, false);
- assert.commandWorked(shardAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(shard.adminCommand({startSession: 1}));
validateSessionsCollection(configSvr, false, false);
validateSessionsCollection(shard, false, false);
@@ -92,7 +89,7 @@ var shardConfig = shard.getDB("config");
validateSessionsCollection(shard, false, false);
validateSessionsCollection(mongos, false, false);
- assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(mongos.adminCommand({startSession: 1}));
validateSessionsCollection(configSvr, false, false);
validateSessionsCollection(shard, false, false);
@@ -105,7 +102,7 @@ var shardConfig = shard.getDB("config");
validateSessionsCollection(configSvr, false, false);
validateSessionsCollection(shard, false, false);
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(shard.adminCommand({refreshLogicalSessionCacheNow: 1}));
validateSessionsCollection(configSvr, false, false);
validateSessionsCollection(shard, false, false);
@@ -116,23 +113,23 @@ var shardConfig = shard.getDB("config");
{
validateSessionsCollection(shard, false, false);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(configSvr.adminCommand({refreshLogicalSessionCacheNow: 1}));
validateSessionsCollection(shard, true, true);
// We will have two sessions because of the session used in the shardCollection's retryable
// write to shard the sessions collection. It will disappear after we run the refresh
// function on the shard.
- assert.eq(shardConfig.system.sessions.count(), 2, "did not flush config's sessions");
+ assert.eq(shardConfig.system.sessions.countDocuments({}), 2, "did not flush config's sessions");
// Now, if we do refreshes on the other servers, their in-mem records will
// be written to the collection.
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- assert.eq(shardConfig.system.sessions.count(), 2, "did not flush shard's sessions");
+ assert.commandWorked(shard.adminCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.eq(shardConfig.system.sessions.countDocuments({}), 3, "did not flush shard's sessions");
rs.awaitLastOpCommitted();
- assert.commandWorked(mongosAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- assert.eq(shardConfig.system.sessions.count(), 4, "did not flush mongos' sessions");
+ assert.commandWorked(mongos.adminCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.eq(shardConfig.system.sessions.countDocuments({}), 5, "did not flush mongos' sessions");
}
// Test that if we drop the index on the sessions collection, only a refresh on the config
@@ -142,12 +139,12 @@ var shardConfig = shard.getDB("config");
validateSessionsCollection(shard, true, false);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(configSvr.adminCommand({refreshLogicalSessionCacheNow: 1}));
validateSessionsCollection(shard, true, true);
assert.commandWorked(shardConfig.system.sessions.dropIndex({lastUse: 1}));
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(shard.adminCommand({refreshLogicalSessionCacheNow: 1}));
validateSessionsCollection(shard, true, false);
}
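Besides dropping the unused `configAdmin`/`mongosAdmin`/`shardAdmin` handles in favor of `adminCommand()`, the test above switches every `count()` to `countDocuments({})`. The distinction matters for these assertions: `count()` with no predicate may be answered from collection metadata, while `countDocuments({})` runs an aggregation and counts the documents that actually match. A short illustration (not from the patch itself), reusing the `shard` connection defined in the test:

```javascript
const sessions = shard.getDB("config").system.sessions;
// May be served from metadata; can be inaccurate, e.g. after an unclean
// shutdown or on sharded collections with orphaned documents.
const fast = sessions.count();
// Executes a real count of matching documents.
const accurate = sessions.countDocuments({});
print("count(): " + fast + ", countDocuments({}): " + accurate);
```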
diff --git a/src/mongo/db/s/README.md b/src/mongo/db/s/README.md
index d931ed581c7..8dd759a82cd 100644
--- a/src/mongo/db/s/README.md
+++ b/src/mongo/db/s/README.md
@@ -456,13 +456,6 @@ The business logic for most DDL commands that the config server coordinates live
[**ShardingCatalogManager class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/sharding_catalog_manager.h#L86),
including the logic for
[**dropCollection**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp#L417).
-However, note that the ShardingCatalogManager class also contains business logic to just commit some
-operations that are otherwise coordinated by a shard.
-* Example of a DDL command (shard collection) for which the config server
-[**hands off coordination**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/configsvr_shard_collection_command.cpp)
-to a shard. The business logic for such commands is in the shard's command body, such as the logic
-for
-[**shardCollection**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/shardsvr_shard_collection.cpp#L7830).
## Important caveats
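The deleted README passage described the old two-command hand-off: _configsvrShardCollection on the config server delegating coordination to _shardsvrShardCollection on a shard. With this patch that path is gone and the DDL coordinator (create_collection_coordinator.cpp) drives the operation instead. The user-facing surface is unchanged; a sketch from a mongos shell (database and key below are example data):

```javascript
// shardCollection is still issued against mongos exactly as before; only the
// internal hand-off removed by this patch changes.
assert.commandWorked(db.adminCommand({enableSharding: "testDB"}));
assert.commandWorked(
    db.adminCommand({shardCollection: "testDB.coll", key: {_id: "hashed"}}));
```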
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index f43fcce97e6..8057768e20d 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -35,7 +35,6 @@ env.Library(
source=[
'active_migrations_registry.cpp',
'active_move_primaries_registry.cpp',
- 'active_shard_collection_registry.cpp',
'auto_split_vector.cpp',
'chunk_move_write_concern_options.cpp',
'chunk_splitter.cpp',
@@ -91,7 +90,6 @@ env.Library(
'scoped_operation_completion_sharding_actions.cpp',
'session_catalog_migration_destination.cpp',
'session_catalog_migration_source.cpp',
- 'shard_collection_legacy.cpp',
'shard_filtering_metadata_refresh.cpp',
'shard_identity_rollback_notifier.cpp',
'shard_key_util.cpp',
@@ -320,7 +318,6 @@ env.Library(
'config/configsvr_rename_collection_metadata_command.cpp',
'config/configsvr_reshard_collection_cmd.cpp',
'config/configsvr_set_allow_migrations_command.cpp',
- 'config/configsvr_shard_collection_command.cpp',
'config/configsvr_split_chunk_command.cpp',
'config/configsvr_update_zone_key_range_command.cpp',
'create_collection_coordinator.cpp',
@@ -373,7 +370,6 @@ env.Library(
'shardsvr_rename_collection_participant_command.cpp',
'shardsvr_reshard_collection_command.cpp',
'shardsvr_resharding_operation_time_command.cpp',
- 'shardsvr_shard_collection_command.cpp',
'split_chunk_command.cpp',
'split_vector_command.cpp',
'txn_two_phase_commit_cmds.cpp',
@@ -465,7 +461,6 @@ env.CppUnitTest(
source=[
'active_migrations_registry_test.cpp',
'active_move_primaries_registry_test.cpp',
- 'active_shard_collection_registry_test.cpp',
'balancer/balance_stats_test.cpp',
'chunk_split_state_driver_test.cpp',
'collection_metadata_filtering_test.cpp',
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
deleted file mode 100644
index 73ce57be9fa..00000000000
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ /dev/null
@@ -1,186 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/s/active_shard_collection_registry.h"
-
-#include "mongo/db/catalog_raii.h"
-#include "mongo/db/s/collection_sharding_runtime.h"
-#include "mongo/db/service_context.h"
-
-namespace mongo {
-namespace {
-
-const auto getRegistry = ServiceContext::declareDecoration<ActiveShardCollectionRegistry>();
-
-bool ActiveShardsvrShardCollectionEqualsNewRequest(
- const ShardsvrShardCollectionRequest& activeRequest,
- const ShardsvrShardCollectionRequest& newRequest) {
- if (activeRequest.get_shardsvrShardCollection().get() !=
- newRequest.get_shardsvrShardCollection().get())
- return false;
- if (activeRequest.getKey().woCompare(newRequest.getKey()) != 0)
- return false;
- if (activeRequest.getUnique() != newRequest.getUnique())
- return false;
- if (activeRequest.getNumInitialChunks() != newRequest.getNumInitialChunks())
- return false;
- if ((activeRequest.getCollation() && newRequest.getCollation()) &&
- (activeRequest.getCollation().get().woCompare(newRequest.getCollation().get()) != 0))
- return false;
- if (activeRequest.getGetUUIDfromPrimaryShard() != newRequest.getGetUUIDfromPrimaryShard())
- return false;
-
- if (activeRequest.getInitialSplitPoints() && newRequest.getInitialSplitPoints()) {
- if (activeRequest.getInitialSplitPoints().get().size() !=
- newRequest.getInitialSplitPoints().get().size()) {
- return false;
- } else {
- for (std::size_t i = 0; i < activeRequest.getInitialSplitPoints().get().size(); i++) {
- if (activeRequest.getInitialSplitPoints().get()[i].woCompare(
- newRequest.getInitialSplitPoints().get()[i]) != 0)
- return false;
- }
- }
- }
-
- return true;
-}
-
-} // namespace
-
-ActiveShardCollectionRegistry::ActiveShardCollectionRegistry() = default;
-
-ActiveShardCollectionRegistry::~ActiveShardCollectionRegistry() {
- invariant(_activeShardCollectionMap.empty());
-}
-
-ActiveShardCollectionRegistry& ActiveShardCollectionRegistry::get(ServiceContext* service) {
- return getRegistry(service);
-}
-
-ActiveShardCollectionRegistry& ActiveShardCollectionRegistry::get(OperationContext* opCtx) {
- return get(opCtx->getServiceContext());
-}
-
-StatusWith<ScopedShardCollection> ActiveShardCollectionRegistry::registerShardCollection(
- const ShardsvrShardCollectionRequest& request) {
- stdx::lock_guard<Latch> lk(_mutex);
- std::string nss = request.get_shardsvrShardCollection().get().ns();
-
- auto iter = _activeShardCollectionMap.find(nss);
- if (iter == _activeShardCollectionMap.end()) {
- auto activeShardCollectionState = std::make_shared<ActiveShardCollectionState>(request);
- _activeShardCollectionMap.try_emplace(nss, activeShardCollectionState);
-
- return {ScopedShardCollection(
- nss, this, true, activeShardCollectionState->_responsePromise.getFuture())};
- } else {
- auto activeShardCollectionState = iter->second;
-
- if (ActiveShardsvrShardCollectionEqualsNewRequest(activeShardCollectionState->activeRequest,
- request)) {
- return {ScopedShardCollection(
- nss, nullptr, false, activeShardCollectionState->_responsePromise.getFuture())};
- }
- return activeShardCollectionState->constructErrorStatus(request);
- }
-}
-
-void ActiveShardCollectionRegistry::_clearShardCollection(std::string nss) {
- stdx::lock_guard<Latch> lk(_mutex);
- auto iter = _activeShardCollectionMap.find(nss);
- invariant(iter != _activeShardCollectionMap.end());
- _activeShardCollectionMap.erase(nss);
-}
-
-void ActiveShardCollectionRegistry::_setResponseOrError(
- std::string nss, StatusWith<boost::optional<CreateCollectionResponse>> swResponse) {
- stdx::lock_guard<Latch> lk(_mutex);
- auto iter = _activeShardCollectionMap.find(nss);
- invariant(iter != _activeShardCollectionMap.end());
- auto activeShardCollectionState = iter->second;
- activeShardCollectionState->_responsePromise.setFrom(swResponse);
-}
-
-Status ActiveShardCollectionRegistry::ActiveShardCollectionState::constructErrorStatus(
- const ShardsvrShardCollectionRequest& request) const {
- return {ErrorCodes::ConflictingOperationInProgress,
- str::stream() << "Unable to shard collection "
- << request.get_shardsvrShardCollection().get().ns()
- << " with arguments: " << request.toBSON()
- << " because this shard is currently running shard collection on this "
- << "collection with arguments: " << activeRequest.toBSON()};
-}
-
-ScopedShardCollection::ScopedShardCollection(
- std::string nss,
- ActiveShardCollectionRegistry* registry,
- bool shouldExecute,
- SharedSemiFuture<boost::optional<CreateCollectionResponse>> responseFuture)
- : _nss(nss),
- _registry(registry),
- _shouldExecute(shouldExecute),
- _responseFuture(std::move(responseFuture)) {}
-
-ScopedShardCollection::~ScopedShardCollection() {
- if (_registry && _shouldExecute) {
- _registry->_clearShardCollection(_nss);
- }
-}
-
-ScopedShardCollection::ScopedShardCollection(ScopedShardCollection&& other) {
- *this = std::move(other);
-}
-
-ScopedShardCollection& ScopedShardCollection::operator=(ScopedShardCollection&& other) {
- if (&other != this) {
- _registry = other._registry;
- other._registry = nullptr;
- _shouldExecute = other._shouldExecute;
- _responseFuture = std::move(other._responseFuture);
- _nss = std::move(other._nss);
- }
-
- return *this;
-}
-
-void ScopedShardCollection::emplaceResponse(
- StatusWith<boost::optional<CreateCollectionResponse>> swResponse) {
- invariant(_shouldExecute);
- _registry->_setResponseOrError(_nss, swResponse);
-}
-
-SharedSemiFuture<boost::optional<CreateCollectionResponse>> ScopedShardCollection::getResponse() {
- invariant(!_shouldExecute);
- return _responseFuture;
-}
-
-} // namespace mongo
diff --git a/src/mongo/db/s/active_shard_collection_registry.h b/src/mongo/db/s/active_shard_collection_registry.h
deleted file mode 100644
index 6c0c5333457..00000000000
--- a/src/mongo/db/s/active_shard_collection_registry.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <boost/optional.hpp>
-#include <memory>
-
-#include "mongo/platform/mutex.h"
-#include "mongo/s/request_types/shard_collection_gen.h"
-#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
-#include "mongo/util/concurrency/notification.h"
-#include "mongo/util/string_map.h"
-
-namespace mongo {
-
-class OperationContext;
-class ScopedShardCollection;
-template <typename T>
-class StatusWith;
-
-/**
- * Thread-safe object that keeps track of any active shardCollection commands running. There is only
- * one instance of this object per shard.
- */
-class ActiveShardCollectionRegistry {
- ActiveShardCollectionRegistry(const ActiveShardCollectionRegistry&) = delete;
- ActiveShardCollectionRegistry& operator=(const ActiveShardCollectionRegistry&) = delete;
-
-public:
- ActiveShardCollectionRegistry();
- ~ActiveShardCollectionRegistry();
-
- static ActiveShardCollectionRegistry& get(ServiceContext* service);
- static ActiveShardCollectionRegistry& get(OperationContext* opCtx);
-
- /**
- * If the collection being requested to shard is not already being sharded on this shard,
- * registers an active shardCollection with the specified arguments. Returns a
- * ScopedShardCollection object, which must be signaled by the caller before it goes out of
- * scope.
- *
- * If this collection is already being sharded on this shard and it has the exact same
- * arguments, returns a ScopedShardCollection. The ScopedShardCollection can be used to join the
- * already running shard collection.
- *
- * Otherwise returns a ConflictingOperationInProgress error.
- */
- StatusWith<ScopedShardCollection> registerShardCollection(
- const ShardsvrShardCollectionRequest& request);
-
-private:
- friend class ScopedShardCollection;
-
- // Describes the state of a currently active shardCollection operation
- struct ActiveShardCollectionState {
- ActiveShardCollectionState(ShardsvrShardCollectionRequest inRequest)
- : activeRequest(std::move(inRequest)) {}
-
- /**
- * Constructs an error status to return in the case of conflicting operations.
- */
- Status constructErrorStatus(const ShardsvrShardCollectionRequest& request) const;
-
- // Exact arguments of the currently active operation
- ShardsvrShardCollectionRequest activeRequest;
-
- /**
- * Promise that contains the response of the command so that a shardCollection object that
- * is in 'join' mode has access to the response object.
- */
- SharedPromise<boost::optional<CreateCollectionResponse>> _responsePromise;
- };
-
- /**
- * Unregisters a previously registered namespace with an ongoing shardCollection. Must only be
- * called if a previous call to registerShardCollection has succeeded.
- */
- void _clearShardCollection(std::string nss);
-
- // Fulfills the promise and stores the response for the command if the status is OK or sets an
- // error on the promise if it is not.
- void _setResponseOrError(std::string nss,
- StatusWith<boost::optional<CreateCollectionResponse>> swResponse);
-
- // Protects the state below
- Mutex _mutex = MONGO_MAKE_LATCH("ActiveShardCollectionRegistry::_mutex");
-
- // Map containing any collections currently being sharded
- StringMap<std::shared_ptr<ActiveShardCollectionState>> _activeShardCollectionMap;
-};
-
-/**
- * Object of this class is returned from the registerShardCollection call of the active shard
- * collection registry. It can exist in two modes - 'execute' and 'join'. See the comments for
- * registerShardCollection method for more details.
- */
-class ScopedShardCollection {
- ScopedShardCollection(const ScopedShardCollection&) = delete;
- ScopedShardCollection& operator=(const ScopedShardCollection&) = delete;
-
-public:
- ScopedShardCollection(
- std::string nss,
- ActiveShardCollectionRegistry* registry,
- bool shouldExecute,
- SharedSemiFuture<boost::optional<CreateCollectionResponse>> responseFuture);
- ~ScopedShardCollection();
-
- ScopedShardCollection(ScopedShardCollection&&);
- ScopedShardCollection& operator=(ScopedShardCollection&&);
-
- /**
- * Returns true if the shardCollection object is in the 'execute' mode. This means that the
- * caller can execute the shardCollection command. The holder must execute the command and call
- * signalComplete with a status.
- */
- bool mustExecute() const {
- return _shouldExecute;
- }
-
- /**
- * Must only be called if the object is in the 'execute' mode when the shardCollection command
- * was invoked (the command immediately executed). Will either emplace the response on the
- * promise stored in the ActiveShardCollectionRegistry for this nss if status is OK or sets an
- * error if it is not.
- */
- void emplaceResponse(StatusWith<boost::optional<CreateCollectionResponse>> swResponse);
-
- /**
- * Must only be called if the object is in the 'join' mode. Gets a future that contains the
- * response for the command.
- */
- SharedSemiFuture<boost::optional<CreateCollectionResponse>> getResponse();
-
-private:
- // Namespace of collection being sharded
- std::string _nss;
-
- // Registry from which to unregister the migration. Not owned.
- ActiveShardCollectionRegistry* _registry;
-
- /**
- * Whether the holder is the first in line for a newly started shardCollection (in which case
- * the destructor must unregister) or the caller is joining on an already-running
- * shardCollection (in which case the caller must block and wait for completion).
- */
- bool _shouldExecute;
-
- // Future that will be signaled at the end of shardCollection, contains the response for the
- // command.
- SharedSemiFuture<boost::optional<CreateCollectionResponse>> _responseFuture;
-};
-
-} // namespace mongo
diff --git a/src/mongo/db/s/active_shard_collection_registry_test.cpp b/src/mongo/db/s/active_shard_collection_registry_test.cpp
deleted file mode 100644
index f154d7af00d..00000000000
--- a/src/mongo/db/s/active_shard_collection_registry_test.cpp
+++ /dev/null
@@ -1,236 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-#include "mongo/platform/basic.h"
-
-#include "mongo/bson/bsonmisc.h"
-#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/client.h"
-#include "mongo/db/s/active_shard_collection_registry.h"
-#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/s/request_types/shard_collection_gen.h"
-#include "mongo/unittest/unittest.h"
-
-namespace mongo {
-namespace {
-
-using unittest::assertGet;
-
-class ShardCollectionRegistrationTest : public ServiceContextMongoDTest {
-protected:
- ActiveShardCollectionRegistry _registry;
-};
-
-ShardsvrShardCollectionRequest createShardsvrShardCollectionRequest(
- const NamespaceString& nss,
- BSONObj key,
- bool unique,
- int numInitialChunks,
- boost::optional<std::vector<mongo::BSONObj>> initialSplitPoints,
- boost::optional<mongo::BSONObj> collation,
- bool UUIDfromPrimaryShard) {
- ShardsvrShardCollectionRequest shardsvrShardCollectionRequest;
- shardsvrShardCollectionRequest.set_shardsvrShardCollection(nss);
- shardsvrShardCollectionRequest.setKey(key);
- shardsvrShardCollectionRequest.setUnique(unique);
- shardsvrShardCollectionRequest.setNumInitialChunks(numInitialChunks);
- shardsvrShardCollectionRequest.setInitialSplitPoints(initialSplitPoints);
- shardsvrShardCollectionRequest.setCollation(collation);
- shardsvrShardCollectionRequest.setGetUUIDfromPrimaryShard(UUIDfromPrimaryShard);
-
- return shardsvrShardCollectionRequest;
-}
-
-TEST_F(ShardCollectionRegistrationTest, ScopedShardCollectionConstructorAndAssignment) {
- auto shardsvrShardCollectionRequest =
- createShardsvrShardCollectionRequest(NamespaceString("TestDB", "TestColl"),
- BSON("x"
- << "hashed"),
- false,
- 1,
- boost::none,
- boost::none,
- false);
- auto originalScopedShardCollection =
- assertGet(_registry.registerShardCollection(shardsvrShardCollectionRequest));
- ASSERT(originalScopedShardCollection.mustExecute());
-
- ScopedShardCollection movedScopedShardCollection(std::move(originalScopedShardCollection));
- ASSERT(movedScopedShardCollection.mustExecute());
-
- originalScopedShardCollection = std::move(movedScopedShardCollection);
- ASSERT(originalScopedShardCollection.mustExecute());
-
- // Need to signal the registered shard collection so the destructor doesn't invariant
- CreateCollectionResponse response(ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
- response.setCollectionUUID(UUID::gen());
- originalScopedShardCollection.emplaceResponse(response);
-}
-
-TEST_F(ShardCollectionRegistrationTest,
- SecondShardCollectionWithDifferentOptionsReturnsConflictingOperationInProgress) {
- auto firstShardsvrShardCollectionRequest =
- createShardsvrShardCollectionRequest(NamespaceString("TestDB", "TestColl"),
- BSON("x"
- << "hashed"),
- false,
- 1,
- boost::none,
- boost::none,
- false);
- auto originalScopedShardCollection =
- assertGet(_registry.registerShardCollection(firstShardsvrShardCollectionRequest));
-
- auto secondShardsvrShardCollectionRequest =
- createShardsvrShardCollectionRequest(NamespaceString("TestDB", "TestColl"),
- BSON("x" << 0),
- false,
- 1,
- boost::none,
- boost::none,
- false);
- auto secondScopedShardCollection =
- _registry.registerShardCollection(secondShardsvrShardCollectionRequest);
- ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, secondScopedShardCollection.getStatus());
-
- CreateCollectionResponse response(ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
- response.setCollectionUUID(UUID::gen());
- originalScopedShardCollection.emplaceResponse(response);
-}
-
-TEST_F(ShardCollectionRegistrationTest, SecondShardCollectionWithSameOptionsJoinsFirstOnSuccess) {
- auto firstShardsvrShardCollectionRequest =
- createShardsvrShardCollectionRequest(NamespaceString("TestDB", "TestColl"),
- BSON("x"
- << "hashed"),
- false,
- 1,
- boost::none,
- boost::none,
- false);
- auto originalScopedShardCollection =
- assertGet(_registry.registerShardCollection(firstShardsvrShardCollectionRequest));
- ASSERT(originalScopedShardCollection.mustExecute());
-
- auto secondShardsvrShardCollectionRequest =
- createShardsvrShardCollectionRequest(NamespaceString("TestDB", "TestColl"),
- BSON("x"
- << "hashed"),
- false,
- 1,
- boost::none,
- boost::none,
- false);
- auto secondScopedShardCollection =
- assertGet(_registry.registerShardCollection(secondShardsvrShardCollectionRequest));
- ASSERT(!secondScopedShardCollection.mustExecute());
-
- auto uuid = UUID::gen();
-
- CreateCollectionResponse response(ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
- response.setCollectionUUID(uuid);
- originalScopedShardCollection.emplaceResponse(response);
-
- auto swResponse = secondScopedShardCollection.getResponse().getNoThrow();
- ASSERT_EQ(uuid, swResponse.getValue().get().getCollectionUUID().value());
-}
-
-TEST_F(ShardCollectionRegistrationTest, SecondShardCollectionWithSameOptionsJoinsFirstOnError) {
- auto firstShardsvrShardCollectionRequest =
- createShardsvrShardCollectionRequest(NamespaceString("TestDB", "TestColl"),
- BSON("x"
- << "hashed"),
- false,
- 1,
- boost::none,
- boost::none,
- false);
- auto originalScopedShardCollection =
- assertGet(_registry.registerShardCollection(firstShardsvrShardCollectionRequest));
- ASSERT(originalScopedShardCollection.mustExecute());
-
- auto secondShardsvrShardCollectionRequest =
- createShardsvrShardCollectionRequest(NamespaceString("TestDB", "TestColl"),
- BSON("x"
- << "hashed"),
- false,
- 1,
- boost::none,
- boost::none,
- false);
- auto secondScopedShardCollection =
- assertGet(_registry.registerShardCollection(secondShardsvrShardCollectionRequest));
- ASSERT(!secondScopedShardCollection.mustExecute());
-
- originalScopedShardCollection.emplaceResponse({ErrorCodes::InternalError, "Test error"});
- auto swResponse = secondScopedShardCollection.getResponse().getNoThrow();
- ASSERT_EQ(Status(ErrorCodes::InternalError, "Test error"), swResponse.getStatus());
-}
-
-TEST_F(ShardCollectionRegistrationTest, TwoShardCollectionsOnDifferentCollectionsAllowed) {
- auto firstShardsvrShardCollectionRequest =
- createShardsvrShardCollectionRequest(NamespaceString("TestDB", "TestColl"),
- BSON("x"
- << "hashed"),
- false,
- 1,
- boost::none,
- boost::none,
- false);
- auto originalScopedShardCollection =
- assertGet(_registry.registerShardCollection(firstShardsvrShardCollectionRequest));
- ASSERT(originalScopedShardCollection.mustExecute());
-
- auto secondShardsvrShardCollectionRequest =
- createShardsvrShardCollectionRequest(NamespaceString("TestDB2", "TestColl2"),
- BSON("x"
- << "hashed"),
- false,
- 1,
- boost::none,
- boost::none,
- false);
- auto secondScopedShardCollection =
- assertGet(_registry.registerShardCollection(secondShardsvrShardCollectionRequest));
- ASSERT(secondScopedShardCollection.mustExecute());
-
- CreateCollectionResponse responseOriginal(
- ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
- responseOriginal.setCollectionUUID(UUID::gen());
-
- CreateCollectionResponse responseSecond(
- ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
- responseOriginal.setCollectionUUID(UUID::gen());
-
- originalScopedShardCollection.emplaceResponse(responseOriginal);
- secondScopedShardCollection.emplaceResponse(responseSecond);
-}
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
deleted file mode 100644
index b8fae536f71..00000000000
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ /dev/null
@@ -1,370 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/bson/util/bson_extract.h"
-#include "mongo/db/audit.h"
-#include "mongo/db/auth/action_type.h"
-#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/auth/privilege.h"
-#include "mongo/db/commands.h"
-#include "mongo/db/namespace_string.h"
-#include "mongo/db/query/collation/collator_factory_interface.h"
-#include "mongo/db/repl/read_concern_args.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/db/s/dist_lock_manager.h"
-#include "mongo/db/s/shard_key_util.h"
-#include "mongo/s/balancer_configuration.h"
-#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog/type_shard.h"
-#include "mongo/s/catalog_cache.h"
-#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/config_server_client.h"
-#include "mongo/s/grid.h"
-#include "mongo/s/request_types/shard_collection_gen.h"
-#include "mongo/util/scopeguard.h"
-#include "mongo/util/str.h"
-
-// TODO (SERVER-54879): Remove this command entirely after 5.0 branches
-namespace mongo {
-namespace {
-
-const long long kMaxSizeMBDefault = 0;
-
-/**
- * Validates the options specified in the request.
- *
- * WARNING: After validating the request's collation, replaces it with the collection default
- * collation.
- */
-void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
- const NamespaceString& nss,
- const ShardKeyPattern& shardKeyPattern,
- int numShards,
- const std::shared_ptr<Shard>& primaryShard,
- ConfigsvrShardCollectionRequest* request) {
- uassert(
- ErrorCodes::InvalidOptions, "cannot have empty shard key", !request->getKey().isEmpty());
-
- // Ensure that hashed and unique are not both set.
- uassert(ErrorCodes::InvalidOptions,
- "Hashed shard keys cannot be declared unique. It's possible to ensure uniqueness on "
- "the hashed field by declaring an additional (non-hashed) unique index on the field.",
- !shardKeyPattern.isHashedPattern() || !request->getUnique());
-
- // Ensure the namespace is valid.
- uassert(ErrorCodes::IllegalOperation,
- "can't shard system namespaces",
- !nss.isSystem() || nss == NamespaceString::kLogicalSessionsNamespace ||
- nss.isTemporaryReshardingCollection() || nss.isTimeseriesBucketsCollection());
-
- // Ensure numInitialChunks is within valid bounds.
- // Cannot have more than 8192 initial chunks per shard. Setting a maximum of 1,000,000
- // chunks in total to limit the amount of memory this command consumes so there is less
- // danger of an OOM error.
- const int maxNumInitialChunksForShards = numShards * 8192;
- const int maxNumInitialChunksTotal = 1000 * 1000; // Arbitrary limit to memory consumption
- int numChunks = request->getNumInitialChunks();
- uassert(ErrorCodes::InvalidOptions,
- str::stream() << "numInitialChunks cannot be more than either: "
- << maxNumInitialChunksForShards << ", 8192 * number of shards; or "
- << maxNumInitialChunksTotal,
- numChunks >= 0 && numChunks <= maxNumInitialChunksForShards &&
- numChunks <= maxNumInitialChunksTotal);
-
- // Ensure the collation is valid. Currently we only allow the simple collation.
- bool simpleCollationSpecified = false;
- if (request->getCollation()) {
- auto& collation = *request->getCollation();
- auto collator = uassertStatusOK(
- CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation));
- uassert(ErrorCodes::BadValue,
- str::stream() << "The collation for shardCollection must be {locale: 'simple'}, "
- << "but found: " << collation,
- !collator);
- simpleCollationSpecified = true;
- }
-
- // Retrieve the collection metadata in order to verify the collection's collation settings
- BSONObj res;
- {
- auto listCollectionsCmd =
- BSON("listCollections" << 1 << "filter" << BSON("name" << nss.coll()));
- auto allRes = uassertStatusOK(primaryShard->runExhaustiveCursorCommand(
- opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- nss.db().toString(),
- listCollectionsCmd,
- Milliseconds(-1)));
- const auto& all = allRes.docs;
- if (!all.empty()) {
- res = all.front().getOwned();
- }
- }
-
- BSONObj defaultCollation;
-
- if (!res.isEmpty()) {
- // Check that namespace is not a view.
- {
- std::string namespaceType;
- uassertStatusOK(bsonExtractStringField(res, "type", &namespaceType));
- uassert(ErrorCodes::CommandNotSupportedOnView,
- "Views cannot be sharded.",
- namespaceType != "view");
- }
-
- BSONObj collectionOptions;
- if (res["options"].type() == BSONType::Object) {
- collectionOptions = res["options"].Obj();
- }
-
- // Check that collection is not capped.
- uassert(ErrorCodes::InvalidOptions,
- "can't shard a capped collection",
- !collectionOptions["capped"].trueValue());
-
- // Get collection default collation.
- BSONElement collationElement;
- auto status = bsonExtractTypedField(
- collectionOptions, "collation", BSONType::Object, &collationElement);
- if (status.isOK()) {
- defaultCollation = collationElement.Obj().getOwned();
- uassert(ErrorCodes::BadValue,
- "Default collation in collection metadata cannot be empty.",
- !defaultCollation.isEmpty());
- } else if (status != ErrorCodes::NoSuchKey) {
- uassertStatusOK(status);
- }
-
- // If the collection has a non-simple default collation but the user did not specify the
- // simple collation explicitly, return an error.
- uassert(ErrorCodes::BadValue,
- str::stream() << "Collection has default collation: "
- << collectionOptions["collation"]
- << ". Must specify collation {locale: 'simple'}",
- defaultCollation.isEmpty() || simpleCollationSpecified);
- }
-
- // Once the request's collation has been validated as simple or unset, replace it with the
- // deduced collection default collation.
- request->setCollation(defaultCollation.getOwned());
-}
-
-/**
- * Internal sharding command run on config servers to shard a collection.
- */
-class ConfigSvrShardCollectionCommand : public BasicCommand {
-public:
- ConfigSvrShardCollectionCommand() : BasicCommand("_configsvrShardCollection") {}
-
- Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const override {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::internal)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
-
- AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
- return AllowedOnSecondary::kNever;
- }
-
- bool adminOnly() const override {
- return true;
- }
-
- bool supportsWriteConcern(const BSONObj& cmd) const override {
- return true;
- }
-
- std::string help() const override {
- return "Internal command, which is exported by the sharding config server. Do not call "
- "directly. Shards a collection. Requires key. Optional unique. Sharding must "
- "already be enabled for the database";
- }
-
- std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return CommandHelpers::parseNsFullyQualified(cmdObj);
- }
-
- bool run(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) override {
- uassert(ErrorCodes::IllegalOperation,
- "_configsvrShardCollection can only be run on config servers",
- serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
- uassert(ErrorCodes::InvalidOptions,
- str::stream()
- << "_configsvrShardCollection must be called with majority writeConcern, got "
- << cmdObj,
- opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
-
- // Set the operation context read concern level to local for reads into the config database.
- repl::ReadConcernArgs::get(opCtx) =
- repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
-
- const NamespaceString nss(parseNs(dbname, cmdObj));
- auto request = ConfigsvrShardCollectionRequest::parse(
- IDLParserErrorContext("ConfigsvrShardCollectionRequest"), cmdObj);
-
- audit::logShardCollection(
- opCtx->getClient(), nss.ns(), request.getKey(), request.getUnique());
-
- auto const catalogManager = ShardingCatalogManager::get(opCtx);
- auto const catalogCache = Grid::get(opCtx)->catalogCache();
- auto const catalogClient = Grid::get(opCtx)->catalogClient();
- auto shardRegistry = Grid::get(opCtx)->shardRegistry();
-
- // Make the distlocks boost::optional so that they can be released by being reset below.
- boost::optional<DistLockManager::ScopedDistLock> dbDistLock(
- uassertStatusOK(DistLockManager::get(opCtx)->lock(
- opCtx, nss.db(), "shardCollection", DistLockManager::kDefaultLockTimeout)));
- boost::optional<DistLockManager::ScopedDistLock> collDistLock(
- uassertStatusOK(DistLockManager::get(opCtx)->lock(
- opCtx, nss.ns(), "shardCollection", DistLockManager::kDefaultLockTimeout)));
-
- // Ensure sharding is allowed on the database.
- // Until all metadata commands are on the config server, the CatalogCache on the config
- // server may be stale. Read the database entry directly rather than purging and reloading
- // the database into the CatalogCache, which is very expensive.
- auto dbType = catalogClient->getDatabase(
- opCtx, nss.db(), repl::ReadConcernArgs::get(opCtx).getLevel());
- uassert(ErrorCodes::IllegalOperation,
- str::stream() << "sharding not enabled for db " << nss.db(),
- dbType.getSharded());
-
- // Get variables required throughout this command.
-
- auto proposedKey(request.getKey().getOwned());
- ShardKeyPattern shardKeyPattern(proposedKey);
-
- auto shardIds = shardRegistry->getAllShardIds(opCtx);
- uassert(ErrorCodes::IllegalOperation,
- "cannot shard collections before there are shards",
- !shardIds.empty());
-
- // Handle collections in the config db separately.
- if (nss.db() == NamespaceString::kConfigDb) {
- // Only allowlisted collections in config may be sharded (unless we are in test mode)
- uassert(ErrorCodes::IllegalOperation,
- "only special collections in the config db may be sharded",
- nss == NamespaceString::kLogicalSessionsNamespace);
-
- auto configShard = uassertStatusOK(shardRegistry->getShard(opCtx, dbType.getPrimary()));
- auto countCmd = BSON("count" << nss.coll());
- auto countRes = uassertStatusOK(
- configShard->runCommand(opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- nss.db().toString(),
- countCmd,
- Shard::RetryPolicy::kIdempotent));
- auto numDocs = countRes.response["n"].Int();
-
- // If this is a collection on the config db, it must be empty to be sharded,
- // otherwise we might end up with chunks on the config servers.
- uassert(ErrorCodes::IllegalOperation,
- "collections in the config db must be empty to be sharded",
- numDocs == 0);
- }
-
- // For the config db, pick a new host shard for this collection, otherwise
- // make a connection to the real primary shard for this database.
- const auto primaryShardId = [&] {
- if (nss.db() == NamespaceString::kConfigDb) {
- // Many tests assume that the primary shard for configDb will be the shard
- // with the first ID in ascending lexical order
- std::sort(shardIds.begin(), shardIds.end());
- return shardIds[0];
- } else {
- return dbType.getPrimary();
- }
- }();
-
- auto primaryShard = uassertStatusOK(shardRegistry->getShard(opCtx, primaryShardId));
-
- // Step 1.
- validateAndDeduceFullRequestOptions(
- opCtx, nss, shardKeyPattern, shardIds.size(), primaryShard, &request);
-
- // The collation option should have been set to the collection default collation after being
- // validated.
- invariant(request.getCollation());
-
- boost::optional<UUID> uuid;
-
- // The primary shard will read the config.tags collection so we need to lock the zone
- // mutex.
- Lock::ExclusiveLock lk = catalogManager->lockZoneMutex(opCtx);
-
- ShardsvrShardCollectionRequest shardsvrShardCollectionRequest;
- shardsvrShardCollectionRequest.set_shardsvrShardCollection(nss);
- shardsvrShardCollectionRequest.setKey(request.getKey());
- shardsvrShardCollectionRequest.setUnique(request.getUnique());
- shardsvrShardCollectionRequest.setNumInitialChunks(request.getNumInitialChunks());
- shardsvrShardCollectionRequest.setPresplitHashedZones(request.getPresplitHashedZones());
- shardsvrShardCollectionRequest.setInitialSplitPoints(request.getInitialSplitPoints());
- shardsvrShardCollectionRequest.setCollation(request.getCollation());
- shardsvrShardCollectionRequest.setGetUUIDfromPrimaryShard(
- request.getGetUUIDfromPrimaryShard());
-
- auto cmdResponse = uassertStatusOK(primaryShard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- "admin",
- CommandHelpers::appendMajorityWriteConcern(CommandHelpers::appendGenericCommandArgs(
- cmdObj, shardsvrShardCollectionRequest.toBSON())),
- Shard::RetryPolicy::kIdempotent));
-
- uassertStatusOK(cmdResponse.commandStatus);
-
- auto shardCollResponse = ShardsvrShardCollectionResponse::parse(
- IDLParserErrorContext("ShardsvrShardCollectionResponse"), cmdResponse.response);
- uuid = std::move(shardCollResponse.getCollectionUUID());
-
- result << "collectionsharded" << nss.ns();
- if (uuid) {
- result << "collectionUUID" << *uuid;
- }
-
- catalogCache->invalidateCollectionEntry_LINEARIZABLE(nss);
-
- return true;
- }
-
-} configsvrShardCollectionCmd;
-
-} // namespace
-} // namespace mongo
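
The _configsvrShardCollection command removed above validated the requested
numInitialChunks against two caps: 8192 per shard and a global 1,000,000. As a
minimal standalone sketch of that bound check (function and parameter names
here are illustrative, not the server's API):

    #include <algorithm>
    #include <stdexcept>
    #include <string>

    // A request is valid only if 0 <= numChunks <= min(8192 * numShards, 1'000'000).
    void validateNumInitialChunks(long long numChunks, long long numShards) {
        const long long maxForShards = 8192 * numShards;
        const long long maxTotal = 1000 * 1000;  // arbitrary cap on memory consumption
        if (numChunks < 0 || numChunks > std::min(maxForShards, maxTotal))
            throw std::invalid_argument("numInitialChunks cannot be more than " +
                                        std::to_string(std::min(maxForShards, maxTotal)));
    }

With 3 shards the effective cap is 24,576; past 122 shards (8192 * 123 exceeds
1,000,000) the global cap dominates.
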
diff --git a/src/mongo/db/s/config/initial_split_policy.h b/src/mongo/db/s/config/initial_split_policy.h
index 6ba5baa449a..faf008a7f15 100644
--- a/src/mongo/db/s/config/initial_split_policy.h
+++ b/src/mongo/db/s/config/initial_split_policy.h
@@ -116,11 +116,10 @@ public:
* chunks at a time. For example, the first k chunks are assigned to the first available shard,
* and the next k chunks are assigned to the second available shard and so on.
* numContiguousChunksPerShard should only be > 1 when we do not pre-split the range
- * into larger chunks and then split the resulting chunks on the destination shards as in
- * configSvrShardCollection, thus should be equal the number of final split points + 1 divided
+ * into larger chunks and then split the resulting chunks on the destination shards,
+ * thus should be equal to the number of final split points + 1 divided
 * by the number of initial split points + 1. It serves to preserve the ordering/contiguousness
- * of chunks when split by shardSvrShardCollection so that its yields the exact same shard
- * assignments as configSvrShardCollection.
+ * of chunks when split by shardSvrShardCollection.
*/
static ShardCollectionConfig generateShardCollectionInitialChunks(
const SplitPolicyParams& params,
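
The ratio described in the comment above can be made concrete with a small
worked example: 3 final split points yield 4 final chunks and 1 initial split
point yields 2 initial ranges, so numContiguousChunksPerShard = (3 + 1) / (1 + 1) = 2,
i.e. each initial range later splits into 2 contiguous chunks on the same
shard. A minimal sketch of the computation (a reading aid, not the server's code):

    #include <cassert>
    #include <cstddef>

    // (final split points + 1) / (initial split points + 1), per the comment above.
    std::size_t numContiguousChunksPerShard(std::size_t finalSplitPoints,
                                            std::size_t initialSplitPoints) {
        return (finalSplitPoints + 1) / (initialSplitPoints + 1);
    }

    int main() {
        assert(numContiguousChunksPerShard(3, 1) == 2);  // 4 final chunks over 2 ranges
        assert(numContiguousChunksPerShard(0, 0) == 1);  // no pre-split: 1 chunk per range
    }
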
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 68b67fcdddd..f91f3e89e12 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -1105,11 +1105,6 @@ void ShardingCatalogManager::_downgradeCollectionsAndChunksEntriesToPre50Phase2(
LOGV2(5276710, "Successfully downgraded config.chunks (phase 2)");
}
-Lock::ExclusiveLock ShardingCatalogManager::lockZoneMutex(OperationContext* opCtx) {
- Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
- return lk;
-}
-
StatusWith<bool> ShardingCatalogManager::_isShardRequiredByZoneStillInUse(
OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index ffee213d619..5c28a90a101 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -170,12 +170,6 @@ public:
const NamespaceString& nss,
const ChunkRange& range);
- /**
- * Exposes the zone operations mutex to external callers in order to allow them to synchronize
- * with any changes to the zones.
- */
- Lock::ExclusiveLock lockZoneMutex(OperationContext* opCtx);
-
//
// General utilities related to the ShardingCatalogManager
//
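
The lockZoneMutex accessor removed here worked by returning an RAII lock
object by value, so the zone mutex stayed private while the caller's scope
held the lock. A standalone sketch of the same idiom, assuming std::mutex in
place of the server's lock manager:

    #include <mutex>

    class ZoneRegistry {
    public:
        // Hand callers a movable RAII guard; the mutex itself stays private.
        std::unique_lock<std::mutex> lockZone() {
            return std::unique_lock<std::mutex>(_zoneOpLock);
        }

    private:
        std::mutex _zoneOpLock;
    };

    void updateZones(ZoneRegistry& registry) {
        auto lk = registry.lockZone();  // held until lk leaves scope
        // ... mutate zone state while synchronized ...
    }
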
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
index a70c19f1d65..f77d5c08dfe 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
@@ -29,10 +29,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-#include "mongo/platform/basic.h"
-
-#include <set>
-#include <string>
#include <vector>
#include "mongo/client/read_preference.h"
@@ -40,27 +36,16 @@
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/config/initial_split_policy.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/rpc/metadata/tracking_metadata.h"
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/grid.h"
-#include "mongo/s/request_types/shard_collection_gen.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/util/scopeguard.h"
-#include "mongo/util/time_support.h"
namespace mongo {
namespace {
-using executor::NetworkInterfaceMock;
using executor::RemoteCommandRequest;
-using executor::RemoteCommandResponse;
-using executor::TaskExecutor;
-using std::set;
-using std::string;
-using std::vector;
using unittest::assertGet;
class ShardCollectionTestBase : public ConfigServerTestFixture {
@@ -70,7 +55,7 @@ protected:
const BSONObj& splitPoints) {
onCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(shardHost, request.target);
- string cmdName = request.cmdObj.firstElement().fieldName();
+ std::string cmdName = request.cmdObj.firstElement().fieldName();
ASSERT_EQUALS("splitVector", cmdName);
ASSERT_EQUALS(kNamespace.ns(),
request.cmdObj["splitVector"].String()); // splitVector uses full ns
@@ -108,8 +93,10 @@ protected:
};
TEST_F(CreateFirstChunksTest, Split_Disallowed_With_Both_SplitPoints_And_Zones) {
- ShardsvrShardCollectionRequest request;
+ CreateCollectionRequest request;
std::vector<BSONObj> splitPoints = {BSONObj()};
+ request.setNumInitialChunks(0);
+ request.setPresplitHashedZones(false);
request.setInitialSplitPoints(splitPoints);
std::vector<TagsType> tags = {
TagsType(kNamespace,
@@ -119,8 +106,8 @@ TEST_F(CreateFirstChunksTest, Split_Disallowed_With_Both_SplitPoints_And_Zones)
ASSERT_THROWS_CODE(
InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks(),
- request.getPresplitHashedZones(),
+ request.getNumInitialChunks().get(),
+ request.getPresplitHashedZones().get(),
request.getInitialSplitPoints(),
tags,
2 /* numShards */,
@@ -131,8 +118,8 @@ TEST_F(CreateFirstChunksTest, Split_Disallowed_With_Both_SplitPoints_And_Zones)
ASSERT_THROWS_CODE(
InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks(),
- request.getPresplitHashedZones(),
+ request.getNumInitialChunks().get(),
+ request.getPresplitHashedZones().get(),
request.getInitialSplitPoints(),
tags,
2 /* numShards */,
@@ -161,16 +148,18 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_SplitPoints_FromSplitVector_Man
ThreadClient tc("Test", getServiceContext());
auto opCtx = cc().makeOperationContext();
- ShardsvrShardCollectionRequest request;
- auto optimization =
- InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
- kShardKeyPattern,
- request.getNumInitialChunks(),
- request.getPresplitHashedZones(),
- request.getInitialSplitPoints(),
- {}, /* tags */
- 3 /* numShards */,
- false /* collectionIsEmpty */);
+ CreateCollectionRequest request;
+ request.setNumInitialChunks(0);
+ request.setPresplitHashedZones(false);
+ auto optimization = InitialSplitPolicy::calculateOptimizationStrategy(
+ operationContext(),
+ kShardKeyPattern,
+ request.getNumInitialChunks().get(),
+ request.getPresplitHashedZones().get(),
+ request.getInitialSplitPoints(),
+ {}, /* tags */
+ 3 /* numShards */,
+ false /* collectionIsEmpty */);
ASSERT(!optimization->isOptimized());
return optimization->createFirstChunks(opCtx.get(),
kShardKeyPattern,
@@ -212,17 +201,19 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_SplitPoints_FromClient_ManyChun
std::vector<TagsType> zones{};
bool collectionIsEmpty = false;
- ShardsvrShardCollectionRequest request;
+ CreateCollectionRequest request;
+ request.setNumInitialChunks(0);
+ request.setPresplitHashedZones(false);
request.setInitialSplitPoints(splitPoints);
- auto optimization =
- InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
- kShardKeyPattern,
- request.getNumInitialChunks(),
- request.getPresplitHashedZones(),
- request.getInitialSplitPoints(),
- zones,
- 3 /* numShards */,
- collectionIsEmpty);
+ auto optimization = InitialSplitPolicy::calculateOptimizationStrategy(
+ operationContext(),
+ kShardKeyPattern,
+ request.getNumInitialChunks().get(),
+ request.getPresplitHashedZones().get(),
+ request.getInitialSplitPoints(),
+ zones,
+ 3 /* numShards */,
+ collectionIsEmpty);
ASSERT(optimization->isOptimized());
return optimization->createFirstChunks(opCtx.get(),
kShardKeyPattern,
@@ -251,12 +242,14 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_WithZones_OneChunkToPrimary) {
ChunkRange(kShardKeyPattern.getKeyPattern().globalMin(), BSON("x" << 0)))};
bool collectionIsEmpty = false;
- ShardsvrShardCollectionRequest request;
+ CreateCollectionRequest request;
+ request.setNumInitialChunks(0);
+ request.setPresplitHashedZones(false);
auto optimization =
InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks(),
- request.getPresplitHashedZones(),
+ request.getNumInitialChunks().get(),
+ request.getPresplitHashedZones().get(),
request.getInitialSplitPoints(),
zones,
3 /* numShards */,
@@ -296,17 +289,19 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_SplitPoints_FromClient_ManyChunksD
std::vector<TagsType> zones{};
bool collectionIsEmpty = true;
- ShardsvrShardCollectionRequest request;
+ CreateCollectionRequest request;
+ request.setNumInitialChunks(0);
+ request.setPresplitHashedZones(false);
request.setInitialSplitPoints(splitPoints);
- auto optimization =
- InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
- kShardKeyPattern,
- request.getNumInitialChunks(),
- request.getPresplitHashedZones(),
- request.getInitialSplitPoints(),
- zones,
- 3 /* numShards */,
- collectionIsEmpty);
+ auto optimization = InitialSplitPolicy::calculateOptimizationStrategy(
+ operationContext(),
+ kShardKeyPattern,
+ request.getNumInitialChunks().get(),
+ request.getPresplitHashedZones().get(),
+ request.getInitialSplitPoints(),
+ zones,
+ 3 /* numShards */,
+ collectionIsEmpty);
ASSERT(optimization->isOptimized());
return optimization->createFirstChunks(operationContext(),
@@ -348,17 +343,19 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_NoSplitPoints_OneChunkToPrimary) {
std::vector<TagsType> zones{};
bool collectionIsEmpty = true;
- ShardsvrShardCollectionRequest request;
+ CreateCollectionRequest request;
+ request.setNumInitialChunks(0);
+ request.setPresplitHashedZones(false);
request.setInitialSplitPoints(splitPoints);
- auto optimization =
- InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
- kShardKeyPattern,
- request.getNumInitialChunks(),
- request.getPresplitHashedZones(),
- request.getInitialSplitPoints(),
- zones,
- 3 /* numShards */,
- collectionIsEmpty);
+ auto optimization = InitialSplitPolicy::calculateOptimizationStrategy(
+ operationContext(),
+ kShardKeyPattern,
+ request.getNumInitialChunks().get(),
+ request.getPresplitHashedZones().get(),
+ request.getInitialSplitPoints(),
+ zones,
+ 3 /* numShards */,
+ collectionIsEmpty);
ASSERT(optimization->isOptimized());
return optimization->createFirstChunks(operationContext(),
@@ -387,12 +384,14 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_WithZones_ManyChunksOnFirstZoneSha
"TestZone",
ChunkRange(kShardKeyPattern.getKeyPattern().globalMin(), BSON("x" << 0)))};
bool collectionIsEmpty = true;
- ShardsvrShardCollectionRequest request;
+ CreateCollectionRequest request;
+ request.setNumInitialChunks(0);
+ request.setPresplitHashedZones(false);
auto optimization =
InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks(),
- request.getPresplitHashedZones(),
+ request.getNumInitialChunks().get(),
+ request.getPresplitHashedZones().get(),
request.getInitialSplitPoints(),
zones,
3 /* numShards */,
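
The tests above migrate from ShardsvrShardCollectionRequest to
CreateCollectionRequest, whose numInitialChunks and presplitHashedZones fields
are optional; hence the new explicit setNumInitialChunks(0) and
setPresplitHashedZones(false) calls before the .get() dereferences. A minimal
illustration of the pattern, using std::optional as a stand-in for the
generated IDL type:

    #include <cassert>
    #include <optional>

    // Stand-in for the IDL-generated request; fields default to "not set".
    struct RequestSketch {
        std::optional<int> numInitialChunks;
        std::optional<bool> presplitHashedZones;
    };

    int main() {
        RequestSketch request;
        // The migrated tests set explicit defaults first: reading an unset
        // optional (boost's .get(), std's .value()) would otherwise fail.
        request.numInitialChunks = 0;
        request.presplitHashedZones = false;
        assert(request.numInitialChunks.value() == 0);
        assert(request.presplitHashedZones.value() == false);
    }
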
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index ded2e4fa311..3a09ced71f5 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -51,7 +51,6 @@
#include "mongo/s/cluster_commands_helpers.h"
#include "mongo/s/cluster_write.h"
#include "mongo/s/grid.h"
-#include "mongo/s/request_types/shard_collection_gen.h"
namespace mongo {
namespace {
@@ -481,8 +480,8 @@ ExecutorFuture<void> CreateCollectionCoordinator::_runImpl(
audit::logShardCollection(opCtx->getClient(),
nss().ns(),
- *_doc.getCreateCollectionRequest().getShardKey(),
- *_doc.getCreateCollectionRequest().getUnique());
+ *_doc.getShardKey(),
+ _doc.getUnique().value_or(false));
if (_splitPolicy->isOptimized()) {
// Block reads/writes from here on if we need to create
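
The audit-logging change above also swaps an unconditional dereference of the
optional unique flag for _doc.getUnique().value_or(false), which supplies a
default when the field was omitted. The same behavior shown with std::optional
(boost::optional, which the server uses, offers an equivalent):

    #include <cassert>
    #include <optional>

    int main() {
        std::optional<bool> unique;  // field omitted from the request
        // Dereferencing an unset optional is an error; value_or supplies a default.
        assert(unique.value_or(false) == false);
        unique = true;
        assert(unique.value_or(false) == true);
    }
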
diff --git a/src/mongo/db/s/sessions_collection_config_server.cpp b/src/mongo/db/s/sessions_collection_config_server.cpp
index a9c7788ec92..ebd7ae37eed 100644
--- a/src/mongo/db/s/sessions_collection_config_server.cpp
+++ b/src/mongo/db/s/sessions_collection_config_server.cpp
@@ -29,21 +29,14 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/sessions_collection_config_server.h"
-#include "mongo/client/query.h"
-#include "mongo/db/commands.h"
-#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/logical_session_id.h"
-#include "mongo/db/operation_context.h"
#include "mongo/logv2/log.h"
-#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_commands_helpers.h"
+#include "mongo/s/cluster_ddl.h"
#include "mongo/s/grid.h"
-#include "mongo/s/request_types/shard_collection_gen.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
#include "mongo/s/stale_shard_version_helpers.h"
namespace mongo {
@@ -63,19 +56,13 @@ void SessionsCollectionConfigServer::_shardCollectionIfNeeded(OperationContext*
<< ": cannot create the collection until there are shards",
Grid::get(opCtx)->shardRegistry()->getNumShardsNoReload() != 0);
- // TODO (SERVER-54879): Switch this to call cluster::createCollection after 5.0 branches
- ConfigsvrShardCollectionRequest shardCollection;
- shardCollection.set_configsvrShardCollection(NamespaceString::kLogicalSessionsNamespace);
- shardCollection.setKey(BSON("_id" << 1));
+ ShardsvrCreateCollection shardsvrCollRequest(NamespaceString::kLogicalSessionsNamespace);
+ CreateCollectionRequest requestParamsObj;
+ requestParamsObj.setShardKey(BSON("_id" << 1));
+ shardsvrCollRequest.setCreateCollectionRequest(std::move(requestParamsObj));
+ shardsvrCollRequest.setDbName(NamespaceString::kLogicalSessionsNamespace.db());
- DBDirectClient client(opCtx);
- BSONObj info;
- if (!client.runCommand(
- "admin", CommandHelpers::appendMajorityWriteConcern(shardCollection.toBSON()), info)) {
- uassertStatusOKWithContext(getStatusFromCommandResult(info),
- str::stream() << "Failed to create "
- << NamespaceString::kLogicalSessionsNamespace);
- }
+ cluster::createCollection(opCtx, shardsvrCollRequest);
}
void SessionsCollectionConfigServer::_generateIndexesIfNeeded(OperationContext* opCtx) {
diff --git a/src/mongo/db/s/shard_collection_legacy.cpp b/src/mongo/db/s/shard_collection_legacy.cpp
deleted file mode 100644
index fe3daff6d96..00000000000
--- a/src/mongo/db/s/shard_collection_legacy.cpp
+++ /dev/null
@@ -1,711 +0,0 @@
-/**
- * Copyright (C) 2020-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/bson/simple_bsonelement_comparator.h"
-#include "mongo/bson/util/bson_extract.h"
-#include "mongo/db/audit.h"
-#include "mongo/db/auth/action_type.h"
-#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/catalog_raii.h"
-#include "mongo/db/commands.h"
-#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/hasher.h"
-#include "mongo/db/index/index_descriptor.h"
-#include "mongo/db/namespace_string.h"
-#include "mongo/db/query/collation/collator_factory_interface.h"
-#include "mongo/db/repl/repl_client_info.h"
-#include "mongo/db/s/active_shard_collection_registry.h"
-#include "mongo/db/s/collection_sharding_runtime.h"
-#include "mongo/db/s/config/initial_split_policy.h"
-#include "mongo/db/s/database_sharding_state.h"
-#include "mongo/db/s/dist_lock_manager.h"
-#include "mongo/db/s/operation_sharding_state.h"
-#include "mongo/db/s/shard_collection_legacy.h"
-#include "mongo/db/s/shard_filtering_metadata_refresh.h"
-#include "mongo/db/s/shard_key_util.h"
-#include "mongo/db/s/sharding_ddl_50_upgrade_downgrade.h"
-#include "mongo/db/s/sharding_ddl_util.h"
-#include "mongo/db/s/sharding_logging.h"
-#include "mongo/db/s/sharding_state.h"
-#include "mongo/logv2/log.h"
-#include "mongo/rpc/get_status_from_command_result.h"
-#include "mongo/s/balancer_configuration.h"
-#include "mongo/s/catalog/sharding_catalog_client_impl.h"
-#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog/type_shard.h"
-#include "mongo/s/catalog/type_tags.h"
-#include "mongo/s/cluster_commands_helpers.h"
-#include "mongo/s/grid.h"
-#include "mongo/s/request_types/clone_collection_options_from_primary_shard_gen.h"
-#include "mongo/s/request_types/shard_collection_gen.h"
-#include "mongo/s/shard_util.h"
-#include "mongo/util/fail_point.h"
-#include "mongo/util/scopeguard.h"
-#include "mongo/util/str.h"
-
-// TODO (SERVER-54879): Remove this entire file after 5.0 branches
-namespace mongo {
-namespace {
-
-using FeatureCompatibility = ServerGlobalParams::FeatureCompatibility;
-using FCVersion = FeatureCompatibility::Version;
-
-MONGO_FAIL_POINT_DEFINE(pauseShardCollectionBeforeCriticalSection);
-MONGO_FAIL_POINT_DEFINE(pauseShardCollectionReadOnlyCriticalSection);
-MONGO_FAIL_POINT_DEFINE(pauseShardCollectionCommitPhase);
-MONGO_FAIL_POINT_DEFINE(pauseShardCollectionAfterCriticalSection);
-
-struct ShardCollectionTargetState {
- UUID uuid;
- ShardKeyPattern shardKeyPattern;
- std::vector<TagsType> tags;
- bool collectionIsEmpty;
-};
-
-const ReadPreferenceSetting kConfigReadSelector(ReadPreference::Nearest, TagSet{});
-
-/**
- * If the specified status is not OK logs a warning and throws a DBException corresponding to the
- * specified status.
- */
-void uassertStatusOKWithWarning(const Status& status) {
- if (!status.isOK()) {
- LOGV2_WARNING(22103,
- "shardsvrShardCollection failed {error}",
- "shardsvrShardCollection failed",
- "error"_attr = redact(status));
- uassertStatusOK(status);
- }
-}
-
-boost::optional<CreateCollectionResponse> checkIfCollectionAlreadyShardedWithSameOptions(
- OperationContext* opCtx, const ShardsvrShardCollectionRequest& request) {
- const auto& nss = *request.get_shardsvrShardCollection();
- return mongo::sharding_ddl_util::checkIfCollectionAlreadySharded(
- opCtx, nss, request.getKey(), *request.getCollation(), request.getUnique());
-}
-
-void checkForExistingChunks(OperationContext* opCtx,
- const NamespaceString& nss,
- const boost::optional<UUID>& optUUID) {
- BSONObjBuilder countBuilder;
- if (optUUID) {
- countBuilder.append("count", ChunkType::ConfigNS.coll());
- countBuilder.append("query", BSON(ChunkType::collectionUUID << *optUUID));
- } else {
- countBuilder.append("count", ChunkType::ConfigNS.coll());
- countBuilder.append("query", BSON(ChunkType::ns(nss.ns())));
- }
-
- // OK to use limit=1, since if any chunks exist, we will fail.
- countBuilder.append("limit", 1);
-
- auto readConcern =
- Grid::get(opCtx)->readConcernWithConfigTime(repl::ReadConcernLevel::kMajorityReadConcern);
- readConcern.appendInfo(&countBuilder);
-
- auto cmdResponse = uassertStatusOK(
- Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- opCtx,
- kConfigReadSelector,
- ChunkType::ConfigNS.db().toString(),
- countBuilder.done(),
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kIdempotent));
- uassertStatusOK(cmdResponse.commandStatus);
-
- long long numChunks;
- uassertStatusOK(bsonExtractIntegerField(cmdResponse.response, "n", &numChunks));
- uassert(
- ErrorCodes::ManualInterventionRequired,
- str::stream() << "A previous attempt to shard collection " << nss.ns()
- << " failed after writing some initial chunks to config.chunks. Please "
- "manually delete the partially written chunks for collection "
- << nss.ns() << " from config.chunks"
- << (optUUID ? str::stream() << " uuid: " << *optUUID : str::stream() << ""),
- numChunks == 0);
-}
-
-void checkCollation(OperationContext* opCtx, const ShardsvrShardCollectionRequest& request) {
- // Ensure the collation is valid. Currently we only allow the simple collation.
- std::unique_ptr<CollatorInterface> requestedCollator;
-
- const auto& collation = *request.getCollation();
- if (!collation.isEmpty())
- requestedCollator = uassertStatusOK(
- CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation));
-
- AutoGetCollection autoColl(opCtx,
- *request.get_shardsvrShardCollection(),
- MODE_IS,
- AutoGetCollectionViewMode::kViewsForbidden);
-
- const auto actualCollator = [&]() -> const CollatorInterface* {
- const auto& coll = autoColl.getCollection();
- if (coll) {
- uassert(
- ErrorCodes::InvalidOptions, "can't shard a capped collection", !coll->isCapped());
- return coll->getDefaultCollator();
- }
-
- return nullptr;
- }();
-
- if (!requestedCollator && !actualCollator)
- return;
-
- // If this check fails, this means the collation changed between the time
- // '_configsvrShardCollection' was called and the request got to the shard. Report the message
- // as if it failed on the config server in the first place.
- uassert(ErrorCodes::BadValue,
- str::stream()
- << "Collection has default collation: "
- << (actualCollator ? actualCollator : requestedCollator.get())->getSpec().toBSON()
- << ". Must specify collation {locale: 'simple'}.",
- CollatorInterface::collatorsMatch(requestedCollator.get(), actualCollator));
-}
-
-std::vector<TagsType> getTagsAndValidate(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& proposedKey) {
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
- auto tags = uassertStatusOK(catalogClient->getTagsForCollection(opCtx, nss));
-
- // Compares the proposed shard key with the shard key of the collection's existing zones to
- // ensure they are a legal combination
- for (const auto& tag : tags) {
- BSONObjIterator tagMinFields(tag.getMinKey());
- BSONObjIterator tagMaxFields(tag.getMaxKey());
- BSONObjIterator proposedFields(proposedKey);
-
- while (tagMinFields.more() && proposedFields.more()) {
- BSONElement tagMinKeyElement = tagMinFields.next();
- BSONElement tagMaxKeyElement = tagMaxFields.next();
- uassert(ErrorCodes::InvalidOptions,
- str::stream() << "the min and max of the existing zone " << tag.getMinKey()
- << " -->> " << tag.getMaxKey() << " have non-matching keys",
- tagMinKeyElement.fieldNameStringData() ==
- tagMaxKeyElement.fieldNameStringData());
-
- BSONElement proposedKeyElement = proposedFields.next();
- bool match = ((tagMinKeyElement.fieldNameStringData() ==
- proposedKeyElement.fieldNameStringData()) &&
- ((tagMinFields.more() && proposedFields.more()) ||
- (!tagMinFields.more() && !proposedFields.more())));
- uassert(ErrorCodes::InvalidOptions,
- str::stream() << "the proposed shard key " << proposedKey.toString()
- << " does not match with the shard key of the existing zone "
- << tag.getMinKey() << " -->> " << tag.getMaxKey(),
- match);
-
- // If the field is hashed, make sure that the min and max values are of supported type
- uassert(
- ErrorCodes::InvalidOptions,
- str::stream() << "cannot do hash sharding with the proposed key "
- << proposedKey.toString() << " because there exists a zone "
- << tag.getMinKey() << " -->> " << tag.getMaxKey()
- << " whose boundaries are not of type NumberLong, MinKey or MaxKey",
- !ShardKeyPattern::isHashedPatternEl(proposedKeyElement) ||
- (ShardKeyPattern::isValidHashedValue(tagMinKeyElement) &&
- ShardKeyPattern::isValidHashedValue(tagMaxKeyElement)));
- }
- }
-
- return tags;
-}
-
-bool checkIfCollectionIsEmpty(OperationContext* opCtx, const NamespaceString& nss) {
- // Use find with predicate instead of count in order to ensure that the count
- // command doesn't just consult the cached metadata, which may not always be
- // correct
- DBDirectClient localClient(opCtx);
- return localClient.findOne(nss.ns(), Query()).isEmpty();
-}
-
-int getNumShards(OperationContext* opCtx) {
- const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
- shardRegistry->reload(opCtx);
- return shardRegistry->getAllShardIds(opCtx).size();
-}
-
-ShardCollectionTargetState calculateTargetState(OperationContext* opCtx,
- const NamespaceString& nss,
- const ShardsvrShardCollectionRequest& request) {
- auto tags = getTagsAndValidate(opCtx, nss, request.getKey());
- auto uuid = request.getGetUUIDfromPrimaryShard()
- ? *sharding_ddl_util::getCollectionUUID(opCtx, nss)
- : UUID::gen();
-
- const bool isEmpty = checkIfCollectionIsEmpty(opCtx, nss);
- return {uuid, ShardKeyPattern(request.getKey()), tags, isEmpty};
-}
-
-void logStartShardCollection(OperationContext* opCtx,
- const BSONObj& cmdObj,
- const NamespaceString& nss,
- const ShardsvrShardCollectionRequest& request,
- const ShardCollectionTargetState& prerequisites,
- const ShardId& dbPrimaryShardId) {
- LOGV2(
- 22100, "CMD: shardcollection: {command}", "CMD: shardcollection", "command"_attr = cmdObj);
-
- audit::logShardCollection(
- opCtx->getClient(), nss.ns(), prerequisites.shardKeyPattern.toBSON(), request.getUnique());
-
- const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
- const auto primaryShard = uassertStatusOK(shardRegistry->getShard(opCtx, dbPrimaryShardId));
-
- // Record start in changelog
- {
- BSONObjBuilder collectionDetail;
- collectionDetail.append("shardKey", prerequisites.shardKeyPattern.toBSON());
- collectionDetail.append("collection", nss.ns());
- prerequisites.uuid.appendToBuilder(&collectionDetail, "uuid");
- collectionDetail.append("empty", prerequisites.collectionIsEmpty);
- collectionDetail.append("primary", primaryShard->toString());
- uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
- opCtx,
- "shardCollection.start",
- nss.ns(),
- collectionDetail.obj(),
- ShardingCatalogClient::kMajorityWriteConcern));
- }
-}
-
-void createCollectionOnShardsReceivingChunks(OperationContext* opCtx,
- const NamespaceString& nss,
- const std::vector<ChunkType>& initialChunks,
- const ShardId& dbPrimaryShardId) {
-
- std::vector<AsyncRequestsSender::Request> requests;
- std::set<ShardId> initializedShards;
- for (const auto& chunk : initialChunks) {
- const auto& chunkShardId = chunk.getShard();
- if (chunkShardId == dbPrimaryShardId ||
- initializedShards.find(chunkShardId) != initializedShards.end()) {
- continue;
- }
-
-
- CloneCollectionOptionsFromPrimaryShard cloneCollectionOptionsFromPrimaryShardRequest(nss);
- cloneCollectionOptionsFromPrimaryShardRequest.setPrimaryShard(dbPrimaryShardId.toString());
- cloneCollectionOptionsFromPrimaryShardRequest.setDbName(nss.db());
-
- requests.emplace_back(
- chunkShardId,
- cloneCollectionOptionsFromPrimaryShardRequest.toBSON(
- BSON("writeConcern" << ShardingCatalogClient::kMajorityWriteConcern.toBSON())));
-
- initializedShards.emplace(chunkShardId);
- }
-
- if (!requests.empty()) {
- auto responses = gatherResponses(opCtx,
- nss.db(),
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- Shard::RetryPolicy::kIdempotent,
- requests);
-
- // If any shards fail to create the collection, fail the entire shardCollection command
- // (potentially leaving an incompletely created sharded collection)
- for (const auto& response : responses) {
- auto shardResponse =
- uassertStatusOKWithContext(std::move(response.swResponse),
- str::stream() << "Unable to create collection "
- << nss.ns() << " on " << response.shardId);
- auto status = getStatusFromCommandResult(shardResponse.data);
- uassertStatusOK(status.withContext(str::stream()
- << "Unable to create collection " << nss.ns()
- << " on " << response.shardId));
-
- auto wcStatus = getWriteConcernStatusFromCommandResult(shardResponse.data);
- uassertStatusOK(wcStatus.withContext(str::stream()
- << "Unable to create collection " << nss.ns()
- << " on " << response.shardId));
- }
- }
-}
-
-void writeFirstChunksToConfig(OperationContext* opCtx,
- const InitialSplitPolicy::ShardCollectionConfig& initialChunks) {
-
- std::vector<BSONObj> chunkObjs;
- chunkObjs.reserve(initialChunks.chunks.size());
- for (const auto& chunk : initialChunks.chunks) {
- chunkObjs.push_back(chunk.toConfigBSON());
- }
-
- Grid::get(opCtx)->catalogClient()->insertConfigDocumentsAsRetryableWrite(
- opCtx,
- ChunkType::ConfigNS,
- std::move(chunkObjs),
- ShardingCatalogClient::kMajorityWriteConcern);
-}
-
-void writeShardingCatalogEntryForCollection(
- OperationContext* opCtx,
- const NamespaceString& nss,
- const ShardCollectionTargetState& prerequisites,
- const InitialSplitPolicy::ShardCollectionConfig& initialChunks,
- const BSONObj& defaultCollation,
- const bool unique) {
- // Construct the collection default collator.
- std::unique_ptr<CollatorInterface> defaultCollator;
- if (!defaultCollation.isEmpty()) {
- defaultCollator = uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
- ->makeFromBSON(defaultCollation));
- }
-
- CollectionType coll(nss,
- initialChunks.collVersion().epoch(),
- initialChunks.collVersion().getTimestamp(),
- Date_t::now(),
- prerequisites.uuid);
- coll.setKeyPattern(prerequisites.shardKeyPattern.toBSON());
- if (defaultCollator) {
- coll.setDefaultCollation(defaultCollator->getSpec().toBSON());
- }
- coll.setUnique(unique);
-
- uassertStatusOK(ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
- opCtx, nss, coll, true /*upsert*/));
-}
-
-void refreshAllShards(OperationContext* opCtx,
- const NamespaceString& nss,
- const ShardId& dbPrimaryShardId,
- const std::vector<ChunkType>& initialChunks) {
- // If the refresh fails, then the shard will end with a shardVersion UNSHARDED.
- try {
- forceShardFilteringMetadataRefresh(opCtx, nss);
- } catch (const DBException&) {
- UninterruptibleLockGuard noInterrupt(opCtx->lockState());
- AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- CollectionShardingRuntime::get(opCtx, nss)->clearFilteringMetadata(opCtx);
- throw;
- }
-
- auto shardRegistry = Grid::get(opCtx)->shardRegistry();
-
- std::set<ShardId> shardsRefreshed;
- for (const auto& chunk : initialChunks) {
- const auto& chunkShardId = chunk.getShard();
- if (chunkShardId == dbPrimaryShardId ||
- shardsRefreshed.find(chunkShardId) != shardsRefreshed.end()) {
- continue;
- }
-
- auto shard = uassertStatusOK(shardRegistry->getShard(opCtx, chunkShardId));
- auto refreshCmdResponse = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("_flushRoutingTableCacheUpdates" << nss.ns()),
- Seconds{30},
- Shard::RetryPolicy::kIdempotent));
-
- uassertStatusOK(refreshCmdResponse.commandStatus);
- shardsRefreshed.emplace(chunkShardId);
- }
-}
-
-CreateCollectionResponse shardCollection(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& cmdObj,
- const ShardsvrShardCollectionRequest& request,
- const ShardId& dbPrimaryShardId,
- bool mustTakeDistLock,
- const FixedFCVRegion& fcvRegion) {
- // Fast check for whether the collection is already sharded without taking any locks
- if (auto createCollectionResponseOpt =
- checkIfCollectionAlreadyShardedWithSameOptions(opCtx, request)) {
- return *createCollectionResponseOpt;
- }
-
- auto writeChunkDocumentsAndRefreshShards =
- [&](const ShardCollectionTargetState& targetState,
- const InitialSplitPolicy::ShardCollectionConfig& initialChunks) {
- // Insert chunk documents to config.chunks on the config server.
- writeFirstChunksToConfig(opCtx, initialChunks);
- // If an error happens when contacting the config server, we don't know if the update
- // succeeded or not, which might cause the local shard version to differ from the config
- // server, so we clear the metadata to allow another operation to refresh it.
- try {
- writeShardingCatalogEntryForCollection(opCtx,
- nss,
- targetState,
- initialChunks,
- *request.getCollation(),
- request.getUnique());
-
- } catch (const DBException&) {
- UninterruptibleLockGuard noInterrupt(opCtx->lockState());
- AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- CollectionShardingRuntime::get(opCtx, nss)->clearFilteringMetadata(opCtx);
- throw;
- }
-
- refreshAllShards(opCtx, nss, dbPrimaryShardId, initialChunks.chunks);
- };
-
- boost::optional<ShardCollectionTargetState> targetState;
- std::unique_ptr<InitialSplitPolicy> splitPolicy;
- InitialSplitPolicy::ShardCollectionConfig initialChunks;
-
- CreateCollectionResponse shardCollectionResponse;
-
- // Make the distlocks boost::optional so that they can be emplaced only if the request came
- // from the router.
- boost::optional<DistLockManager::ScopedDistLock> dbDistLock;
- boost::optional<DistLockManager::ScopedDistLock> collDistLock;
- if (mustTakeDistLock) {
- dbDistLock.emplace(uassertStatusOK(DistLockManager::get(opCtx)->lock(
- opCtx, nss.db(), "shardCollection", DistLockManager::kDefaultLockTimeout)));
- collDistLock.emplace(uassertStatusOK(DistLockManager::get(opCtx)->lock(
- opCtx, nss.ns(), "shardCollection", DistLockManager::kDefaultLockTimeout)));
- }
-
- {
- pauseShardCollectionBeforeCriticalSection.pauseWhileSet();
-
- // From this point onward the collection can only be read, not written to, so it is safe to
- // construct the prerequisites and generate the target state.
- ScopedShardVersionCriticalSection critSec(opCtx,
- nss,
- BSON("command"
- << "shardCollection"
- << "collection" << nss.ns()));
-
- pauseShardCollectionReadOnlyCriticalSection.pauseWhileSet();
-
- if (auto createCollectionResponseOpt =
- checkIfCollectionAlreadyShardedWithSameOptions(opCtx, request)) {
- return *createCollectionResponseOpt;
- }
-
- if (mustTakeDistLock) {
- // If DistLock must be taken, then the request came from a router, so there is no need
- // to check the count here because it was already done on the config server.
- if (nss.db() == NamespaceString::kConfigDb) {
- auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
-
- auto findReponse = uassertStatusOK(configShard->exhaustiveFindOnConfig(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kMajorityReadConcern,
- nss,
- BSONObj(),
- BSONObj(),
- 1));
-
- auto numDocs = findReponse.docs.size();
-
- // If this is a collection on the config db, it must be empty to be sharded.
- uassert(ErrorCodes::IllegalOperation,
- "collections in the config db must be empty to be sharded",
- numDocs == 0);
- } else {
- const auto dbInfo = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getDatabaseWithRefresh(opCtx, nss.db()));
-
- // Check under the dbLock if this is still the primary shard for the database. This
- // is an unstable check because the shard version might change right after the
- // check; it was added to fix some tests where a movePrimary might be issued from a
- // stale router.
- DatabaseShardingState::checkIsPrimaryShardForDb(opCtx, nss.db());
-
- auto shardId = ShardingState::get(opCtx)->shardId();
-
- uassert(ErrorCodes::IllegalOperation,
- str::stream()
- << "this is not the primary shard for db " << nss.db()
- << " expected: " << dbInfo.primaryId() << " shardId: " << shardId,
- dbInfo.primaryId() == shardId);
- }
- }
-
- // Fail if there are partially written chunks from a previous failed shardCollection.
- if (serverGlobalParams.featureCompatibility.getVersion() ==
- FCVersion::kFullyDowngradedTo44 ||
- serverGlobalParams.featureCompatibility.getVersion() == FCVersion::kVersion49) {
- checkForExistingChunks(opCtx, nss, boost::none);
- } else if (serverGlobalParams.featureCompatibility.getVersion() ==
- FCVersion::kFullyDowngradedTo50) {
- if (auto optUUID = sharding_ddl_util::getCollectionUUID(opCtx, nss))
- checkForExistingChunks(opCtx, nss, optUUID);
- } else {
- // In the intermediate state must check for leftovers from both formats
- checkForExistingChunks(opCtx, nss, boost::none);
- if (auto optUUID = sharding_ddl_util::getCollectionUUID(opCtx, nss))
- checkForExistingChunks(opCtx, nss, optUUID);
- }
-
- checkCollation(opCtx, request);
-
- // Create the collection locally
- shardkeyutil::validateShardKeyIndexExistsOrCreateIfPossible(
- opCtx,
- nss,
- ShardKeyPattern(request.getKey()),
- *request.getCollation(),
- request.getUnique(),
- shardkeyutil::ValidationBehaviorsShardCollection(opCtx));
-
- // Wait until the index is majority written, to prevent having the collection committed to
- // the config server while the index creation is rolled back on stepdowns.
- WriteConcernResult ignoreResult;
- auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
- uassertStatusOK(waitForWriteConcern(
- opCtx, latestOpTime, ShardingCatalogClient::kMajorityWriteConcern, &ignoreResult));
-
- targetState = calculateTargetState(opCtx, nss, request);
-
- splitPolicy =
- InitialSplitPolicy::calculateOptimizationStrategy(opCtx,
- targetState->shardKeyPattern,
- request.getNumInitialChunks(),
- request.getPresplitHashedZones(),
- request.getInitialSplitPoints(),
- targetState->tags,
- getNumShards(opCtx),
- targetState->collectionIsEmpty);
-
- initialChunks = splitPolicy->createFirstChunks(
- opCtx,
- targetState->shardKeyPattern,
- {nss, targetState->uuid, dbPrimaryShardId, ChunkEntryFormat::get(fcvRegion)});
-
- // There must be at least one chunk.
- invariant(initialChunks.chunks.size());
-
- shardCollectionResponse.setCollectionUUID(targetState->uuid);
- shardCollectionResponse.setCollectionVersion(
- initialChunks.chunks[initialChunks.chunks.size() - 1].getVersion());
-
- logStartShardCollection(opCtx, cmdObj, nss, request, *targetState, dbPrimaryShardId);
-
- // From this point onward, the collection can not be written to or read from.
- critSec.enterCommitPhase();
- pauseShardCollectionCommitPhase.pauseWhileSet();
-
- if (splitPolicy->isOptimized()) {
- createCollectionOnShardsReceivingChunks(
- opCtx, nss, initialChunks.chunks, dbPrimaryShardId);
-
- writeChunkDocumentsAndRefreshShards(*targetState, initialChunks);
- }
- }
-
- // We have now left the critical section.
- pauseShardCollectionAfterCriticalSection.pauseWhileSet();
-
- if (!splitPolicy->isOptimized()) {
- writeChunkDocumentsAndRefreshShards(*targetState, initialChunks);
- }
-
- LOGV2(22101,
- "Created {numInitialChunks} chunk(s) for: {namespace}, producing collection version "
- "{initialCollectionVersion}",
- "Created initial chunk(s)",
- "numInitialChunks"_attr = initialChunks.chunks.size(),
- "namespace"_attr = nss,
- "initialCollectionVersion"_attr = initialChunks.collVersion());
-
- ShardingLogging::get(opCtx)->logChange(
- opCtx,
- "shardCollection.end",
- nss.ns(),
- BSON("version" << initialChunks.collVersion().toString() << "numChunks"
- << static_cast<int>(initialChunks.chunks.size())),
- ShardingCatalogClient::kMajorityWriteConcern);
-
- return shardCollectionResponse;
-}
-
-} // namespace
-
-CreateCollectionResponse shardCollectionLegacy(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& cmdObj,
- bool requestFromCSRS,
- const FixedFCVRegion& fcvRegion) {
- auto request = ShardsvrShardCollectionRequest::parse(
- IDLParserErrorContext("shardCollectionLegacy"), cmdObj);
- if (!request.getCollation())
- request.setCollation(BSONObj());
- if (!request.getCollation()->isEmpty()) {
- auto requestedCollator =
- uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
- ->makeFromBSON(*request.getCollation()));
- if (!requestedCollator)
- request.setCollation(BSONObj());
- }
-
- auto scopedShardCollection =
- uassertStatusOK(ActiveShardCollectionRegistry::get(opCtx).registerShardCollection(request));
-
- boost::optional<CreateCollectionResponse> response;
-
- // Check if this collection is currently being sharded and if so, join it
- if (!scopedShardCollection.mustExecute()) {
- response = scopedShardCollection.getResponse().get();
- } else {
- try {
- response = shardCollection(opCtx,
- nss,
- cmdObj,
- request,
- ShardingState::get(opCtx)->shardId(),
- !requestFromCSRS,
- fcvRegion);
- } catch (const DBException& e) {
- scopedShardCollection.emplaceResponse(e.toStatus());
- throw;
- } catch (const std::exception& e) {
- scopedShardCollection.emplaceResponse(
- {ErrorCodes::InternalError,
- str::stream() << "Severe error occurred while running shardCollection command: "
- << e.what()});
- throw;
- }
-
- uassert(ErrorCodes::InvalidUUID,
- str::stream() << "Collection " << nss << " is sharded without UUID",
- response);
-
- scopedShardCollection.emplaceResponse(response);
- }
-
- return response.value();
-}
-} // namespace mongo
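
Condensed, the control flow of the shard_collection_legacy.cpp file deleted
above: join any in-flight identical request, take distributed locks only when
called directly by a router, enter a critical section, validate
collation/zones/emptiness, pick a split policy, write the initial chunks to
the config server, and refresh the involved shards. A compilable skeleton of
that flow, with stub functions standing in for the server internals:

    struct Response {};  // carries the collection UUID and version in the real code

    bool alreadyShardedWithSameOptions() { return false; }
    void takeDistLocks() {}                 // only when the request came from a router
    void enterCriticalSection() {}          // reads still allowed, writes blocked
    void validateCollationZonesAndEmptiness() {}
    Response writeChunksAndRefreshShards() { return {}; }

    Response shardCollectionSketch(bool requestFromRouter) {
        if (alreadyShardedWithSameOptions())
            return {};                      // fast path, taken before any locks
        if (requestFromRouter)
            takeDistLocks();                // the CSRS already holds them otherwise
        enterCriticalSection();
        validateCollationZonesAndEmptiness();
        // ... create the shard key index, compute the split policy,
        //     enter the commit phase (no reads or writes) ...
        return writeChunksAndRefreshShards();
    }
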
diff --git a/src/mongo/db/s/shard_collection_legacy.h b/src/mongo/db/s/shard_collection_legacy.h
deleted file mode 100644
index ad69435859f..00000000000
--- a/src/mongo/db/s/shard_collection_legacy.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright (C) 2020-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include "mongo/db/commands/feature_compatibility_version.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
-
-namespace mongo {
-
-/**
- * Executes the legacy path of shard collection. If requestFromCSRS is false, then the checks and
- * operations performed by the config server (like taking the distributed lock on the database and
- * the collection) must be done on the shard instead.
- */
-CreateCollectionResponse shardCollectionLegacy(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& cmdObj,
- bool requestFromCSRS,
- const FixedFCVRegion& fcvRegion);
-
-} // namespace mongo
diff --git a/src/mongo/db/s/shardsvr_create_collection_command.cpp b/src/mongo/db/s/shardsvr_create_collection_command.cpp
index add45172998..4a29a2d45d0 100644
--- a/src/mongo/db/s/shardsvr_create_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_create_collection_command.cpp
@@ -29,199 +29,19 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-#include "mongo/platform/basic.h"
-
-#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/commands.h"
-#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/query/collation/collator_factory_interface.h"
-#include "mongo/db/query/query_feature_flags_gen.h"
#include "mongo/db/s/create_collection_coordinator.h"
-#include "mongo/db/s/shard_collection_legacy.h"
-#include "mongo/db/s/sharding_ddl_50_upgrade_downgrade.h"
#include "mongo/db/s/sharding_ddl_coordinator_service.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h"
#include "mongo/db/timeseries/timeseries_options.h"
#include "mongo/logv2/log.h"
-#include "mongo/s/grid.h"
-#include "mongo/s/request_types/shard_collection_gen.h"
-#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
namespace mongo {
namespace {
-using FeatureCompatibility = ServerGlobalParams::FeatureCompatibility;
-using FCVersion = FeatureCompatibility::Version;
-
-BSONObj inferCollationFromLocalCollection(OperationContext* opCtx,
- const NamespaceString& nss,
- const ShardsvrCreateCollection& request) {
- auto& collation = request.getCollation().value();
- auto collator = uassertStatusOK(
- CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation));
- uassert(ErrorCodes::BadValue,
- str::stream() << "The collation for shardCollection must be {locale: 'simple'}, "
- << "but found: " << collation,
- !collator);
-
- BSONObj res, defaultCollation, collectionOptions;
- DBDirectClient client(opCtx);
-
- auto allRes = client.getCollectionInfos(nss.db().toString(), BSON("name" << nss.coll()));
-
- if (!allRes.empty())
- res = allRes.front().getOwned();
-
- if (!res.isEmpty() && res["options"].type() == BSONType::Object) {
- collectionOptions = res["options"].Obj();
- }
-
- // Get collection default collation.
- BSONElement collationElement;
- auto status =
- bsonExtractTypedField(collectionOptions, "collation", BSONType::Object, &collationElement);
- if (status.isOK()) {
- defaultCollation = collationElement.Obj();
- uassert(ErrorCodes::BadValue,
- "Default collation in collection metadata cannot be empty.",
- !defaultCollation.isEmpty());
- } else if (status != ErrorCodes::NoSuchKey) {
- uassertStatusOK(status);
- }
-
- return defaultCollation.getOwned();
-}
-
-// TODO (SERVER-54879): Remove this path after 5.0 branches
-CreateCollectionResponse createCollectionLegacy(OperationContext* opCtx,
- const NamespaceString& nss,
- const ShardsvrCreateCollection& request,
- const FixedFCVRegion& fcvRegion) {
- const auto dbInfo =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabaseWithRefresh(opCtx, nss.db()));
-
- uassert(ErrorCodes::IllegalOperation,
- str::stream() << "sharding not enabled for db " << nss.db(),
- dbInfo.shardingEnabled());
-
- if (nss.db() == NamespaceString::kConfigDb) {
- // Only allowlisted collections in config may be sharded (unless we are in test mode)
- uassert(ErrorCodes::IllegalOperation,
- "only special collections in the config db may be sharded",
- nss == NamespaceString::kLogicalSessionsNamespace);
- }
-
- ShardKeyPattern shardKeyPattern(request.getShardKey().value().getOwned());
-
- // Ensure that hashed and unique are not both set.
- uassert(ErrorCodes::InvalidOptions,
- "Hashed shard keys cannot be declared unique. It's possible to ensure uniqueness on "
- "the hashed field by declaring an additional (non-hashed) unique index on the field.",
- !shardKeyPattern.isHashedPattern() ||
- !(request.getUnique() && request.getUnique().value()));
-
- // Ensure that a time-series collection cannot be sharded
- uassert(ErrorCodes::IllegalOperation,
- str::stream() << "can't shard time-series collection " << nss,
- !timeseries::getTimeseriesOptions(opCtx, nss));
-
- // Ensure the namespace is valid.
- uassert(ErrorCodes::IllegalOperation,
- "can't shard system namespaces",
- !nss.isSystem() || nss == NamespaceString::kLogicalSessionsNamespace ||
- nss.isTemporaryReshardingCollection() || nss.isTimeseriesBucketsCollection());
-
- auto optNumInitialChunks = request.getNumInitialChunks();
- if (optNumInitialChunks) {
- // Ensure numInitialChunks is within valid bounds.
- // Cannot have more than 8192 initial chunks per shard. Setting a maximum of 1,000,000
- // chunks in total to limit the amount of memory this command consumes so there is less
- // danger of an OOM error.
-
- const auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
- const int maxNumInitialChunksForShards = shardIds.size() * 8192;
- const int maxNumInitialChunksTotal = 1000 * 1000; // Arbitrary limit to memory consumption
- int numChunks = optNumInitialChunks.value();
- uassert(ErrorCodes::InvalidOptions,
- str::stream() << "numInitialChunks cannot be more than either: "
- << maxNumInitialChunksForShards << ", 8192 * number of shards; or "
- << maxNumInitialChunksTotal,
- numChunks >= 0 && numChunks <= maxNumInitialChunksForShards &&
- numChunks <= maxNumInitialChunksTotal);
- }
-
- ShardsvrShardCollectionRequest shardsvrShardCollectionRequest;
- shardsvrShardCollectionRequest.set_shardsvrShardCollection(nss);
- shardsvrShardCollectionRequest.setKey(request.getShardKey().value());
- if (request.getUnique())
- shardsvrShardCollectionRequest.setUnique(request.getUnique().value());
- if (request.getNumInitialChunks())
- shardsvrShardCollectionRequest.setNumInitialChunks(request.getNumInitialChunks().value());
- if (request.getPresplitHashedZones())
- shardsvrShardCollectionRequest.setPresplitHashedZones(
- request.getPresplitHashedZones().value());
- if (request.getInitialSplitPoints())
- shardsvrShardCollectionRequest.setInitialSplitPoints(
- request.getInitialSplitPoints().value());
-
- if (request.getCollation()) {
- shardsvrShardCollectionRequest.setCollation(
- inferCollationFromLocalCollection(opCtx, nss, request));
- }
-
- return shardCollectionLegacy(opCtx,
- nss,
- shardsvrShardCollectionRequest.toBSON(),
- false /* requestIsFromCSRS */,
- fcvRegion);
-}
-
-CreateCollectionResponse createCollection(OperationContext* opCtx,
- NamespaceString nss,
- const ShardsvrCreateCollection& request) {
- uassert(
- ErrorCodes::NotImplemented, "create collection not implemented yet", request.getShardKey());
-
- auto bucketsNs = nss.makeTimeseriesBucketsNamespace();
- auto bucketsColl =
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, bucketsNs);
- CreateCollectionRequest createCmdRequest = request.getCreateCollectionRequest();
-
- // If the 'system.buckets' collection exists or 'timeseries' parameters are passed in, we know
- // that we are trying to shard a timeseries collection.
- if (bucketsColl || createCmdRequest.getTimeseries()) {
- uassert(5731502,
- "Sharding a timeseries collection feature is not enabled",
- feature_flags::gFeatureFlagShardedTimeSeries.isEnabledAndIgnoreFCV());
-
- if (!createCmdRequest.getTimeseries()) {
- createCmdRequest.setTimeseries(bucketsColl->getTimeseriesOptions());
- } else if (bucketsColl) {
- uassert(5731500,
- str::stream() << "the 'timeseries' spec provided must match that of exists '"
- << nss << "' collection",
- timeseries::optionsAreEqual(*createCmdRequest.getTimeseries(),
- *bucketsColl->getTimeseriesOptions()));
- }
- nss = bucketsNs;
- createCmdRequest.setShardKey(
- uassertStatusOK(timeseries::createBucketsShardKeySpecFromTimeseriesShardKeySpec(
- *createCmdRequest.getTimeseries(), *createCmdRequest.getShardKey())));
- }
-
- auto coordinatorDoc = CreateCollectionCoordinatorDocument();
- coordinatorDoc.setShardingDDLCoordinatorMetadata(
- {{std::move(nss), DDLCoordinatorTypeEnum::kCreateCollection}});
- coordinatorDoc.setCreateCollectionRequest(std::move(createCmdRequest));
- auto service = ShardingDDLCoordinatorService::getService(opCtx);
- auto createCollectionCoordinator = checked_pointer_cast<CreateCollectionCoordinator>(
- service->getOrCreateInstance(opCtx, coordinatorDoc.toBSON()));
- return createCollectionCoordinator->getResult(opCtx);
-}
-
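// Editor's sketch (wrapper name hypothetical) of the coordinator dispatch pattern that the
// deleted createCollection() above used and that typedRun() below now inlines:
// getOrCreateInstance() either spawns a new CreateCollectionCoordinator or joins one
// already running with the same metadata, so concurrent identical requests share a single
// DDL operation, and getResult() blocks until that operation finishes.
CreateCollectionResponse runViaDDLCoordinator(OperationContext* opCtx,
                                              CreateCollectionCoordinatorDocument doc) {
    auto service = ShardingDDLCoordinatorService::getService(opCtx);
    auto coordinator = checked_pointer_cast<CreateCollectionCoordinator>(
        service->getOrCreateInstance(opCtx, doc.toBSON()));
    return coordinator->getResult(opCtx);
}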
class ShardsvrCreateCollectionCommand final : public TypedCommand<ShardsvrCreateCollectionCommand> {
public:
using Request = ShardsvrCreateCollection;
@@ -244,8 +64,7 @@ public:
using InvocationBase::InvocationBase;
Response typedRun(OperationContext* opCtx) {
- auto const shardingState = ShardingState::get(opCtx);
- uassertStatusOK(shardingState->canAcceptShardedCommands());
+ uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands());
opCtx->setAlwaysInterruptAtStepDownOrUp();
@@ -260,24 +79,43 @@ public:
"Create Collection path has not been implemented",
request().getShardKey());
- FixedFCVRegion fcvRegion(opCtx);
-
- bool useNewPath = [&] {
- return feature_flags::gShardingFullDDLSupport.isEnabled(*fcvRegion);
- }();
-
- if (!useNewPath) {
- LOGV2_DEBUG(5277911,
- 1,
- "Running legacy create collection procedure",
- "namespace"_attr = ns());
- return createCollectionLegacy(opCtx, ns(), request(), fcvRegion);
+ auto nss = ns();
+ auto bucketsNs = nss.makeTimeseriesBucketsNamespace();
+ auto bucketsColl =
+ CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, bucketsNs);
+ CreateCollectionRequest createCmdRequest = request().getCreateCollectionRequest();
+
+        // If the 'system.buckets' collection exists or 'timeseries' parameters are passed in,
+        // we know that we are trying to shard a timeseries collection.
+ if (bucketsColl || createCmdRequest.getTimeseries()) {
+ uassert(5731502,
+ "Sharding a timeseries collection feature is not enabled",
+ feature_flags::gFeatureFlagShardedTimeSeries.isEnabledAndIgnoreFCV());
+
+ if (!createCmdRequest.getTimeseries()) {
+ createCmdRequest.setTimeseries(bucketsColl->getTimeseriesOptions());
+ } else if (bucketsColl) {
+ uassert(5731500,
+ str::stream()
+ << "the 'timeseries' spec provided must match that of exists '"
+ << nss << "' collection",
+ timeseries::optionsAreEqual(*createCmdRequest.getTimeseries(),
+ *bucketsColl->getTimeseriesOptions()));
+ }
+ nss = bucketsNs;
+ createCmdRequest.setShardKey(
+ uassertStatusOK(timeseries::createBucketsShardKeySpecFromTimeseriesShardKeySpec(
+ *createCmdRequest.getTimeseries(), *createCmdRequest.getShardKey())));
}
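        // Editor's illustration of the remapping above (namespace assumed): sharding a
        // timeseries collection really shards its underlying buckets namespace, e.g.
        //   NamespaceString("db", "weather").makeTimeseriesBucketsNamespace()
        //       == NamespaceString("db", "system.buckets.weather")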
- LOGV2_DEBUG(
- 5277910, 1, "Running new create collection procedure", "namespace"_attr = ns());
-
- return createCollection(opCtx, ns(), request());
+ auto coordinatorDoc = CreateCollectionCoordinatorDocument();
+ coordinatorDoc.setShardingDDLCoordinatorMetadata(
+ {{std::move(nss), DDLCoordinatorTypeEnum::kCreateCollection}});
+ coordinatorDoc.setCreateCollectionRequest(std::move(createCmdRequest));
+ auto service = ShardingDDLCoordinatorService::getService(opCtx);
+ auto createCollectionCoordinator = checked_pointer_cast<CreateCollectionCoordinator>(
+ service->getOrCreateInstance(opCtx, coordinatorDoc.toBSON()));
+ return createCollectionCoordinator->getResult(opCtx);
}
private:
diff --git a/src/mongo/db/s/shardsvr_shard_collection_command.cpp b/src/mongo/db/s/shardsvr_shard_collection_command.cpp
deleted file mode 100644
index 465a842a2d8..00000000000
--- a/src/mongo/db/s/shardsvr_shard_collection_command.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/commands.h"
-#include "mongo/db/namespace_string.h"
-#include "mongo/db/s/shard_collection_legacy.h"
-#include "mongo/db/s/sharding_ddl_50_upgrade_downgrade.h"
-#include "mongo/db/s/sharding_state.h"
-#include "mongo/logv2/log.h"
-#include "mongo/s/grid.h"
-#include "mongo/s/request_types/shard_collection_gen.h"
-#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
-
-// TODO (SERVER-54879): Remove this command entirely after 5.0 branches
-namespace mongo {
-namespace {
-
-/**
- * Internal sharding command run on primary shard server to shard a collection.
- */
-class ShardsvrShardCollectionCommand : public BasicCommand {
-public:
- ShardsvrShardCollectionCommand() : BasicCommand("_shardsvrShardCollection") {}
-
- std::string help() const override {
- return "should not be calling this directly";
- }
-
- AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
- return AllowedOnSecondary::kNever;
- }
-
- bool adminOnly() const override {
- return true;
- }
-
- bool supportsWriteConcern(const BSONObj& cmd) const override {
- return true;
- }
-
- Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const override {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::internal)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
-
- std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return CommandHelpers::parseNsFullyQualified(cmdObj);
- }
-
- bool run(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) override {
- auto const shardingState = ShardingState::get(opCtx);
- uassertStatusOK(shardingState->canAcceptShardedCommands());
-
- opCtx->setAlwaysInterruptAtStepDownOrUp();
-
- const NamespaceString nss(parseNs(dbname, cmdObj));
-
- FixedFCVRegion fcvRegion(opCtx);
-
- auto createCollectionResponse =
- shardCollectionLegacy(opCtx, nss, cmdObj, true /* requestIsFromCSRS */, fcvRegion);
-
- createCollectionResponse.serialize(&result);
- result.append("collectionsharded", nss.toString());
-
- return true;
- }
-
-} shardsvrShardCollectionCmd;
-
-} // namespace
-} // namespace mongo
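// Editor's note (illustrative reply shown): callers of the removed _shardsvrShardCollection
// command got a response shaped like the ShardsvrShardCollectionResponse IDL below, e.g.
//   { collectionsharded: "db.coll", collectionUUID: UUID("..."), ok: 1 }
// The equivalent information now comes back through the CreateCollectionResponse returned
// by the _shardsvrCreateCollection typed command above.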
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 7b421337d9f..155085c64cc 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -176,7 +176,6 @@ env.Library(
'request_types/resharding_operation_time.idl',
'request_types/set_allow_migrations.idl',
'request_types/set_shard_version_request.cpp',
- 'request_types/shard_collection.idl',
'request_types/sharded_ddl_commands.idl',
'request_types/split_chunk_request_type.cpp',
'request_types/update_zone_key_range_request_type.cpp',
diff --git a/src/mongo/s/request_types/shard_collection.idl b/src/mongo/s/request_types/shard_collection.idl
deleted file mode 100644
index 6440111a61b..00000000000
--- a/src/mongo/s/request_types/shard_collection.idl
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright (C) 2018-present MongoDB, Inc.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the Server Side Public License, version 1,
-# as published by MongoDB, Inc.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# Server Side Public License for more details.
-#
-# You should have received a copy of the Server Side Public License
-# along with this program. If not, see
-# <http://www.mongodb.com/licensing/server-side-public-license>.
-#
-# As a special exception, the copyright holders give permission to link the
-# code of portions of this program with the OpenSSL library under certain
-# conditions as described in each individual source file and distribute
-# linked combinations including the program with the OpenSSL library. You
-# must comply with the Server Side Public License in all respects for
-# all of the code used other than as permitted herein. If you modify file(s)
-# with this exception, you may extend this exception to your version of the
-# file(s), but you are not obligated to do so. If you do not wish to do so,
-# delete this exception statement from your version. If you delete this
-# exception statement from all source files in the program, then also delete
-# it in the license file.
-#
-
-# shardCollection IDL File
-
-global:
- cpp_namespace: "mongo"
-
-imports:
- - "mongo/idl/basic_types.idl"
-
-structs:
- ConfigsvrShardCollectionRequest:
- description: "The request format of the internal shardCollection command on the config server"
- strict: false
- fields:
- _configsvrShardCollection:
- type: namespacestring
- description: "The namespace of the collection to shard in the form <database>.<collection>."
- optional: true
- key:
- type: object
- description: "The index specification document to use as the shard key."
- unique:
- type: bool
- description: "Whether the shard key index should enforce a unique constraint"
- default: false
- numInitialChunks:
- type: safeInt64
- description: "The number of chunks to create initially when sharding an empty collection with a hashed shard key."
- default: 0
- presplitHashedZones:
- type: bool
- description: "True if the chunks should be pre-split based on the existing zones when sharding a collection with hashed shard key"
- default: false
- initialSplitPoints:
- type: array<object>
- description: "A specific set of points to create initial splits at, currently used only by mapReduce"
- optional: true
- collation:
- type: object
- description: "The collation to use for the shard key index."
- optional: true
- getUUIDfromPrimaryShard:
- type: bool
- description: "Whether the collection should be created on the primary shard. This should only be false when used in mapReduce."
- default: true
-
- ConfigsvrShardCollectionResponse:
- description: "The response format of the internal shardCollection command on the config server"
- strict: false
- fields:
- collectionsharded:
- type: string
- description: "The name of the collection that just got sharded."
- collectionUUID:
- type: uuid
- description: "The UUID of the collection that just got sharded."
- optional: true
-
- ShardsvrShardCollectionRequest:
- description: "The internal shardCollection command on a primary shard"
- strict: false
- fields:
- _shardsvrShardCollection:
- type: namespacestring
- description: "The namespace of the collection to shard in the form <database>.<collection>."
- optional: true
- key:
- type: object
- description: "The index specification document to use as the shard key."
- unique:
- type: bool
- description: "Whether the shard key index should enforce a unique constraint"
- default: false
- numInitialChunks:
- type: safeInt64
- description: "The number of chunks to create initially when sharding an empty collection with a hashed shard key."
- default: 0
- presplitHashedZones:
- type: bool
- description: "True if the chunks should be pre-split based on the existing zones when sharding a collection with hashed shard key"
- default: false
- initialSplitPoints:
- type: array<object>
- description: "A specific set of points to create initial splits at, currently used only by mapReduce"
- optional: true
- collation:
- type: object
- description: "The collation to use for the shard key index."
- optional: true
- getUUIDfromPrimaryShard:
- type: bool
- description: "Whether the collection should be created on the primary shard. This should only be false when used in mapReduce."
- default: true
-
- ShardsvrShardCollectionResponse:
- description: "The response format of the internal shardCollection command on the primary shard"
- strict: false
- fields:
- collectionsharded:
- type: string
- description: "The name of the collection that just got sharded."
- collectionUUID:
- type: uuid
- description: "The UUID of the collection that just got sharded."
- optional: true
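// Editor's sketch (names and values assumed) of the command document the removed
// ShardsvrShardCollectionRequest IDL described, as createCollectionLegacy() above would
// have serialized it with the generated setters:
BSONObj legacyShardCollectionCmd =
    BSON("_shardsvrShardCollection"
         << "db.coll"
         << "key" << BSON("x" << 1) << "unique" << false << "numInitialChunks" << 0
         << "presplitHashedZones" << false << "getUUIDfromPrimaryShard" << true);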