author     Rui Liu <rui.liu@mongodb.com>                     2021-12-22 16:36:45 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-12-22 17:15:30 +0000
commit     d6f2b64decf02397546427f6574f581e96c52d5b (patch)
tree       3516dedf28b970ef6d2cddfad4fdb3887075783f
parent     d269dd586a99b2e6c05710543c66f958afa7ffe4 (diff)
download   mongo-d6f2b64decf02397546427f6574f581e96c52d5b.tar.gz
SERVER-60694 Move collMod to DDL coordinator infrastructure
 jstests/concurrency/fsm_workloads/agg_out.js | 15
 jstests/concurrency/fsm_workloads/collmod.js | 18
 jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js | 6
 jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js | 14
 jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js | 6
 jstests/core/collmod_convert_to_unique_violations.js | 27
 jstests/core/hidden_index.js | 4
 jstests/core/views/views_all_commands.js | 2
 jstests/multiVersion/sharded_timeseries_collmod_mixed_version.js | 50
 jstests/replsets/db_reads_while_recovering_all_commands.js | 2
 jstests/sharding/catalog_cache_refresh_counters.js | 10
 jstests/sharding/index_commands_shard_targeting.js | 16
 jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js | 28
 jstests/sharding/libs/mongos_api_params_util.js | 2
 jstests/sharding/move_primary_with_writes.js | 6
 jstests/sharding/read_write_concern_defaults_application.js | 2
 jstests/sharding/resharding_disallow_writes.js | 6
 jstests/sharding/resharding_prohibited_commands.js | 9
 jstests/sharding/sharding_statistics_server_status.js | 10
 jstests/sharding/timeseries_multiple_mongos.js | 8
 src/mongo/db/SConscript | 17
 src/mongo/db/coll_mod.idl | 41
 src/mongo/db/s/SConscript | 6
 src/mongo/db/s/collmod_coordinator.cpp | 236
 src/mongo/db/s/collmod_coordinator.h | 96
 src/mongo/db/s/collmod_coordinator_document.idl | 63
 src/mongo/db/s/sharded_collmod.idl | 46
 src/mongo/db/s/sharding_ddl_coordinator.idl | 1
 src/mongo/db/s/sharding_ddl_coordinator_service.cpp | 5
 src/mongo/db/s/sharding_ddl_util.cpp | 14
 src/mongo/db/s/sharding_ddl_util.h | 12
 src/mongo/db/s/sharding_util.cpp | 16
 src/mongo/db/s/sharding_util.h | 12
 src/mongo/db/s/shardsvr_collmod_command.cpp | 125
 src/mongo/db/s/shardsvr_collmod_participant_command.cpp | 109
 src/mongo/db/timeseries/SConscript | 2
 src/mongo/db/timeseries/timeseries_collmod.cpp | 28
 src/mongo/db/transaction_validation.cpp | 3
 src/mongo/s/SConscript | 1
 src/mongo/s/commands/cluster_collection_mod_cmd.cpp | 47
 src/mongo/s/request_types/sharded_ddl_commands.idl | 11
 41 files changed, 949 insertions(+), 183 deletions(-)
diff --git a/jstests/concurrency/fsm_workloads/agg_out.js b/jstests/concurrency/fsm_workloads/agg_out.js
index 6c1d720558f..86ebca8801a 100644
--- a/jstests/concurrency/fsm_workloads/agg_out.js
+++ b/jstests/concurrency/fsm_workloads/agg_out.js
@@ -107,14 +107,17 @@ var $config = extendWorkload($config, function($config, $super) {
// Change the validation level.
const validationLevels = ['off', 'strict', 'moderate'];
const newValidationLevel = validationLevels[Random.randInt(validationLevels.length)];
- assertWhenOwnDB.commandWorked(
- db.runCommand({collMod: this.outputCollName, validationLevel: newValidationLevel}));
+ assertWhenOwnDB.commandWorkedOrFailedWithCode(
+ db.runCommand({collMod: this.outputCollName, validationLevel: newValidationLevel}),
+ ErrorCodes.ConflictingOperationInProgress);
} else {
// Change the validation action.
- assertWhenOwnDB.commandWorked(db.runCommand({
- collMod: this.outputCollName,
- validationAction: Random.rand() > 0.5 ? 'warn' : 'error'
- }));
+ assertWhenOwnDB.commandWorkedOrFailedWithCode(
+ db.runCommand({
+ collMod: this.outputCollName,
+ validationAction: Random.rand() > 0.5 ? 'warn' : 'error'
+ }),
+ ErrorCodes.ConflictingOperationInProgress);
}
};
diff --git a/jstests/concurrency/fsm_workloads/collmod.js b/jstests/concurrency/fsm_workloads/collmod.js
index ef56b6cf408..4fb7945ae52 100644
--- a/jstests/concurrency/fsm_workloads/collmod.js
+++ b/jstests/concurrency/fsm_workloads/collmod.js
@@ -23,21 +23,23 @@ var $config = (function() {
collMod: this.threadCollName,
index: {keyPattern: {createdAt: 1}, expireAfterSeconds: newTTL}
});
- assertAlways.commandWorked(res);
+ assertAlways.commandWorkedOrFailedWithCode(res,
+ [ErrorCodes.ConflictingOperationInProgress]);
// only assert if new expireAfterSeconds differs from old one
- if (res.hasOwnProperty('expireAfterSeconds_new')) {
+ if (res.ok === 1 && res.hasOwnProperty('expireAfterSeconds_new')) {
assertWhenOwnDB.eq(res.expireAfterSeconds_new, newTTL);
}
// Attempt an invalid collMod which should always fail regardless of whether a WCE
// occurred. This is meant to reproduce SERVER-56772.
const encryptSchema = {$jsonSchema: {properties: {_id: {encrypt: {}}}}};
- assertAlways.commandFailedWithCode(db.runCommand({
- collMod: this.threadCollName,
- validator: encryptSchema,
- validationAction: "warn"
- }),
- ErrorCodes.QueryFeatureNotAllowed);
+ assertAlways.commandFailedWithCode(
+ db.runCommand({
+ collMod: this.threadCollName,
+ validator: encryptSchema,
+ validationAction: "warn"
+ }),
+ [ErrorCodes.ConflictingOperationInProgress, ErrorCodes.QueryFeatureNotAllowed]);
}
return {collMod: collMod};
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js b/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js
index 69290c93785..a9b24f652a2 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js
@@ -90,7 +90,8 @@ var $config = (function() {
const acceptableCodes = [
ErrorCodes.Interrupted,
ErrorCodes.DuplicateKey,
- ErrorCodes.BackgroundOperationInProgressForNamespace
+ ErrorCodes.BackgroundOperationInProgressForNamespace,
+ ErrorCodes.LockBusy,
];
if (e.code && acceptableCodes.includes(e.code) ||
// Indexes may be transiently inconsistent across shards, which can lead a
@@ -167,7 +168,8 @@ var $config = (function() {
collMod: this.collName,
index: {keyPattern: indexToModify, expireAfterSeconds: data.expireAfterSeconds}
});
- assertAlways.commandWorked(result);
+ assertAlways.commandWorkedOrFailedWithCode(result,
+ ErrorCodes.ConflictingOperationInProgress);
},
// Verify that the indexes that we expect to be on disk are actually there and that indexes
diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
index ec513399e93..e89ae283988 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
@@ -96,7 +96,12 @@ var $config = (function() {
const toName = this.getRandomView(this.viewList);
const res = db.runCommand(
{collMod: fromName, viewOn: toName, pipeline: this.getRandomViewPipeline()});
- assertAlways(res.ok === 1 || res.code === ErrorCodes.GraphContainsCycle, tojson(res));
+ assertAlways(res.ok === 1 ||
+ [
+ ErrorCodes.GraphContainsCycle,
+ ErrorCodes.ConflictingOperationInProgress
+ ].includes(res.code),
+ tojson(res));
}
/**
@@ -112,7 +117,12 @@ var $config = (function() {
const fromName = this.getRandomView(this.viewList);
const res = db.runCommand(
{collMod: fromName, viewOn: collName, pipeline: this.getRandomViewPipeline()});
- assertAlways(res.ok === 1 || res.code === ErrorCodes.GraphContainsCycle, tojson(res));
+ assertAlways(res.ok === 1 ||
+ [
+ ErrorCodes.GraphContainsCycle,
+ ErrorCodes.ConflictingOperationInProgress
+ ].includes(res.code),
+ tojson(res));
}
function readFromView(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
index d974376d10c..ef5bd2d6028 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
@@ -31,7 +31,11 @@ var $config = (function() {
const toName = this.getRandomView(this.viewList);
const cmd = {collMod: fromName, viewOn: toName, pipeline: []};
const res = db.runCommand(cmd);
- const errorCodes = [ErrorCodes.GraphContainsCycle, ErrorCodes.NamespaceNotFound];
+ const errorCodes = [
+ ErrorCodes.GraphContainsCycle,
+ ErrorCodes.NamespaceNotFound,
+ ErrorCodes.ConflictingOperationInProgress
+ ];
assertAlways.commandWorkedOrFailedWithCode(
res, errorCodes, () => `cmd: ${tojson(cmd)}`);
}
diff --git a/jstests/core/collmod_convert_to_unique_violations.js b/jstests/core/collmod_convert_to_unique_violations.js
index dc0c7953346..d9e2de0abb9 100644
--- a/jstests/core/collmod_convert_to_unique_violations.js
+++ b/jstests/core/collmod_convert_to_unique_violations.js
@@ -30,23 +30,6 @@ if (!collModIndexUniqueEnabled) {
return;
}
-function extractResult(obj) {
- if (!FixtureHelpers.isMongos(db)) {
- return obj;
- }
-
- let numFields = 0;
- let result = null;
- for (let field in obj.raw) {
- result = obj.raw[field];
- numFields++;
- }
-
- assert.neq(null, result);
- assert.eq(1, numFields);
- return result;
-}
-
function sortViolationsArray(arr) {
// Sorting unsorted arrays of unsorted arrays -- Sort subarrays, then sort main array by first
// key of subarray.
@@ -66,11 +49,11 @@ function sortViolationsArray(arr) {
// Checks that the violations match what we expect.
function assertFailedWithViolations(result, violations) {
- const error = extractResult(result);
- assert.commandFailedWithCode(error, ErrorCodes.CannotEnableIndexConstraint);
- assert.eq(bsonWoCompare(sortViolationsArray(error.violations), sortViolationsArray(violations)),
- 0,
- tojson(error));
+ assert.commandFailedWithCode(result, ErrorCodes.CannotEnableIndexConstraint);
+ assert.eq(
+ bsonWoCompare(sortViolationsArray(result.violations), sortViolationsArray(violations)),
+ 0,
+ tojson(result));
}
const collName = 'collmod_convert_to_unique_violations';
diff --git a/jstests/core/hidden_index.js b/jstests/core/hidden_index.js
index 287c7cc5d0a..838d99fb71c 100644
--- a/jstests/core/hidden_index.js
+++ b/jstests/core/hidden_index.js
@@ -98,7 +98,9 @@ assert.eq(idxSpec.hidden, true);
// Can't hide any index in a system collection.
const systemColl = db.getSiblingDB('admin').system.version;
assert.commandWorked(systemColl.createIndex({a: 1}));
-assert.commandFailedWithCode(systemColl.hideIndex("a_1"), 2);
+assert.commandFailedWithCode(
+ systemColl.hideIndex("a_1"),
+ FixtureHelpers.isMongos(db) ? ErrorCodes.NoShardingEnabled : ErrorCodes.BadValue);
assert.commandFailedWithCode(systemColl.createIndex({a: 1}, {hidden: true}), 2);
// Can't hide the '_id' index.
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 45cc9533b2b..73dafb4afaa 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -153,6 +153,8 @@ let viewsCommandTests = {
_shardsvrSetAllowMigrations: {skip: isAnInternalCommand},
_shardsvrShardCollection:
{skip: isAnInternalCommand}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
+ _shardsvrCollMod: {skip: isAnInternalCommand},
+ _shardsvrCollModParticipant: {skip: isAnInternalCommand},
_transferMods: {skip: isAnInternalCommand},
_vectorClockPersist: {skip: isAnInternalCommand},
abortReshardCollection: {skip: isUnrelated},
diff --git a/jstests/multiVersion/sharded_timeseries_collmod_mixed_version.js b/jstests/multiVersion/sharded_timeseries_collmod_mixed_version.js
new file mode 100644
index 00000000000..12c0f4bf6ef
--- /dev/null
+++ b/jstests/multiVersion/sharded_timeseries_collmod_mixed_version.js
@@ -0,0 +1,50 @@
+/**
+ * Tests that latest shards can process collMod on a time-series collection sent from an old 5.0 mongos.
+ */
+
+(function() {
+"use strict";
+
+const dbName = 'testDB';
+const collName = 'testColl';
+const timeField = 'tm';
+const metaField = 'mt';
+const indexName = 'index';
+const viewNss = `${dbName}.${collName}`;
+
+const st = new ShardingTest(
+ {shards: 2, rs: {nodes: 3}, mongos: [{binVersion: 'latest'}, {binVersion: '5.0'}]});
+const mongos = st.s0;
+const db = mongos.getDB(dbName);
+
+assert.commandWorked(
+ mongos.adminCommand({setFeatureCompatibilityVersion: binVersionToFCV('latest')}));
+
+assert.commandWorked(
+ db.createCollection(collName, {timeseries: {timeField: timeField, metaField: metaField}}));
+assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
+assert.commandWorked(db[collName].createIndex({[metaField]: 1}, {name: indexName}));
+assert.commandWorked(mongos.adminCommand({
+ shardCollection: viewNss,
+ key: {[metaField]: 1},
+}));
+
+assert.commandWorked(mongos.adminCommand({setFeatureCompatibilityVersion: '5.0'}));
+
+const oldDb = st.s1.getDB(dbName);
+// Assert that collMod works with matching versions of mongos and mongod.
+assert.commandWorked(db.runCommand({collMod: collName, index: {name: indexName, hidden: true}}));
+// Assert that collMod still works with old version of mongos.
+assert.commandWorked(
+ oldDb.runCommand({collMod: collName, index: {name: indexName, hidden: false}}));
+
+// Assert that collMod with granularity update fails with matching versions of mongos and mongod.
+assert.commandFailedWithCode(db.runCommand({collMod: collName, timeseries: {granularity: 'hours'}}),
+ ErrorCodes.NotImplemented);
+// Assert that collMod with granularity update still fails with old version of mongos.
+assert.commandFailedWithCode(
+ oldDb.runCommand({collMod: collName, timeseries: {granularity: 'hours'}}),
+ ErrorCodes.NotImplemented);
+
+st.stop();
+})();
diff --git a/jstests/replsets/db_reads_while_recovering_all_commands.js b/jstests/replsets/db_reads_while_recovering_all_commands.js
index bc06d4f4eaa..2701e5fc516 100644
--- a/jstests/replsets/db_reads_while_recovering_all_commands.js
+++ b/jstests/replsets/db_reads_while_recovering_all_commands.js
@@ -88,6 +88,8 @@ const allCommands = {
_shardsvrReshardingOperationTime: {skip: isPrimaryOnly},
_shardsvrRefineCollectionShardKey: {skip: isPrimaryOnly},
_shardsvrSetAllowMigrations: {skip: isPrimaryOnly},
+ _shardsvrCollMod: {skip: isPrimaryOnly},
+ _shardsvrCollModParticipant: {skip: isAnInternalCommand},
_transferMods: {skip: isPrimaryOnly},
_vectorClockPersist: {skip: isPrimaryOnly},
abortReshardCollection: {skip: isPrimaryOnly},
diff --git a/jstests/sharding/catalog_cache_refresh_counters.js b/jstests/sharding/catalog_cache_refresh_counters.js
index f9946e5fb45..028ef2f2c59 100644
--- a/jstests/sharding/catalog_cache_refresh_counters.js
+++ b/jstests/sharding/catalog_cache_refresh_counters.js
@@ -103,10 +103,12 @@ runTest(() => assert.commandWorked(mongos1Coll.remove({x: 250})), [
/**
* Verify that non-CRUD commands get logged when blocked by a refresh.
*/
-runTest(() => assert.commandWorked(mongos1DB.runCommand({collMod: collName})), [
- {opType: 'countAllOperations', increase: 1},
- {opType: 'countCommands', increase: 1},
-]);
+runTest(() => assert.commandWorked(mongos1DB.runCommand(
+ {createIndexes: collName, indexes: [{key: {a: 1}, name: 'index'}]})),
+ [
+ {opType: 'countAllOperations', increase: 1},
+ {opType: 'countCommands', increase: 1},
+ ]);
st.stop();
})();
diff --git a/jstests/sharding/index_commands_shard_targeting.js b/jstests/sharding/index_commands_shard_targeting.js
index d7a5f0d0c86..c9f70fc8f50 100644
--- a/jstests/sharding/index_commands_shard_targeting.js
+++ b/jstests/sharding/index_commands_shard_targeting.js
@@ -55,7 +55,8 @@ function assertCommandChecksShardVersions(st, dbName, collName, testCase) {
// (no chunks).
ShardVersioningUtil.assertCollectionVersionOlderThan(st.shard0, ns, latestCollectionVersion);
- // Assert that the targeted shards have the latest collection version after the command is run.
+ // Assert that the targeted shards have the latest collection version after the command is
+ // run.
ShardVersioningUtil.assertCollectionVersionEquals(st.shard1, ns, latestCollectionVersion);
ShardVersioningUtil.assertCollectionVersionEquals(st.shard2, ns, latestCollectionVersion);
}
@@ -180,19 +181,6 @@ const testCases = {
}
};
},
- collMod: collName => {
- return {
- command: {collMod: collName, validator: {x: {$type: "string"}}},
- assertCommandRanOnShard: (shard) => {
- assert.commandFailedWithCode(
- shard.getCollection(dbName + "." + collName).insert({x: 1}),
- ErrorCodes.DocumentValidationFailure);
- },
- assertCommandDidNotRunOnShard: (shard) => {
- assert.commandWorked(shard.getCollection(dbName + "." + collName).insert({x: 1}));
- }
- };
- },
};
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
diff --git a/jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js b/jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js
index 651fde8261d..44ffd313d71 100644
--- a/jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js
+++ b/jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js
@@ -129,33 +129,5 @@ stepNames.forEach((stepName) => {
}
});
-stepNames.forEach((stepName) => {
- jsTest.log(`Testing that collMod aborts concurrent outgoing migrations that are in step ${
- stepName}...`);
- const collName = "testCollModMoveChunkStep" + stepName;
- const ns = dbName + "." + collName;
-
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: shardKey}));
-
- assertCommandAbortsConcurrentOutgoingMigration(st, stepName, ns, () => {
- assert.commandWorked(
- testDB.runCommand({collMod: collName, validator: {x: {$type: "string"}}}));
- });
-
- // Verify that the index command succeeds.
- assert.commandFailedWithCode(st.shard0.getCollection(ns).insert({x: 1}),
- ErrorCodes.DocumentValidationFailure);
-
- // If collMod is run after the migration has reached the steady state, shard1
- // will not perform schema validation because the validation rule just does not
- // exist when shard1 clones the collection options from shard0. However, if collMod
- // is run after the cloning step starts but before the steady state is reached,
- // shard0 may have the validation rule when shard1 does the cloning so shard1 may
- // or may not perform schema validation.
- if (stepName == moveChunkStepNames.reachedSteadyState) {
- assert.commandWorked(st.shard1.getCollection(ns).insert({x: 1}));
- }
-});
-
st.stop();
})();
diff --git a/jstests/sharding/libs/mongos_api_params_util.js b/jstests/sharding/libs/mongos_api_params_util.js
index e22045f1761..97e94ab2081 100644
--- a/jstests/sharding/libs/mongos_api_params_util.js
+++ b/jstests/sharding/libs/mongos_api_params_util.js
@@ -217,7 +217,7 @@ let MongosAPIParametersUtil = (function() {
commandName: "collMod",
run: {
inAPIVersion1: true,
- shardCommandName: "collMod",
+ shardCommandName: "_shardsvrCollMod",
permittedInTxn: false,
command: () => ({collMod: "collection"}),
}
diff --git a/jstests/sharding/move_primary_with_writes.js b/jstests/sharding/move_primary_with_writes.js
index f5b18998539..b31647b5789 100644
--- a/jstests/sharding/move_primary_with_writes.js
+++ b/jstests/sharding/move_primary_with_writes.js
@@ -144,11 +144,13 @@ function buildCommands(collName, shouldFail) {
},
{
command: {collMod: collName, index: {keyPattern: {c: 1}, expireAfterSeconds: 3600}},
- shouldFail: shouldFail
+ shouldFail: true,
+ errorCodes: [ErrorCodes.LockBusy, ErrorCodes.MovePrimaryInProgress]
},
{
command: {collMod: collName + "View", viewOn: collName, pipeline: [{$match: {_id: 1}}]},
- shouldFail: true
+ shouldFail: true,
+ errorCodes: [ErrorCodes.LockBusy, ErrorCodes.MovePrimaryInProgress]
},
{command: {convertToCapped: "unshardedFoo", size: 1000000}, shouldFail: true},
{command: {dropIndexes: collName, index: collName + "Index"}, shouldFail: shouldFail},
diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js
index 3bf244d9bfc..4fe0793ec8b 100644
--- a/jstests/sharding/read_write_concern_defaults_application.js
+++ b/jstests/sharding/read_write_concern_defaults_application.js
@@ -153,6 +153,8 @@ let testCases = {
_shardsvrReshardCollection: {skip: "internal command"},
_shardsvrReshardingOperationTime: {skip: "internal command"},
_shardsvrSetAllowMigrations: {skip: "internal command"},
+ _shardsvrCollMod: {skip: "internal command"},
+ _shardsvrCollModParticipant: {skip: "internal command"},
_shardsvrShardCollection:
{skip: "internal command"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_transferMods: {skip: "internal command"},
diff --git a/jstests/sharding/resharding_disallow_writes.js b/jstests/sharding/resharding_disallow_writes.js
index 22f502a949c..a403a58a99a 100644
--- a/jstests/sharding/resharding_disallow_writes.js
+++ b/jstests/sharding/resharding_disallow_writes.js
@@ -72,8 +72,10 @@ reshardingTest.withReshardingInBackground(
jsTestLog("Attempting collMod");
assert.commandFailedWithCode(
- sourceCollection.runCommand({collMod: sourceCollection.getName()}),
- ErrorCodes.ReshardCollectionInProgress);
+            // The collMod is serialized with the resharding command, so we explicitly add a
+            // timeout to the command so that it doesn't get blocked and time out the test.
+ sourceCollection.runCommand({collMod: sourceCollection.getName(), maxTimeMS: 5000}),
+ [ErrorCodes.ReshardCollectionInProgress, ErrorCodes.MaxTimeMSExpired]);
jsTestLog("Attempting drop index");
assert.commandFailedWithCode(
diff --git a/jstests/sharding/resharding_prohibited_commands.js b/jstests/sharding/resharding_prohibited_commands.js
index 6cbaf338ca5..6d49babf98f 100644
--- a/jstests/sharding/resharding_prohibited_commands.js
+++ b/jstests/sharding/resharding_prohibited_commands.js
@@ -33,7 +33,9 @@ const indexDroppedByTest = {
};
const prohibitedCommands = [
- {collMod: collectionName},
+    // The collMod is serialized with the resharding command, so we explicitly add a timeout to
+    // the command so that it doesn't get blocked and time out the test.
+ {collMod: collectionName, maxTimeMS: 5000},
{createIndexes: collectionName, indexes: [{name: "idx1", key: indexCreatedByTest}]},
{dropIndexes: collectionName, index: indexDroppedByTest},
];
@@ -63,8 +65,9 @@ const assertCommandsSucceedAfterReshardingOpFinishes = (database) => {
const assertCommandsFailDuringReshardingOp = (database) => {
prohibitedCommands.forEach((command) => {
jsTest.log(`Testing that ${tojson(command)} fails during resharding operation`);
- assert.commandFailedWithCode(database.runCommand(command),
- ErrorCodes.ReshardCollectionInProgress);
+ assert.commandFailedWithCode(
+ database.runCommand(command),
+ [ErrorCodes.ReshardCollectionInProgress, ErrorCodes.MaxTimeMSExpired]);
});
};
diff --git a/jstests/sharding/sharding_statistics_server_status.js b/jstests/sharding/sharding_statistics_server_status.js
index 95904fe5d0b..eade4143508 100644
--- a/jstests/sharding/sharding_statistics_server_status.js
+++ b/jstests/sharding/sharding_statistics_server_status.js
@@ -116,9 +116,12 @@ const coll = mongos.getCollection(dbName + "." + collName);
const numDocsToInsert = 3;
const shardArr = [st.shard0, st.shard1];
const stats = [new ShardStat(), new ShardStat()];
-const index = {
+const index1 = {
x: 1
};
+const index2 = {
+ y: 1
+};
let numDocsInserted = 0;
assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
@@ -242,7 +245,7 @@ moveChunkThread.start();
waitForMoveChunkStep(donorConn, moveChunkStepNames.startedMoveChunk);
// Run an index command.
-assert.commandWorked(coll.createIndexes([index]));
+assert.commandWorked(coll.createIndexes([index1]));
// Unpause the migration and verify that it gets aborted.
unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.startedMoveChunk);
@@ -259,8 +262,7 @@ moveChunkThread.start();
waitForMoveChunkStep(donorConn, moveChunkStepNames.reachedSteadyState);
// Run an index command.
-assert.commandWorked(
- st.s.getDB(dbName).runCommand({collMod: collName, validator: {x: {$type: "string"}}}));
+assert.commandWorked(coll.createIndexes([index2]));
// Unpause the migration and verify that it gets aborted.
unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
diff --git a/jstests/sharding/timeseries_multiple_mongos.js b/jstests/sharding/timeseries_multiple_mongos.js
index b63ec7a0af6..3bb0a6ea929 100644
--- a/jstests/sharding/timeseries_multiple_mongos.js
+++ b/jstests/sharding/timeseries_multiple_mongos.js
@@ -58,6 +58,7 @@ function generateBatch(size) {
function runTest({shardKey, cmdObj, numProfilerEntries}) {
const isDelete = cmdObj["delete"] !== undefined;
const isUpdate = cmdObj["update"] !== undefined;
+ const isCollMod = cmdObj["collMod"] !== undefined;
const cmdCollName = cmdObj[Object.keys(cmdObj)[0]];
const shardKeyHasMetaField = shardKey[metaField] !== undefined;
@@ -112,7 +113,10 @@ function runTest({shardKey, cmdObj, numProfilerEntries}) {
filter = {"op": "update", "ns": `${dbName}.${cmdCollName}`, "ok": {$ne: 0}};
} else if (isDelete) {
filter = {"op": "remove", "ns": `${dbName}.${cmdCollName}`, "ok": {$ne: 0}};
- } else if (unVersioned) {
+ } else if (isCollMod) {
+ const command = unVersioned ? "_shardsvrCollMod" : "_shardsvrCollModParticipant";
+ filter = {[`command.${command}`]: cmdCollName, "ok": {$ne: 0}};
+ } else if (unVersioned && !isCollMod) {
filter["command.shardVersion.0"] = Timestamp(0, 0);
}
@@ -130,7 +134,7 @@ function runTest({shardKey, cmdObj, numProfilerEntries}) {
}
// The update command is always logged as being on the user-provided namespace.
- validateCommand((isUpdate || isDelete) ? cmdCollName : bucketsCollName,
+ validateCommand((isUpdate || isDelete || isCollMod) ? cmdCollName : bucketsCollName,
numProfilerEntries.sharded);
// Insert dummy data so that the 'mongos1' sees the collection as sharded.
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 2becfdb5dfc..80b80f3753a 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -608,9 +608,23 @@ env.CppUnitTest(
)
env.Library(
- target="commands",
+ target="coll_mod_command_idl",
source=[
'coll_mod.idl',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/db/catalog/collection_options',
+ ],
+ LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/auth/authprivilege',
+ '$BUILD_DIR/mongo/db/timeseries/timeseries_options',
+ '$BUILD_DIR/mongo/idl/idl_parser',
+ ],
+)
+
+env.Library(
+ target="commands",
+ source=[
'coll_mod_reply_validation.cpp',
'commands.cpp',
'drop.idl',
@@ -639,6 +653,7 @@ env.Library(
'$BUILD_DIR/mongo/rpc/rewrite_state_change_errors',
'$BUILD_DIR/mongo/rpc/rpc',
'audit',
+ 'coll_mod_command_idl',
'index_commands_idl',
'namespace_string',
],
diff --git a/src/mongo/db/coll_mod.idl b/src/mongo/db/coll_mod.idl
index 49fdfd6471d..455fff974a0 100644
--- a/src/mongo/db/coll_mod.idl
+++ b/src/mongo/db/coll_mod.idl
@@ -89,23 +89,9 @@ structs:
type: safeBool
unstable: true
-commands:
- collMod:
- description: "Specify collMod Command."
- command_name: collMod
- namespace: concatenate_with_db
- cpp_name: collMod
+ CollModRequest:
+ description: "The collMod command's request."
strict: true
- api_version: "1"
- access_check:
- complex:
- - check: should_ignore_auth_checks
- - privilege:
- resource_pattern: exact_namespace
- action_type: collMod
- - privilege:
- resource_pattern: exact_namespace
- action_type: find
fields:
index:
description: "Index to be modified."
@@ -169,6 +155,8 @@ commands:
optional: true
type: CollModTimeseries
unstable: false
+ # TODO (SERVER-61685): `isTimeseriesNamespace` is not needed for the collMod command but kept
+ # for backward compatibility. Remove this flag after 6.0 branching.
isTimeseriesNamespace:
description: "This flag is set to true when the command was originally sent to
mongos on the time-series view, but got rewritten to target
@@ -183,4 +171,25 @@ commands:
optional: true
type: safeBool
unstable: true
+
+commands:
+ collMod:
+ description: "Specify collMod Command."
+ command_name: collMod
+ namespace: concatenate_with_db
+ cpp_name: collMod
+ strict: true
+ api_version: "1"
+ access_check:
+ complex:
+ - check: should_ignore_auth_checks
+ - privilege:
+ resource_pattern: exact_namespace
+ action_type: collMod
+ - privilege:
+ resource_pattern: exact_namespace
+ action_type: find
+ inline_chained_structs: true
+ chained_structs:
+ CollModRequest: CollModRequest
reply_type: CollModReply
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index e5752d9748c..22372d7aed1 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -289,6 +289,8 @@ env.Library(
'cleanup_orphaned_cmd.cpp',
'clone_catalog_data_command.cpp',
'clone_collection_options_from_primary_shard_cmd.cpp',
+ 'collmod_coordinator.cpp',
+ 'collmod_coordinator_document.idl',
'config/configsvr_abort_reshard_collection_command.cpp',
'config/configsvr_add_shard_command.cpp',
'config/configsvr_add_shard_to_zone_command.cpp',
@@ -341,6 +343,7 @@ env.Library(
'set_allow_migrations_coordinator.cpp',
'set_allow_migrations_coordinator_document.idl',
'set_shard_version_command.cpp',
+ 'sharded_collmod.idl',
'sharded_index_consistency_server_status.cpp',
'sharded_rename_collection.idl',
'sharding_ddl_coordinator.cpp',
@@ -350,6 +353,8 @@ env.Library(
'sharding_state_command.cpp',
'shardsvr_abort_reshard_collection_command.cpp',
'shardsvr_cleanup_reshard_collection_command.cpp',
+ 'shardsvr_collmod_command.cpp',
+ 'shardsvr_collmod_participant_command.cpp',
'shardsvr_commit_reshard_collection_command.cpp',
'shardsvr_create_collection_command.cpp',
'shardsvr_create_collection_participant_command.cpp',
@@ -383,6 +388,7 @@ env.Library(
'$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
'$BUILD_DIR/mongo/db/repl/replica_set_messages',
'$BUILD_DIR/mongo/db/timeseries/catalog_helper',
+ '$BUILD_DIR/mongo/db/timeseries/timeseries_collmod',
'$BUILD_DIR/mongo/db/timeseries/timeseries_conversion_util',
'$BUILD_DIR/mongo/db/timeseries/timeseries_options',
'$BUILD_DIR/mongo/s/commands/sharded_cluster_sharding_commands',
diff --git a/src/mongo/db/s/collmod_coordinator.cpp b/src/mongo/db/s/collmod_coordinator.cpp
new file mode 100644
index 00000000000..9c13e2756a5
--- /dev/null
+++ b/src/mongo/db/s/collmod_coordinator.cpp
@@ -0,0 +1,236 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/db/s/collmod_coordinator.h"
+
+#include "mongo/db/catalog/collection_catalog.h"
+#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/coll_mod_gen.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/ops/insert.h"
+#include "mongo/db/s/sharded_collmod_gen.h"
+#include "mongo/db/s/sharding_ddl_util.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/timeseries/catalog_helper.h"
+#include "mongo/db/timeseries/timeseries_collmod.h"
+#include "mongo/idl/idl_parser.h"
+#include "mongo/logv2/log.h"
+#include "mongo/s/async_requests_sender.h"
+#include "mongo/s/cluster_commands_helpers.h"
+#include "mongo/s/grid.h"
+
+namespace mongo {
+
+namespace {
+
+bool isShardedColl(OperationContext* opCtx, const NamespaceString& nss) {
+ try {
+ auto coll = Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss);
+ return true;
+ } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ // The collection is not sharded or doesn't exist.
+ return false;
+ }
+}
+
+bool hasTimeSeriesGranularityUpdate(const CollModRequest& request) {
+ return request.getTimeseries() && request.getTimeseries()->getGranularity();
+}
+
+} // namespace
+
+CollModCoordinator::CollModCoordinator(ShardingDDLCoordinatorService* service,
+ const BSONObj& initialState)
+ : ShardingDDLCoordinator(service, initialState) {
+ _initialState = initialState.getOwned();
+ _doc = CollModCoordinatorDocument::parse(IDLParserErrorContext("CollModCoordinatorDocument"),
+ _initialState);
+}
+
+void CollModCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
+ const auto otherDoc =
+ CollModCoordinatorDocument::parse(IDLParserErrorContext("CollModCoordinatorDocument"), doc);
+
+ const auto& selfReq = _doc.getCollModRequest().toBSON();
+ const auto& otherReq = otherDoc.getCollModRequest().toBSON();
+
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ str::stream() << "Another collMod for namespace " << nss()
+ << " is being executed with different parameters: " << selfReq,
+ SimpleBSONObjComparator::kInstance.evaluate(selfReq == otherReq));
+}
+
+boost::optional<BSONObj> CollModCoordinator::reportForCurrentOp(
+ MongoProcessInterface::CurrentOpConnectionsMode connMode,
+ MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
+
+ BSONObjBuilder cmdBob;
+ if (const auto& optComment = getForwardableOpMetadata().getComment()) {
+ cmdBob.append(optComment.get().firstElement());
+ }
+ cmdBob.appendElements(_doc.getCollModRequest().toBSON());
+ BSONObjBuilder bob;
+ bob.append("type", "op");
+ bob.append("desc", "CollModCoordinator");
+ bob.append("op", "command");
+ bob.append("ns", nss().toString());
+ bob.append("command", cmdBob.obj());
+ bob.append("currentPhase", _doc.getPhase());
+ bob.append("active", true);
+ return bob.obj();
+}
+
+void CollModCoordinator::_enterPhase(Phase newPhase) {
+ StateDoc newDoc(_doc);
+ newDoc.setPhase(newPhase);
+
+ LOGV2_DEBUG(6069401,
+ 2,
+ "CollMod coordinator phase transition",
+ "namespace"_attr = nss(),
+ "newPhase"_attr = CollModCoordinatorPhase_serializer(newDoc.getPhase()),
+ "oldPhase"_attr = CollModCoordinatorPhase_serializer(_doc.getPhase()));
+
+ if (_doc.getPhase() == Phase::kUnset) {
+ _doc = _insertStateDocument(std::move(newDoc));
+ return;
+ }
+ _doc = _updateStateDocument(cc().makeOperationContext().get(), std::move(newDoc));
+}
+
+void CollModCoordinator::_performNoopRetryableWriteOnParticipants(
+ OperationContext* opCtx, const std::shared_ptr<executor::TaskExecutor>& executor) {
+ auto shardsAndConfigsvr = [&] {
+ const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
+ auto participants = shardRegistry->getAllShardIds(opCtx);
+ participants.emplace_back(shardRegistry->getConfigShard()->getId());
+ return participants;
+ }();
+
+ _doc = _updateSession(opCtx, _doc);
+ sharding_ddl_util::performNoopRetryableWriteOnShards(
+ opCtx, shardsAndConfigsvr, getCurrentSession(_doc), executor);
+}
+
+ExecutorFuture<void> CollModCoordinator::_runImpl(
+ std::shared_ptr<executor::ScopedTaskExecutor> executor,
+ const CancellationToken& token) noexcept {
+ return ExecutorFuture<void>(**executor)
+ .then(_executePhase(
+ Phase::kUpdateShards,
+ [this, executor = executor, anchor = shared_from_this()] {
+ auto opCtxHolder = cc().makeOperationContext();
+ auto* opCtx = opCtxHolder.get();
+ getForwardableOpMetadata().setOn(opCtx);
+
+ const auto isTimeSeries = timeseries::getTimeseriesOptions(
+ opCtx, nss(), !nss().isTimeseriesBucketsCollection());
+ const auto collNss = isTimeSeries && !nss().isTimeseriesBucketsCollection()
+ ? nss().makeTimeseriesBucketsNamespace()
+ : nss();
+ const auto isSharded = isShardedColl(opCtx, collNss);
+
+ if (isSharded) {
+ // Updating granularity on sharded time-series collections is not allowed.
+ if (isTimeSeries) {
+ uassert(
+ ErrorCodes::NotImplemented,
+ str::stream()
+ << "Cannot update granularity of a sharded time-series collection.",
+ !hasTimeSeriesGranularityUpdate(_doc.getCollModRequest()));
+ }
+
+ if (_recoveredFromDisk) {
+ _performNoopRetryableWriteOnParticipants(opCtx, **executor);
+ }
+ _doc = _updateSession(opCtx, _doc);
+ const OperationSessionInfo osi = getCurrentSession(_doc);
+
+ const auto chunkManager = uassertStatusOK(
+ Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(
+ opCtx, collNss));
+ std::unique_ptr<CollatorInterface> collator;
+ const auto expCtx =
+ make_intrusive<ExpressionContext>(opCtx, std::move(collator), collNss);
+ std::set<ShardId> participants;
+ chunkManager.getShardIdsForQuery(
+ expCtx, {} /* query */, {} /* collation */, &participants);
+
+ ShardsvrCollModParticipant request(nss(), _doc.getCollModRequest());
+ const auto cmdObj =
+ CommandHelpers::appendMajorityWriteConcern(request.toBSON({}));
+ const auto& responses = sharding_ddl_util::sendAuthenticatedCommandToShards(
+ opCtx,
+ nss().db(),
+ cmdObj.addFields(osi.toBSON()),
+ {std::make_move_iterator(participants.begin()),
+ std::make_move_iterator(participants.end())},
+ **executor);
+ BSONObjBuilder builder;
+ std::string errmsg;
+ auto ok = appendRawResponses(opCtx, &errmsg, &builder, responses).responseOK;
+ if (!errmsg.empty()) {
+ CommandHelpers::appendSimpleCommandStatus(builder, ok, errmsg);
+ }
+ _result = builder.obj();
+ } else {
+ CollMod cmd(nss());
+ cmd.setCollModRequest(_doc.getCollModRequest());
+ BSONObjBuilder collModResBuilder;
+ uassertStatusOK(timeseries::processCollModCommandWithTimeSeriesTranslation(
+ opCtx, nss(), cmd, &collModResBuilder));
+ auto collModRes = collModResBuilder.obj();
+
+ const auto dbInfo = uassertStatusOK(
+ Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss().db()));
+ const auto shard = uassertStatusOK(
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, dbInfo.primaryId()));
+ BSONObjBuilder builder;
+ builder.appendElements(collModRes);
+ BSONObjBuilder subBuilder(builder.subobjStart("raw"));
+ subBuilder.append(shard->getConnString().toString(), collModRes);
+ subBuilder.doneFast();
+ _result = builder.obj();
+ }
+ }))
+ .onError([this, anchor = shared_from_this()](const Status& status) {
+ if (!status.isA<ErrorCategory::NotPrimaryError>() &&
+ !status.isA<ErrorCategory::ShutdownError>()) {
+ LOGV2_ERROR(5757002,
+ "Error running collMod",
+ "namespace"_attr = nss(),
+ "error"_attr = redact(status));
+ }
+ return status;
+ });
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/s/collmod_coordinator.h b/src/mongo/db/s/collmod_coordinator.h
new file mode 100644
index 00000000000..885551d4853
--- /dev/null
+++ b/src/mongo/db/s/collmod_coordinator.h
@@ -0,0 +1,96 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/s/collmod_coordinator_document_gen.h"
+#include "mongo/db/s/sharding_ddl_coordinator.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
+
+namespace mongo {
+
+class CollModCoordinator final : public ShardingDDLCoordinator {
+public:
+ using StateDoc = CollModCoordinatorDocument;
+ using Phase = CollModCoordinatorPhaseEnum;
+
+ CollModCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState);
+
+ void checkIfOptionsConflict(const BSONObj& doc) const override;
+
+ boost::optional<BSONObj> reportForCurrentOp(
+ MongoProcessInterface::CurrentOpConnectionsMode connMode,
+ MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+
+ /**
+     * Waits for the termination of the parent DDLCoordinator (so that all resources are released)
+     * and then returns the result.
+ */
+ BSONObj getResult(OperationContext* opCtx) {
+ getCompletionFuture().get(opCtx);
+ invariant(_result.is_initialized());
+ return *_result;
+ }
+
+private:
+ ShardingDDLCoordinatorMetadata const& metadata() const override {
+ return _doc.getShardingDDLCoordinatorMetadata();
+ }
+
+ ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
+ const CancellationToken& token) noexcept override;
+
+ template <typename Func>
+ auto _executePhase(const Phase& newPhase, Func&& func) {
+ return [=] {
+ const auto& currPhase = _doc.getPhase();
+
+ if (currPhase > newPhase) {
+ // Do not execute this phase if we already reached a subsequent one.
+ return;
+ }
+ if (currPhase < newPhase) {
+ // Persist the new phase if this is the first time we are executing it.
+ _enterPhase(newPhase);
+ }
+ return func();
+ };
+ }
+
+ void _enterPhase(Phase newPhase);
+
+ void _performNoopRetryableWriteOnParticipants(
+ OperationContext* opCtx, const std::shared_ptr<executor::TaskExecutor>& executor);
+
+ BSONObj _initialState;
+ CollModCoordinatorDocument _doc;
+ boost::optional<BSONObj> _result;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/s/collmod_coordinator_document.idl b/src/mongo/db/s/collmod_coordinator_document.idl
new file mode 100644
index 00000000000..8ff37dc6308
--- /dev/null
+++ b/src/mongo/db/s/collmod_coordinator_document.idl
@@ -0,0 +1,63 @@
+# Copyright (C) 2021-present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+# This file defines the format of documents stored in config.ddl.collMod on the coordinator
+# shard for a collMod operation.
+
+global:
+ cpp_namespace: "mongo"
+ cpp_includes:
+ - "mongo/s/catalog/type_collection.h"
+
+imports:
+ - "mongo/idl/basic_types.idl"
+ - "mongo/db/coll_mod.idl"
+ - "mongo/db/s/sharding_ddl_coordinator.idl"
+
+enums:
+ CollModCoordinatorPhase:
+ description: "The current state of a collMod operation on the coordinator."
+ type: string
+ values:
+ kUnset: "unset"
+ kUpdateShards: "UpdateShards"
+
+structs:
+ CollModCoordinatorDocument:
+ description: "Represents a collMod operation on the coordinator shard."
+ generate_comparison_operators: false
+ strict: false
+ chained_structs:
+ ShardingDDLCoordinatorMetadata: ShardingDDLCoordinatorMetadata
+ fields:
+ phase:
+ type: CollModCoordinatorPhase
+ default: kUnset
+ collModRequest:
+ type: CollModRequest
+ description: "Initial collMod request."
diff --git a/src/mongo/db/s/sharded_collmod.idl b/src/mongo/db/s/sharded_collmod.idl
new file mode 100644
index 00000000000..1bd2081e25f
--- /dev/null
+++ b/src/mongo/db/s/sharded_collmod.idl
@@ -0,0 +1,46 @@
+# Copyright (C) 2018-present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+global:
+ cpp_namespace: "mongo"
+
+imports:
+ - "mongo/db/coll_mod.idl"
+ - "mongo/idl/basic_types.idl"
+
+commands:
+ _shardsvrCollModParticipant:
+ command_name: _shardsvrCollModParticipant
+ cpp_name: ShardsvrCollModParticipant
+ description: "Internal command sent to all shards to implement collMod locally"
+ strict: false
+ api_version: ""
+ namespace: concatenate_with_db
+ fields:
+ collModRequest:
+ type: CollModRequest
+ description: "Initial collMod request"
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.idl b/src/mongo/db/s/sharding_ddl_coordinator.idl
index 17142de4ec7..2b43a7b48f4 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.idl
+++ b/src/mongo/db/s/sharding_ddl_coordinator.idl
@@ -50,6 +50,7 @@ enums:
kCreateCollection: "createCollection"
kRefineCollectionShardKey: "refineCollectionShardKey"
kSetAllowMigrations: "setAllowMigrations"
+ kCollMod: "collMod"
types:
ForwardableOperationMetadata:
diff --git a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp
index aa3324ce8a1..1ec794cc435 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/s/sharding_ddl_coordinator.h"
#include "mongo/logv2/log.h"
+#include "mongo/db/s/collmod_coordinator.h"
#include "mongo/db/s/create_collection_coordinator.h"
#include "mongo/db/s/drop_collection_coordinator.h"
#include "mongo/db/s/drop_database_coordinator.h"
@@ -80,6 +81,10 @@ std::shared_ptr<ShardingDDLCoordinator> constructShardingDDLCoordinatorInstance(
case DDLCoordinatorTypeEnum::kSetAllowMigrations:
return std::make_shared<SetAllowMigrationsCoordinator>(service,
std::move(initialState));
+ break;
+ case DDLCoordinatorTypeEnum::kCollMod:
+ return std::make_shared<CollModCoordinator>(service, std::move(initialState));
+ break;
default:
uasserted(ErrorCodes::BadValue,
str::stream()
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index 3154339e52c..2d4dc68a02b 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -160,11 +160,12 @@ void linearizeCSRSReads(OperationContext* opCtx) {
ShardingCatalogClient::kMajorityWriteConcern));
}
-void sendAuthenticatedCommandToShards(OperationContext* opCtx,
- StringData dbName,
- const BSONObj& command,
- const std::vector<ShardId>& shardIds,
- const std::shared_ptr<executor::TaskExecutor>& executor) {
+std::vector<AsyncRequestsSender::Response> sendAuthenticatedCommandToShards(
+ OperationContext* opCtx,
+ StringData dbName,
+ const BSONObj& command,
+ const std::vector<ShardId>& shardIds,
+ const std::shared_ptr<executor::TaskExecutor>& executor) {
// TODO SERVER-57519: remove the following scope
{
// Ensure ShardRegistry is initialized before using the AsyncRequestsSender that relies on
@@ -180,7 +181,8 @@ void sendAuthenticatedCommandToShards(OperationContext* opCtx,
BSONObjBuilder bob(command);
rpc::writeAuthDataToImpersonatedUserMetadata(opCtx, &bob);
auto authenticatedCommand = bob.obj();
- sharding_util::sendCommandToShards(opCtx, dbName, authenticatedCommand, shardIds, executor);
+ return sharding_util::sendCommandToShards(
+ opCtx, dbName, authenticatedCommand, shardIds, executor);
}
void removeTagsMetadataFromConfig(OperationContext* opCtx,
diff --git a/src/mongo/db/s/sharding_ddl_util.h b/src/mongo/db/s/sharding_ddl_util.h
index 5a34e125924..ec198cc7ad6 100644
--- a/src/mongo/db/s/sharding_ddl_util.h
+++ b/src/mongo/db/s/sharding_ddl_util.h
@@ -33,6 +33,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/s/async_requests_sender.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
@@ -48,11 +49,12 @@ void linearizeCSRSReads(OperationContext* opCtx);
/**
* Generic utility to send a command to a list of shards. Throws if one of the commands fails.
*/
-void sendAuthenticatedCommandToShards(OperationContext* opCtx,
- StringData dbName,
- const BSONObj& command,
- const std::vector<ShardId>& shardIds,
- const std::shared_ptr<executor::TaskExecutor>& executor);
+std::vector<AsyncRequestsSender::Response> sendAuthenticatedCommandToShards(
+ OperationContext* opCtx,
+ StringData dbName,
+ const BSONObj& command,
+ const std::vector<ShardId>& shardIds,
+ const std::shared_ptr<executor::TaskExecutor>& executor);
/**
* Erase tags metadata from config server for the given namespace, using the _configsvrRemoveTags
diff --git a/src/mongo/db/s/sharding_util.cpp b/src/mongo/db/s/sharding_util.cpp
index 26c8da56f8c..675f10cc521 100644
--- a/src/mongo/db/s/sharding_util.cpp
+++ b/src/mongo/db/s/sharding_util.cpp
@@ -36,7 +36,6 @@
#include "mongo/db/commands.h"
#include "mongo/logv2/log.h"
-#include "mongo/s/async_requests_sender.h"
#include "mongo/s/request_types/flush_routing_table_cache_updates_gen.h"
namespace mongo {
@@ -55,16 +54,18 @@ void tellShardsToRefreshCollection(OperationContext* opCtx,
sendCommandToShards(opCtx, NamespaceString::kAdminDb, cmdObj, shardIds, executor);
}
-void sendCommandToShards(OperationContext* opCtx,
- StringData dbName,
- const BSONObj& command,
- const std::vector<ShardId>& shardIds,
- const std::shared_ptr<executor::TaskExecutor>& executor) {
+std::vector<AsyncRequestsSender::Response> sendCommandToShards(
+ OperationContext* opCtx,
+ StringData dbName,
+ const BSONObj& command,
+ const std::vector<ShardId>& shardIds,
+ const std::shared_ptr<executor::TaskExecutor>& executor) {
std::vector<AsyncRequestsSender::Request> requests;
for (const auto& shardId : shardIds) {
requests.emplace_back(shardId, command);
}
+ std::vector<AsyncRequestsSender::Response> responses;
if (!requests.empty()) {
// The _flushRoutingTableCacheUpdatesWithWriteConcern command will fail with a
// QueryPlanKilled error response if the config.cache.chunks collection is dropped
@@ -93,8 +94,11 @@ void sendCommandToShards(OperationContext* opCtx,
auto wcStatus = getWriteConcernStatusFromCommandResult(shardResponse.data);
uassertStatusOKWithContext(wcStatus, errorContext);
+
+ responses.push_back(std::move(response));
}
}
+ return responses;
}
} // namespace sharding_util
diff --git a/src/mongo/db/s/sharding_util.h b/src/mongo/db/s/sharding_util.h
index 684568b0214..1a2c3c51eeb 100644
--- a/src/mongo/db/s/sharding_util.h
+++ b/src/mongo/db/s/sharding_util.h
@@ -34,6 +34,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/s/async_requests_sender.h"
#include "mongo/s/shard_id.h"
namespace mongo {
@@ -51,11 +52,12 @@ void tellShardsToRefreshCollection(OperationContext* opCtx,
/**
* Generic utility to send a command to a list of shards. Throws if one of the commands fails.
*/
-void sendCommandToShards(OperationContext* opCtx,
- StringData dbName,
- const BSONObj& command,
- const std::vector<ShardId>& shardIds,
- const std::shared_ptr<executor::TaskExecutor>& executor);
+std::vector<AsyncRequestsSender::Response> sendCommandToShards(
+ OperationContext* opCtx,
+ StringData dbName,
+ const BSONObj& command,
+ const std::vector<ShardId>& shardIds,
+ const std::shared_ptr<executor::TaskExecutor>& executor);
} // namespace sharding_util
} // namespace mongo
diff --git a/src/mongo/db/s/shardsvr_collmod_command.cpp b/src/mongo/db/s/shardsvr_collmod_command.cpp
new file mode 100644
index 00000000000..bd837280240
--- /dev/null
+++ b/src/mongo/db/s/shardsvr_collmod_command.cpp
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/db/auth/authorization_checks.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/coll_mod_gen.h"
+#include "mongo/db/coll_mod_reply_validation.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/s/collmod_coordinator.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/logv2/log.h"
+#include "mongo/s/chunk_manager_targeter.h"
+#include "mongo/s/grid.h"
+
+namespace mongo {
+namespace {
+
+class ShardsvrCollModCommand final : public BasicCommandWithRequestParser<ShardsvrCollModCommand> {
+public:
+ using Request = ShardsvrCollMod;
+ using Response = CollModReply;
+
+ ShardsvrCollModCommand() : BasicCommandWithRequestParser() {}
+
+ AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
+ return Command::AllowedOnSecondary::kNever;
+ }
+
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
+ return true;
+ }
+
+ bool collectsResourceConsumptionMetrics() const override {
+ return true;
+ }
+
+ std::string help() const override {
+ return "Internal command, which is exported by the primary sharding server. Do not call "
+ "directly. Modifies collection.";
+ }
+
+ Status checkAuthForCommand(Client* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) const override {
+ const NamespaceString nss(parseNs(dbname, cmdObj));
+ return auth::checkAuthForCollMod(AuthorizationSession::get(client), nss, cmdObj, false);
+ }
+
+ bool skipApiVersionCheck() const override {
+ // Internal command (server to server).
+ return true;
+ }
+
+ bool runWithRequestParser(OperationContext* opCtx,
+ const std::string& db,
+ const BSONObj& cmdObj,
+ const RequestParser& requestParser,
+ BSONObjBuilder& result) override {
+ auto const shardingState = ShardingState::get(opCtx);
+ uassertStatusOK(shardingState->canAcceptShardedCommands());
+
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << Request::kCommandName
+ << " must be called with majority writeConcern, got "
+ << opCtx->getWriteConcern().wMode,
+ opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
+
+ opCtx->setAlwaysInterruptAtStepDownOrUp();
+
+ // Since this operation does not perform a direct local write, we need to explicitly
+ // raise the db profile level so that it is still logged in "<db>.system.profile".
+ const auto& cmd = requestParser.request();
+ CurOp::get(opCtx)->raiseDbProfileLevel(
+ CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(cmd.getDbName()));
+
+ auto coordinatorDoc = CollModCoordinatorDocument();
+ coordinatorDoc.setCollModRequest(cmd.getCollModRequest());
+ coordinatorDoc.setShardingDDLCoordinatorMetadata(
+ {{cmd.getNamespace(), DDLCoordinatorTypeEnum::kCollMod}});
+ auto service = ShardingDDLCoordinatorService::getService(opCtx);
+ auto collModCoordinator = checked_pointer_cast<CollModCoordinator>(
+ service->getOrCreateInstance(opCtx, coordinatorDoc.toBSON()));
+ result.appendElements(collModCoordinator->getResult(opCtx));
+ return true;
+ }
+
+ void validateResult(const BSONObj& resultObj) final {
+ StringDataSet ignorableFields({"raw", "ok", "errmsg"});
+ auto reply = Response::parse(IDLParserErrorContext("CollModReply"),
+ resultObj.removeFields(ignorableFields));
+ coll_mod_reply_validation::validateReply(reply);
+ }
+} shardsvrCollModCommand;
+
+} // namespace
+} // namespace mongo
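
Because CollModRequest is chained into the command (see the IDL addition at the end of this patch), the request this command receives is effectively a plain collMod body under an internal command name. A rough, illustrative wire shape, with made-up field values:

    // Illustrative only; the actual fields depend on what the user passed to collMod.
    BSONObj example = BSON("_shardsvrCollMod" << "coll"
                                              << "validationLevel" << "moderate"
                                              << "writeConcern" << BSON("w" << "majority")
                                              << "$db" << "test");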
diff --git a/src/mongo/db/s/shardsvr_collmod_participant_command.cpp b/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
new file mode 100644
index 00000000000..155d298f751
--- /dev/null
+++ b/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
@@ -0,0 +1,109 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/s/collmod_coordinator.h"
+#include "mongo/db/s/sharded_collmod_gen.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/timeseries/timeseries_collmod.h"
+#include "mongo/logv2/log.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
+#include "mongo/s/chunk_manager_targeter.h"
+
+namespace mongo {
+namespace {
+
+class ShardSvrCollModParticipantCommand final
+ : public TypedCommand<ShardSvrCollModParticipantCommand> {
+public:
+ using Request = ShardsvrCollModParticipant;
+ using Response = CollModReply;
+
+ std::string help() const override {
+ return "Internal command, which is exported by the shards. Do not call "
+ "directly. Blocks writes during collection mod.";
+ }
+
+ bool skipApiVersionCheck() const override {
+ // Internal command (server to server).
+ return true;
+ }
+
+ AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
+ return Command::AllowedOnSecondary::kNever;
+ }
+
+ class Invocation final : public InvocationBase {
+ public:
+ using InvocationBase::InvocationBase;
+
+ Response typedRun(OperationContext* opCtx) {
+ uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands());
+
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << Request::kCommandName
+ << " must be called with majority writeConcern, got "
+ << opCtx->getWriteConcern().wMode,
+ opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
+
+ opCtx->setAlwaysInterruptAtStepDownOrUp();
+
+ BSONObjBuilder builder;
+ CollMod cmd(ns());
+ cmd.setCollModRequest(request().getCollModRequest());
+ uassertStatusOK(timeseries::processCollModCommandWithTimeSeriesTranslation(
+ opCtx, ns(), cmd, &builder));
+ return CollModReply::parse(IDLParserErrorContext("CollModReply"), builder.obj());
+ }
+
+ private:
+ NamespaceString ns() const override {
+ return request().getNamespace();
+ }
+
+ bool supportsWriteConcern() const override {
+ return true;
+ }
+
+ void doCheckAuthorization(OperationContext* opCtx) const override {
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::internal));
+ }
+ };
+} shardsvrCollModParticipantCommand;
+
+} // namespace
+} // namespace mongo
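
For reference, a minimal sketch of how the CollModCoordinator (added in collmod_coordinator.cpp, not shown in this excerpt) could fan this participant command out to the chunk-owning shards via the sendCommandToShards change above; the variable names and surrounding plumbing are assumptions, not the coordinator's actual code:

    // Hypothetical dispatch from a coordinator phase; `request`, `participantShardIds`
    // and `executor` are assumed to be in scope.
    ShardsvrCollModParticipant participantCmd(nss);
    participantCmd.setCollModRequest(request);
    const auto cmdObj = CommandHelpers::appendMajorityWriteConcern(
        participantCmd.toBSON({}), opCtx->getWriteConcern());
    const auto responses = sharding_util::sendCommandToShards(
        opCtx, nss.db(), cmdObj, participantShardIds, executor);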
diff --git a/src/mongo/db/timeseries/SConscript b/src/mongo/db/timeseries/SConscript
index 5d00033beaf..a9cc40ef985 100644
--- a/src/mongo/db/timeseries/SConscript
+++ b/src/mongo/db/timeseries/SConscript
@@ -102,7 +102,7 @@ env.Library(
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/bson/mutable/mutable_bson',
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
- '$BUILD_DIR/mongo/db/commands',
+ '$BUILD_DIR/mongo/db/coll_mod_command_idl',
'$BUILD_DIR/mongo/db/exec/bucket_unpacker',
'$BUILD_DIR/mongo/db/namespace_string',
'catalog_helper',
diff --git a/src/mongo/db/timeseries/timeseries_collmod.cpp b/src/mongo/db/timeseries/timeseries_collmod.cpp
index de548b89995..1e5f6f5a91b 100644
--- a/src/mongo/db/timeseries/timeseries_collmod.cpp
+++ b/src/mongo/db/timeseries/timeseries_collmod.cpp
@@ -69,18 +69,20 @@ std::unique_ptr<CollMod> makeTimeseriesBucketsCollModCommand(OperationContext* o
auto ns =
isCommandOnTimeseriesBucketNamespace ? origNs : origNs.makeTimeseriesBucketsNamespace();
+ CollModRequest request;
+ request.setIndex(index);
+ request.setValidator(origCmd.getValidator());
+ request.setValidationLevel(origCmd.getValidationLevel());
+ request.setValidationAction(origCmd.getValidationAction());
+ request.setViewOn(origCmd.getViewOn());
+ request.setPipeline(origCmd.getPipeline());
+ request.setRecordPreImages(origCmd.getRecordPreImages());
+ request.setChangeStreamPreAndPostImages(origCmd.getChangeStreamPreAndPostImages());
+ request.setExpireAfterSeconds(origCmd.getExpireAfterSeconds());
+ request.setTimeseries(origCmd.getTimeseries());
+ request.setDryRun(origCmd.getDryRun());
auto cmd = std::make_unique<CollMod>(ns);
- cmd->setIndex(index);
- cmd->setValidator(origCmd.getValidator());
- cmd->setValidationLevel(origCmd.getValidationLevel());
- cmd->setValidationAction(origCmd.getValidationAction());
- cmd->setViewOn(origCmd.getViewOn());
- cmd->setPipeline(origCmd.getPipeline());
- cmd->setRecordPreImages(origCmd.getRecordPreImages());
- cmd->setChangeStreamPreAndPostImages(origCmd.getChangeStreamPreAndPostImages());
- cmd->setExpireAfterSeconds(origCmd.getExpireAfterSeconds());
- cmd->setTimeseries(origCmd.getTimeseries());
-
+ cmd->setCollModRequest(request);
return cmd;
}
@@ -108,7 +110,9 @@ std::unique_ptr<CollMod> makeTimeseriesViewCollModCommand(OperationContext* opCt
constexpr bool asArray = false;
std::vector<BSONObj> pipeline = {
timeseries::generateViewPipeline(newOptions, asArray)};
- cmd->setPipeline(std::move(pipeline));
+ CollModRequest viewRequest;
+ viewRequest.setPipeline(std::move(pipeline));
+ cmd->setCollModRequest(viewRequest);
return cmd;
}
}
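
The changes above follow from CollMod now carrying its options through a chained CollModRequest struct rather than individual setters on the command itself. Building the request once and attaching it with setCollModRequest() is what lets the same object travel unchanged into the coordinator document and the participant command. A minimal sketch of the pattern, with field population elided:

    CollModRequest request;
    // ... populate individual collMod fields on `request` ...
    CollMod localCmd(bucketsNs);
    localCmd.setCollModRequest(request);              // executed locally
    ShardsvrCollModParticipant participantCmd(bucketsNs);
    participantCmd.setCollModRequest(request);        // same request forwarded to a shard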
diff --git a/src/mongo/db/transaction_validation.cpp b/src/mongo/db/transaction_validation.cpp
index f30d64eb205..648639d8599 100644
--- a/src/mongo/db/transaction_validation.cpp
+++ b/src/mongo/db/transaction_validation.cpp
@@ -57,7 +57,8 @@ const StringMap<int> retryableWriteCommands = {{"delete", 1},
{"_shardsvrDropCollectionParticipant", 1},
{"_shardsvrRenameCollectionParticipant", 1},
{"_shardsvrRenameCollectionParticipantUnblock", 1},
- {"_configsvrRenameCollectionMetadata", 1}};
+ {"_configsvrRenameCollectionMetadata", 1},
+ {"_shardsvrCollModParticipant", 1}};
// Commands that can be sent with session info but should not check out a session.
const StringMap<int> skipSessionCheckoutList = {
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index ee70b48fe08..66ab7a5fd6d 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -226,6 +226,7 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/client/connection_string',
+ '$BUILD_DIR/mongo/db/coll_mod_command_idl',
'$BUILD_DIR/mongo/db/common',
'$BUILD_DIR/mongo/db/namespace_string',
'$BUILD_DIR/mongo/db/query/query_request',
diff --git a/src/mongo/s/commands/cluster_collection_mod_cmd.cpp b/src/mongo/s/commands/cluster_collection_mod_cmd.cpp
index e2e56136543..d295ca18c71 100644
--- a/src/mongo/s/commands/cluster_collection_mod_cmd.cpp
+++ b/src/mongo/s/commands/cluster_collection_mod_cmd.cpp
@@ -36,11 +36,11 @@
#include "mongo/db/coll_mod_gen.h"
#include "mongo/db/coll_mod_reply_validation.h"
#include "mongo/db/commands.h"
-#include "mongo/db/timeseries/timeseries_commands_conversion_helper.h"
#include "mongo/logv2/log.h"
#include "mongo/s/chunk_manager_targeter.h"
#include "mongo/s/cluster_commands_helpers.h"
#include "mongo/s/grid.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
namespace mongo {
namespace {
@@ -84,7 +84,7 @@ public:
const BSONObj& cmdObj,
const RequestParser& requestParser,
BSONObjBuilder& result) final {
- auto cmd = requestParser.request();
+ const auto& cmd = requestParser.request();
auto nss = cmd.getNamespace();
LOGV2_DEBUG(22748,
1,
@@ -93,32 +93,23 @@ public:
"namespace"_attr = nss,
"command"_attr = redact(cmdObj));
- const auto targeter = ChunkManagerTargeter(opCtx, nss);
- const auto& routingInfo = targeter.getRoutingInfo();
- auto cmdToBeSent = cmdObj;
- if (targeter.timeseriesNamespaceNeedsRewrite(nss)) {
- cmdToBeSent = timeseries::makeTimeseriesCommand(
- cmdToBeSent, nss, getName(), CollMod::kIsTimeseriesNamespaceFieldName);
- }
-
- auto shardResponses = scatterGatherVersionedTargetByRoutingTable(
- opCtx,
- cmd.getDbName(),
- targeter.getNS(),
- routingInfo,
- applyReadWriteConcern(
- opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmdToBeSent)),
- ReadPreferenceSetting::get(opCtx),
- Shard::RetryPolicy::kNoRetry,
- BSONObj() /* query */,
- BSONObj() /* collation */);
- std::string errmsg;
- auto ok = appendRawResponses(opCtx, &errmsg, &result, std::move(shardResponses)).responseOK;
- if (!errmsg.empty()) {
- CommandHelpers::appendSimpleCommandStatus(result, ok, errmsg);
- }
-
- return ok;
+ const auto dbInfo =
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, cmd.getDbName()));
+ ShardsvrCollMod collModCommand(nss);
+ collModCommand.setCollModRequest(cmd.getCollModRequest());
+ auto cmdResponse =
+ uassertStatusOK(executeCommandAgainstDatabasePrimary(
+ opCtx,
+ db,
+ dbInfo,
+ CommandHelpers::appendMajorityWriteConcern(
+ collModCommand.toBSON({}), opCtx->getWriteConcern()),
+ ReadPreferenceSetting(ReadPreference::PrimaryOnly),
+ Shard::RetryPolicy::kIdempotent)
+ .swResponse);
+
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.data, &result);
+ return cmdResponse.isOK();
}
void validateResult(const BSONObj& resultObj) final {
diff --git a/src/mongo/s/request_types/sharded_ddl_commands.idl b/src/mongo/s/request_types/sharded_ddl_commands.idl
index 4306a35e66e..b0151f36182 100644
--- a/src/mongo/s/request_types/sharded_ddl_commands.idl
+++ b/src/mongo/s/request_types/sharded_ddl_commands.idl
@@ -35,6 +35,7 @@ imports:
- "mongo/db/commands/rename_collection.idl"
- "mongo/db/drop_database.idl"
- "mongo/db/keypattern.idl"
+ - "mongo/db/coll_mod.idl"
- "mongo/idl/basic_types.idl"
- "mongo/s/chunk_version.idl"
- "mongo/s/database_version.idl"
@@ -326,3 +327,13 @@ commands:
description: "May only be set to 'true'. If set, indicates to the config server that
it must turn on the 'enableSharding' bit for that database."
optional: true
+
+ _shardsvrCollMod:
+ command_name: _shardsvrCollMod
+ cpp_name: ShardsvrCollMod
+ description: "Internal command sent to the primary shard in a collMod procedure"
+ strict: false
+ api_version: ""
+ namespace: concatenate_with_db
+ chained_structs:
+ CollModRequest: CollModRequest
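
For readers unfamiliar with chained_structs: the generated ShardsvrCollMod type exposes the chained struct through setCollModRequest()/getCollModRequest() (as used throughout this patch) and serializes the struct's fields inline with the command rather than nesting them in a subdocument. A sketch of the resulting usage; the serialized layout in the comment is an assumption based on that behavior:

    ShardsvrCollMod cmd(NamespaceString("test.coll"));
    CollModRequest req;
    // ... set collMod fields on req ...
    cmd.setCollModRequest(req);
    BSONObj wire = cmd.toBSON({});
    // wire ~= { _shardsvrCollMod: "coll", <collMod fields inline>, $db: "test" }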