diff options
26 files changed, 740 insertions, 41 deletions
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js index 605a235e83a..93c221da2d8 100644 --- a/jstests/core/views/views_all_commands.js +++ b/jstests/core/views/views_all_commands.js @@ -150,6 +150,7 @@ let viewsCommandTests = { _shardsvrRenameCollectionParticipantUnblock: {skip: isAnInternalCommand}, _shardsvrReshardCollection: {skip: isAnInternalCommand}, _shardsvrReshardingOperationTime: {skip: isAnInternalCommand}, + _shardsvrSetAllowMigrations: {skip: isAnInternalCommand}, _shardsvrShardCollection: {skip: isAnInternalCommand}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS _transferMods: {skip: isAnInternalCommand}, @@ -534,6 +535,16 @@ let viewsCommandTests = { }, revokeRolesFromRole: {skip: isUnrelated}, revokeRolesFromUser: {skip: isUnrelated}, + setAllowMigrations: { + command: {setAllowMigrations: "test.view", allowMigrations: false}, + setup: function(conn) { + assert.commandWorked(conn.adminCommand({enableSharding: "test"})); + }, + expectedErrorCode: ErrorCodes.NamespaceNotSharded, + skipStandalone: true, + expectFailure: true, + isAdminCommand: true + }, rolesInfo: {skip: isUnrelated}, rotateCertificates: {skip: isUnrelated}, saslContinue: {skip: isUnrelated}, diff --git a/jstests/replsets/db_reads_while_recovering_all_commands.js b/jstests/replsets/db_reads_while_recovering_all_commands.js index 34af3fb78a1..c29ddb1beb5 100644 --- a/jstests/replsets/db_reads_while_recovering_all_commands.js +++ b/jstests/replsets/db_reads_while_recovering_all_commands.js @@ -87,6 +87,7 @@ const allCommands = { _shardsvrReshardCollection: {skip: isPrimaryOnly}, _shardsvrReshardingOperationTime: {skip: isPrimaryOnly}, _shardsvrRefineCollectionShardKey: {skip: isPrimaryOnly}, + _shardsvrSetAllowMigrations: {skip: isPrimaryOnly}, _transferMods: {skip: isPrimaryOnly}, _vectorClockPersist: {skip: isPrimaryOnly}, abortReshardCollection: {skip: isPrimaryOnly}, diff --git 
a/jstests/replsets/tenant_migration_concurrent_writes_on_donor.js b/jstests/replsets/tenant_migration_concurrent_writes_on_donor.js index 78c46b3f747..3c607241244 100644 --- a/jstests/replsets/tenant_migration_concurrent_writes_on_donor.js +++ b/jstests/replsets/tenant_migration_concurrent_writes_on_donor.js @@ -533,6 +533,7 @@ const testCases = { _shardsvrCreateCollection: {skip: isOnlySupportedOnShardedCluster}, _shardsvrCreateCollectionParticipant: {skip: isOnlySupportedOnShardedCluster}, _shardsvrMovePrimary: {skip: isNotRunOnUserDatabase}, + _shardsvrSetAllowMigrations: {skip: isOnlySupportedOnShardedCluster}, _shardsvrShardCollection: {skip: isNotRunOnUserDatabase}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS _shardsvrRenameCollection: {skip: isOnlySupportedOnShardedCluster}, @@ -934,6 +935,7 @@ const testCases = { saslStart: {skip: isAuthCommand}, sbe: {skip: isNotRunOnUserDatabase}, serverStatus: {skip: isNotRunOnUserDatabase}, + setAllowMigrations: {skip: isNotRunOnUserDatabase}, setCommittedSnapshot: {skip: isNotRunOnUserDatabase}, setDefaultRWConcern: {skip: isNotRunOnUserDatabase}, setFeatureCompatibilityVersion: {skip: isNotRunOnUserDatabase}, diff --git a/jstests/sharding/database_versioning_all_commands.js b/jstests/sharding/database_versioning_all_commands.js index 200c4cbe3fa..686c1877fea 100644 --- a/jstests/sharding/database_versioning_all_commands.js +++ b/jstests/sharding/database_versioning_all_commands.js @@ -617,6 +617,7 @@ let testCases = { saslContinue: {skip: "not on a user database"}, saslStart: {skip: "not on a user database"}, serverStatus: {skip: "executes locally on mongos (not sent to any remote node)"}, + setAllowMigrations: {skip: "not on a user database"}, setAuditConfig: {skip: "not on a user database", conditional: true}, setDefaultRWConcern: {skip: "always targets the config server"}, setIndexCommitQuorum: { diff --git a/jstests/sharding/ddl_ops_reported_on_current_op_command.js 
b/jstests/sharding/ddl_ops_reported_on_current_op_command.js index 27a3a23db9e..20e75062fe8 100644 --- a/jstests/sharding/ddl_ops_reported_on_current_op_command.js +++ b/jstests/sharding/ddl_ops_reported_on_current_op_command.js @@ -59,7 +59,7 @@ let getCurrentOpOfDDL = (ddlOpThread, desc) => { // refineCollectionShardKey and movePrimary to use the new DDLCoordinator. if (jsTestOptions().useRandomBinVersionsWithinReplicaSet || jsTestOptions().shardMixedBinVersions) { jsTest.log( - "Skipping checking refineCollectionShardKey and movePrimary due to the fact that they're not using a DDLCoordinator on 5.0"); + "Skipping checking refineCollectionShardKey, movePrimary and setAllowMigrations due to the fact that they're not using a DDLCoordinator on 5.0"); } else { { jsTestLog('Check refine collection shard key shows in current op'); @@ -98,6 +98,24 @@ if (jsTestOptions().useRandomBinVersionsWithinReplicaSet || jsTestOptions().shar assert(currOp[0].command.request.hasOwnProperty('toShardId')); assert.eq(st.shard0.shardName, currOp[0].command.request.toShardId); } + + { + jsTestLog('Check set allow migrations shows in current op'); + + let ddlOpThread = new Thread((mongosConnString, nss) => { + let mongos = new Mongo(mongosConnString); + mongos.adminCommand({setAllowMigrations: nss, allowMigrations: true}); + }, st.s0.host, nss); + + let currOp = getCurrentOpOfDDL(ddlOpThread, 'SetAllowMigrationsCoordinator'); + + // There must be one operation running with the appropriate ns. 
+ assert.eq(1, currOp.length); + assert.eq(nss, currOp[0].ns); + assert(currOp[0].hasOwnProperty('command')); + assert(currOp[0].command.hasOwnProperty('allowMigrations')); + assert.eq(true, currOp[0].command.allowMigrations); + } } { diff --git a/jstests/sharding/libs/last_lts_mongos_commands.js b/jstests/sharding/libs/last_lts_mongos_commands.js index 66f1ecc871f..84eb7fd8e96 100644 --- a/jstests/sharding/libs/last_lts_mongos_commands.js +++ b/jstests/sharding/libs/last_lts_mongos_commands.js @@ -14,6 +14,7 @@ const commandsAddedToMongosSinceLastLTS = [ "configureCollectionAutoSplitter", "reshardCollection", "rotateCertificates", + "setAllowMigrations", "testDeprecation", "testDeprecationInVersion2", "testRemoval", diff --git a/jstests/sharding/libs/mongos_api_params_util.js b/jstests/sharding/libs/mongos_api_params_util.js index ef4ce3012da..e22045f1761 100644 --- a/jstests/sharding/libs/mongos_api_params_util.js +++ b/jstests/sharding/libs/mongos_api_params_util.js @@ -1163,6 +1163,23 @@ let MongosAPIParametersUtil = (function() { skip: "executes locally on mongos (not sent to any remote node)" }, { + commandName: "setAllowMigrations", + run: { + inAPIVersion1: false, + shardCommandName: "_shardsvrSetAllowMigrations", + runsAgainstAdminDb: true, + permittedInTxn: false, + requiresShardedCollection: true, + setUp: () => { + assert.commandWorked(st.s.adminCommand( + {enableSharding: "db", primaryShard: st.shard0.shardName})); + assert.commandWorked( + st.s.adminCommand({shardCollection: "db.collection", key: {_id: 1}})); + }, + command: () => ({setAllowMigrations: "db.collection", allowMigrations: true}) + } + }, + { commandName: "setDefaultRWConcern", run: { inAPIVersion1: false, diff --git a/jstests/sharding/move_chunk_allowMigrations.js b/jstests/sharding/move_chunk_allowMigrations.js index 4db3d97b946..1ef488f7f33 100644 --- a/jstests/sharding/move_chunk_allowMigrations.js +++ b/jstests/sharding/move_chunk_allowMigrations.js @@ -62,38 +62,6 @@ const setUpDb = 
function setUpDatabaseAndEnableSharding() { assert.eq(false, cachedEntry.allowMigrations); })(); -// TODO SERVER-61033: remove after permitMigrations have been merged with allowMigrations. -// Tests that moveChunk does not succeed when {permitMigrations: false} -(function testPermitMigrationsFalsePreventsMoveChunk() { - setUpDb(); - - const collName = "collA"; - const ns = dbName + "." + collName; - - assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 0})); - assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 1})); - assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}})); - - // Confirm that an inProgress moveChunk fails once {allowMigrations: false} - const fp = configureFailPoint(st.shard0, "moveChunkHangAtStep4"); - const awaitResult = startParallelShell( - funWithArgs(function(ns, toShardName) { - assert.commandFailedWithCode( - db.adminCommand({moveChunk: ns, find: {_id: 0}, to: toShardName}), - ErrorCodes.ConflictingOperationInProgress); - }, ns, st.shard1.shardName), st.s.port); - fp.wait(); - assert.commandWorked(configDB.collections.updateOne( - {_id: ns}, {$set: {permitMigrations: false}}, {writeConcern: {w: "majority"}})); - fp.off(); - awaitResult(); - - // {permitMigrations: false} is set, sending a new moveChunk command should also fail. - assert.commandFailedWithCode( - st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}), - ErrorCodes.ConflictingOperationInProgress); -})(); - // Tests {allowMigrations: false} disables balancing for collB and does not interfere with balancing // for collA. // @@ -238,9 +206,6 @@ testBalancer(false /* allowMigrations */, {}); testBalancer(false /* allowMigrations */, {noBalance: false}); testBalancer(false /* allowMigrations */, {noBalance: true}); -// TODO SERVER-61033: merge permitMigrations with allowMigrations. 
-testBalancer(true /* allowMigrations */, {permitMigrations: false}); - // Test the _configsvrSetAllowMigrations internal command testConfigsvrSetAllowMigrationsCommand(); diff --git a/jstests/sharding/move_chunk_permitMigrations.js b/jstests/sharding/move_chunk_permitMigrations.js new file mode 100644 index 00000000000..9f21ba01673 --- /dev/null +++ b/jstests/sharding/move_chunk_permitMigrations.js @@ -0,0 +1,162 @@ +/** + * Tests that a collection with permitMigrations: false in config.collections prohibits committing a + * moveChunk and disables the balancer. + * + * @tags: [ + * does_not_support_stepdowns, + * requires_fcv_52, + * ] + */ +(function() { +'use strict'; + +load('jstests/libs/fail_point_util.js'); +load('jstests/libs/parallel_shell_helpers.js'); +load("jstests/sharding/libs/find_chunks_util.js"); +load("jstests/sharding/libs/shard_versioning_util.js"); + +const st = new ShardingTest({shards: 2}); +const configDB = st.s.getDB("config"); +const dbName = 'AllowMigrations'; + +// Resets database dbName and enables sharding and establishes shard0 as primary, test case agnostic +const setUpDb = function setUpDatabaseAndEnableSharding() { + assert.commandWorked(st.s.getDB(dbName).dropDatabase()); + assert.commandWorked( + st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName})); +}; + +// Use the setAllowMigrations command to set the permitMigrations flag in the collection. +const setAllowMigrations = function(ns, allow) { + assert.commandWorked(st.s.adminCommand({setAllowMigrations: ns, allowMigrations: allow})); +}; + +// Tests that moveChunk does not succeed when setAllowMigrations is called with a false value. +(function testSetAllowMigrationsFalsePreventsMoveChunk() { + setUpDb(); + + const collName = "collA"; + const ns = dbName + "." 
+ collName; + + assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 0})); + assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 1})); + assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}})); + + setAllowMigrations(ns, false); + + // setAllowMigrations was called, sending a new moveChunk command should fail. + assert.commandFailedWithCode( + st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}), + ErrorCodes.ConflictingOperationInProgress); +})(); + +// Tests setAllowMigrations disables balancing for collB and does not interfere with +// balancing for collA. +// +// collBSetParams specify the field(s) that will be set on the collB in config.collections. +const testBalancer = function(setAllowMigrations, collBSetNoBalanceParam) { + setUpDb(); + + const collAName = "collA"; + const collBName = "collB"; + const collA = st.s.getCollection(`${dbName}.${collAName}`); + const collB = st.s.getCollection(`${dbName}.${collBName}`); + + assert.commandWorked(st.s.adminCommand({shardCollection: collA.getFullName(), key: {_id: 1}})); + assert.commandWorked(st.s.adminCommand({shardCollection: collB.getFullName(), key: {_id: 1}})); + + // Split both collections into 4 chunks so balancing can occur. + for (let coll of [collA, collB]) { + coll.insert({_id: 1}); + coll.insert({_id: 10}); + coll.insert({_id: 20}); + coll.insert({_id: 30}); + + assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 10})); + assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 20})); + assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 30})); + + // Confirm the chunks are initially unbalanced. All chunks should start out on shard0 + // (primary shard for the database). 
+ const balancerStatus = assert.commandWorked( + st.s0.adminCommand({balancerCollectionStatus: coll.getFullName()})); + assert.eq(balancerStatus.balancerCompliant, false); + assert.eq(balancerStatus.firstComplianceViolation, 'chunksImbalance'); + assert.eq(4, + findChunksUtil + .findChunksByNs(configDB, coll.getFullName(), {shard: st.shard0.shardName}) + .count()); + } + + jsTestLog(`Disabling balancing of ${collB.getFullName()} with setAllowMigrations ${ + setAllowMigrations} and parameters ${tojson(collBSetNoBalanceParam)}`); + assert.commandWorked( + configDB.collections.update({_id: collB.getFullName()}, {$set: collBSetNoBalanceParam})); + + setAllowMigrations(collB.getFullName(), setAllowMigrations); + + st.startBalancer(); + assert.soon(() => { + st.awaitBalancerRound(); + const shard0Chunks = + findChunksUtil + .findChunksByNs(configDB, collA.getFullName(), {shard: st.shard0.shardName}) + .itcount(); + const shard1Chunks = + findChunksUtil + .findChunksByNs(configDB, collA.getFullName(), {shard: st.shard1.shardName}) + .itcount(); + jsTestLog(`shard0 chunks ${shard0Chunks}, shard1 chunks ${shard1Chunks}`); + return shard0Chunks == 2 && shard1Chunks == 2; + }, `Balancer failed to balance ${collA.getFullName()}`, 1000 * 60 * 10); + st.stopBalancer(); + + const collABalanceStatus = + assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: collA.getFullName()})); + assert.eq(collABalanceStatus.balancerCompliant, true); + + // Test that collB remains unbalanced. 
+ const collBBalanceStatus = + assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: collB.getFullName()})); + assert.eq(collBBalanceStatus.balancerCompliant, false); + assert.eq(collBBalanceStatus.firstComplianceViolation, 'chunksImbalance'); + assert.eq( + 4, + findChunksUtil.findChunksByNs(configDB, collB.getFullName(), {shard: st.shard0.shardName}) + .count()); +}; + +const testSetAllowMigrationsCommand = function() { + setUpDb(); + + const collName = "foo"; + const ns = dbName + "." + collName; + + assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}})); + + ShardVersioningUtil.assertCollectionVersionEquals(st.shard0, ns, Timestamp(1, 0)); + + // Use setAllowMigrations to forbid migrations from happening + setAllowMigrations(ns, false); + + // Check that allowMigrations has been set to 'false' on the configsvr config.collections. + assert.eq(false, configDB.collections.findOne({_id: ns}).permitMigrations); + + // Use setAllowMigrations to allow migrations to happen + setAllowMigrations(ns, true); + + // Check that permitMigrations has been unset (that implies migrations are allowed) on the + // configsvr config.collections. + assert.eq(undefined, configDB.collections.findOne({_id: ns}).permitMigrations); +}; + +// Test cases that should disable the balancer. +testBalancer(false /* setAllowMigrations */, {}); +testBalancer(false /* setAllowMigrations */, {noBalance: false}); +testBalancer(false /* setAllowMigrations */, {noBalance: true}); + +// Test the setAllowMigrations command. 
+testSetAllowMigrationsCommand(); + +st.stop(); +})(); diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js index 54e40386a51..ea0e292f9ca 100644 --- a/jstests/sharding/read_write_concern_defaults_application.js +++ b/jstests/sharding/read_write_concern_defaults_application.js @@ -152,6 +152,7 @@ let testCases = { _shardsvrRenameCollectionParticipantUnblock: {skip: "internal command"}, _shardsvrReshardCollection: {skip: "internal command"}, _shardsvrReshardingOperationTime: {skip: "internal command"}, + _shardsvrSetAllowMigrations: {skip: "internal command"}, _shardsvrShardCollection: {skip: "internal command"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS _transferMods: {skip: "internal command"}, @@ -641,6 +642,7 @@ let testCases = { saslStart: {skip: "does not accept read or write concern"}, sbe: {skip: "internal command"}, serverStatus: {skip: "does not accept read or write concern"}, + setAllowMigrations: {skip: "does not accept read or write concern"}, setAuditConfig: {skip: "does not accept read or write concern"}, setCommittedSnapshot: {skip: "internal command"}, setDefaultRWConcern: {skip: "special case (must run after all other commands)"}, diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js index 98b56fcfcde..bef327e2700 100644 --- a/jstests/sharding/safe_secondary_reads_drop_recreate.js +++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js @@ -299,6 +299,7 @@ let testCases = { saslStart: {skip: "primary only"}, sbe: {skip: "internal command"}, serverStatus: {skip: "does not return user data"}, + setAllowMigrations: {skip: "primary only"}, setAuditConfig: {skip: "does not return user data"}, setCommittedSnapshot: {skip: "does not return user data"}, setDefaultRWConcern: {skip: "primary only"}, diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js 
b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js index eb315ccf58c..8b1ba4de4ac 100644 --- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js +++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js @@ -368,6 +368,7 @@ let testCases = { saslStart: {skip: "primary only"}, sbe: {skip: "internal command"}, serverStatus: {skip: "does not return user data"}, + setAllowMigrations: {skip: "primary only"}, setAuditConfig: {skip: "does not return user data"}, setCommittedSnapshot: {skip: "does not return user data"}, setDefaultRWConcern: {skip: "primary only"}, diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js index c52c0ce3c84..02b45219dea 100644 --- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js +++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js @@ -304,6 +304,7 @@ let testCases = { saslStart: {skip: "primary only"}, sbe: {skip: "internal command"}, serverStatus: {skip: "does not return user data"}, + setAllowMigrations: {skip: "primary only"}, setAuditConfig: {skip: "does not return user data"}, setCommittedSnapshot: {skip: "does not return user data"}, setDefaultRWConcern: {skip: "primary only"}, diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript index a7ff9dc9c9f..de0d00ab993 100644 --- a/src/mongo/db/s/SConscript +++ b/src/mongo/db/s/SConscript @@ -319,6 +319,7 @@ env.Library( 'drop_database_coordinator.cpp', 'drop_database_coordinator_document.idl', 'flush_database_cache_updates_command.cpp', + 'flush_resharding_state_change_command.cpp', 'flush_routing_table_cache_updates_command.cpp', 'get_database_version_command.cpp', 'get_shard_version_command.cpp', @@ -336,6 +337,8 @@ env.Library( 'reshard_collection_coordinator.cpp', 'resharding_test_commands.cpp', 'resharding_test_commands.idl', + 
'set_allow_migrations_coordinator.cpp', + 'set_allow_migrations_coordinator_document.idl', 'set_shard_version_command.cpp', 'sharded_index_consistency_server_status.cpp', 'sharded_rename_collection.idl', @@ -354,13 +357,13 @@ env.Library( 'shardsvr_drop_collection_participant_command.cpp', 'shardsvr_drop_database_command.cpp', 'shardsvr_drop_database_participant_command.cpp', - 'flush_resharding_state_change_command.cpp', 'shardsvr_move_primary_command.cpp', 'shardsvr_refine_collection_shard_key_command.cpp', 'shardsvr_rename_collection_command.cpp', 'shardsvr_rename_collection_participant_command.cpp', 'shardsvr_reshard_collection_command.cpp', 'shardsvr_resharding_operation_time_command.cpp', + 'shardsvr_set_allow_migrations_command.cpp', 'split_chunk_command.cpp', 'split_vector_command.cpp', 'txn_two_phase_commit_cmds.cpp', diff --git a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp index 2edc46ccf38..8f741085cd1 100644 --- a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp +++ b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp @@ -49,12 +49,12 @@ RefineCollectionShardKeyCoordinator::RefineCollectionShardKeyCoordinator( void RefineCollectionShardKeyCoordinator::checkIfOptionsConflict(const BSONObj& doc) const { - // If we have two shard collections on the same namespace, then the arguments must be the same. + // If we have two refine collections on the same namespace, then the arguments must be the same. 
const auto otherDoc = RefineCollectionShardKeyCoordinatorDocument::parse( IDLParserErrorContext("RefineCollectionShardKeyCoordinatorDocument"), doc); uassert(ErrorCodes::ConflictingOperationInProgress, - "Another create collection with different arguments is already running for the same " + "Another refine collection with different arguments is already running for the same " "namespace", SimpleBSONObjComparator::kInstance.evaluate( _doc.getRefineCollectionShardKeyRequest().toBSON() == diff --git a/src/mongo/db/s/set_allow_migrations_coordinator.cpp b/src/mongo/db/s/set_allow_migrations_coordinator.cpp new file mode 100644 index 00000000000..2016b395226 --- /dev/null +++ b/src/mongo/db/s/set_allow_migrations_coordinator.cpp @@ -0,0 +1,145 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. 
If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding + +#include "mongo/db/s/set_allow_migrations_coordinator.h" + +#include "mongo/db/commands.h" +#include "mongo/logv2/log.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/grid.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/s/write_ops/batched_command_response.h" + +namespace mongo { + +bool isCollectionSharded(OperationContext* opCtx, const NamespaceString& nss) { + try { + Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss); + return true; + } catch (ExceptionFor<ErrorCodes::NamespaceNotFound>&) { + // The collection is unsharded or doesn't exist + return false; + } +} + +SetAllowMigrationsCoordinator::SetAllowMigrationsCoordinator(ShardingDDLCoordinatorService* service, + const BSONObj& initialState) + : ShardingDDLCoordinator(service, initialState), + _doc(SetAllowMigrationsCoordinatorDocument::parse( + IDLParserErrorContext("SetAllowMigrationsCoordinatorDocument"), initialState)), + _allowMigrations(_doc.getAllowMigrations()) {} + + +void SetAllowMigrationsCoordinator::checkIfOptionsConflict(const BSONObj& doc) const { + // If we have two set allow migrations on the same namespace, then the arguments must be the + // same. 
+ const auto otherDoc = SetAllowMigrationsCoordinatorDocument::parse( + IDLParserErrorContext("SetAllowMigrationsCoordinatorDocument"), doc); + + uassert(ErrorCodes::ConflictingOperationInProgress, + "Another set allow migrations with different arguments is already running for the same " + "namespace", + SimpleBSONObjComparator::kInstance.evaluate( + _doc.getSetAllowMigrationsRequest().toBSON() == + otherDoc.getSetAllowMigrationsRequest().toBSON())); +} + +boost::optional<BSONObj> SetAllowMigrationsCoordinator::reportForCurrentOp( + MongoProcessInterface::CurrentOpConnectionsMode connMode, + MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept { + BSONObjBuilder cmdBob; + if (const auto& optComment = getForwardableOpMetadata().getComment()) { + cmdBob.append(optComment.get().firstElement()); + } + cmdBob.appendElements(_doc.getSetAllowMigrationsRequest().toBSON()); + + BSONObjBuilder bob; + bob.append("type", "op"); + bob.append("desc", "SetAllowMigrationsCoordinator"); + bob.append("op", "command"); + bob.append("ns", nss().toString()); + bob.append("command", cmdBob.obj()); + bob.append("active", true); + return bob.obj(); +} + +ExecutorFuture<void> SetAllowMigrationsCoordinator::_runImpl( + std::shared_ptr<executor::ScopedTaskExecutor> executor, + const CancellationToken& token) noexcept { + return ExecutorFuture<void>(**executor) + .then([this, anchor = shared_from_this()] { + auto opCtxHolder = cc().makeOperationContext(); + auto* opCtx = opCtxHolder.get(); + getForwardableOpMetadata().setOn(opCtx); + + uassert(ErrorCodes::NamespaceNotSharded, + "Collection must be sharded so migrations can be blocked", + isCollectionSharded(opCtx, nss())); + + const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); + + BatchedCommandRequest updateRequest([&]() { + write_ops::UpdateCommandRequest updateOp(CollectionType::ConfigNS); + updateOp.setUpdates({[&] { + write_ops::UpdateOpEntry entry; + entry.setQ(BSON(CollectionType::kNssFieldName 
<< nss().ns())); + if (_allowMigrations) { + entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(BSON( + "$unset" << BSON(CollectionType::kPermitMigrationsFieldName << true)))); + } else { + entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(BSON( + "$set" << BSON(CollectionType::kPermitMigrationsFieldName << false)))); + } + entry.setMulti(false); + return entry; + }()}); + return updateOp; + }()); + + updateRequest.setWriteConcern(ShardingCatalogClient::kMajorityWriteConcern.toBSON()); + + auto response = configShard->runBatchWriteCommand(opCtx, + Shard::kDefaultConfigCommandTimeout, + updateRequest, + Shard::RetryPolicy::kIdempotent); + + uassertStatusOK(response.toStatus()); + }) + .onError([this, anchor = shared_from_this()](const Status& status) { + LOGV2_ERROR(5622700, + "Error running set allow migrations", + "namespace"_attr = nss(), + "error"_attr = redact(status)); + return status; + }); +} + +} // namespace mongo diff --git a/src/mongo/db/s/set_allow_migrations_coordinator.h b/src/mongo/db/s/set_allow_migrations_coordinator.h new file mode 100644 index 00000000000..6d6861d4ace --- /dev/null +++ b/src/mongo/db/s/set_allow_migrations_coordinator.h @@ -0,0 +1,64 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include "mongo/db/operation_context.h" +#include "mongo/db/s/set_allow_migrations_coordinator_document_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/s/request_types/set_allow_migrations_gen.h" +#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/future.h" + +namespace mongo { + +class SetAllowMigrationsCoordinator final : public ShardingDDLCoordinator { + +public: + SetAllowMigrationsCoordinator(ShardingDDLCoordinatorService* service, + const BSONObj& initialState); + + void checkIfOptionsConflict(const BSONObj& coorDoc) const override; + + boost::optional<BSONObj> reportForCurrentOp( + MongoProcessInterface::CurrentOpConnectionsMode connMode, + MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override; + +private: + ShardingDDLCoordinatorMetadata const& metadata() const override { + return _doc.getShardingDDLCoordinatorMetadata(); + } + + ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor, + const CancellationToken& token) noexcept override; + + SetAllowMigrationsCoordinatorDocument _doc; + const bool _allowMigrations; +}; +} // namespace mongo diff 
--git a/src/mongo/db/s/set_allow_migrations_coordinator_document.idl b/src/mongo/db/s/set_allow_migrations_coordinator_document.idl new file mode 100644 index 00000000000..851d6a9deae --- /dev/null +++ b/src/mongo/db/s/set_allow_migrations_coordinator_document.idl @@ -0,0 +1,47 @@ +# Copyright (C) 2021-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# <http://www.mongodb.com/licensing/server-side-public-license>. +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +# This file defines the format of the documents used by the set allow migrations coordinator +# to guarantee resilience in the event of stepdowns while setting the allowMigrations flag.
+ +global: + cpp_namespace: "mongo" + +imports: + - "mongo/idl/basic_types.idl" + - "mongo/db/s/sharding_ddl_coordinator.idl" + - "mongo/s/request_types/sharded_ddl_commands.idl" + +structs: + SetAllowMigrationsCoordinatorDocument: + description: "Represents a set allow migrations operation on the coordinator shard." + generate_comparison_operators: false + strict: true + chained_structs: + ShardingDDLCoordinatorMetadata: ShardingDDLCoordinatorMetadata + SetAllowMigrationsRequest: SetAllowMigrationsRequest diff --git a/src/mongo/db/s/sharding_ddl_coordinator.idl b/src/mongo/db/s/sharding_ddl_coordinator.idl index 7892d6fc8d5..17142de4ec7 100644 --- a/src/mongo/db/s/sharding_ddl_coordinator.idl +++ b/src/mongo/db/s/sharding_ddl_coordinator.idl @@ -49,6 +49,7 @@ enums: kRenameCollection: "renameCollection" kCreateCollection: "createCollection" kRefineCollectionShardKey: "refineCollectionShardKey" + kSetAllowMigrations: "setAllowMigrations" types: ForwardableOperationMetadata: diff --git a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp index 7e239e6d2a9..aa3324ce8a1 100644 --- a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp +++ b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp @@ -48,6 +48,7 @@ #include "mongo/db/s/move_primary_coordinator.h" #include "mongo/db/s/refine_collection_shard_key_coordinator.h" #include "mongo/db/s/rename_collection_coordinator.h" +#include "mongo/db/s/set_allow_migrations_coordinator.h" namespace mongo { namespace { @@ -76,6 +77,9 @@ std::shared_ptr<ShardingDDLCoordinator> constructShardingDDLCoordinatorInstance( return std::make_shared<RefineCollectionShardKeyCoordinator>(service, std::move(initialState)); break; + case DDLCoordinatorTypeEnum::kSetAllowMigrations: + return std::make_shared<SetAllowMigrationsCoordinator>(service, + std::move(initialState)); default: uasserted(ErrorCodes::BadValue, str::stream() diff --git 
a/src/mongo/db/s/shardsvr_set_allow_migrations_command.cpp b/src/mongo/db/s/shardsvr_set_allow_migrations_command.cpp new file mode 100644 index 00000000000..1703dd86829 --- /dev/null +++ b/src/mongo/db/s/shardsvr_set_allow_migrations_command.cpp @@ -0,0 +1,117 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding + +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/commands.h" +#include "mongo/db/s/set_allow_migrations_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/db/s/sharding_state.h" +#include "mongo/logv2/log.h" + +namespace mongo { +namespace { + +class ShardsvrSetAllowMigrationsCommand final + : public TypedCommand<ShardsvrSetAllowMigrationsCommand> { +public: + using Request = ShardsvrSetAllowMigrations; + + bool skipApiVersionCheck() const override { + // Internal command (server to server). + return true; + } + + std::string help() const override { + return "Internal command. Do not call directly. Enable/disable migrations in a collection."; + } + + bool adminOnly() const override { + return false; + } + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { + return AllowedOnSecondary::kNever; + } + + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; + + void typedRun(OperationContext* opCtx) { + uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); + + opCtx->setAlwaysInterruptAtStepDownOrUp(); + + uassert(ErrorCodes::InvalidOptions, + str::stream() << Request::kCommandName + << " must be called with majority writeConcern, got " + << request().toBSON(BSONObj()), + opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority); + + SetAllowMigrationsRequest setAllowMigrationsCmdRequest = + request().getSetAllowMigrationsRequest(); + auto nss = ns(); + auto coordinatorDoc = SetAllowMigrationsCoordinatorDocument(); + coordinatorDoc.setShardingDDLCoordinatorMetadata( + {{std::move(nss), DDLCoordinatorTypeEnum::kSetAllowMigrations}}); + coordinatorDoc.setSetAllowMigrationsRequest(std::move(setAllowMigrationsCmdRequest)); + + auto service = 
ShardingDDLCoordinatorService::getService(opCtx); + auto setAllowMigrationsCoordinator = + checked_pointer_cast<SetAllowMigrationsCoordinator>( + service->getOrCreateInstance(opCtx, coordinatorDoc.toBSON())); + setAllowMigrationsCoordinator->getCompletionFuture().get(opCtx); + } + + private: + NamespaceString ns() const override { + return request().getNamespace(); + } + + bool supportsWriteConcern() const override { + return true; + } + + void doCheckAuthorization(OperationContext* opCtx) const override { + uassert(ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), + ActionType::internal)); + } + }; + +} shardsvrSetAllowMigrationsCommand; + +} // namespace +} // namespace mongo diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h index f7d5641ec80..c1925df0a76 100644 --- a/src/mongo/s/catalog/type_collection.h +++ b/src/mongo/s/catalog/type_collection.h @@ -88,6 +88,7 @@ public: using CollectionTypeBase::kMaxChunkSizeBytesFieldName; using CollectionTypeBase::kNoAutoSplitFieldName; using CollectionTypeBase::kNssFieldName; + using CollectionTypeBase::kPermitMigrationsFieldName; using CollectionTypeBase::kReshardingFieldsFieldName; using CollectionTypeBase::kSupportingLongNameFieldName; using CollectionTypeBase::kTimeseriesFieldsFieldName; diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript index 90ff032369f..9433e876c8a 100644 --- a/src/mongo/s/commands/SConscript +++ b/src/mongo/s/commands/SConscript @@ -76,6 +76,7 @@ env.Library( 'cluster_repl_set_get_status_cmd.cpp', 'cluster_reshard_collection_cmd.cpp', 'cluster_rwc_defaults_commands.cpp', + 'cluster_set_allow_migrations_cmd.cpp', 'cluster_set_feature_compatibility_version_cmd.cpp', 'cluster_set_free_monitoring_cmd.cpp' if get_option("enable-free-mon") == 'on' else [], 'cluster_set_index_commit_quorum_cmd.cpp', diff --git 
a/src/mongo/s/commands/cluster_set_allow_migrations_cmd.cpp b/src/mongo/s/commands/cluster_set_allow_migrations_cmd.cpp new file mode 100644 index 00000000000..0bf48de01d5 --- /dev/null +++ b/src/mongo/s/commands/cluster_set_allow_migrations_cmd.cpp @@ -0,0 +1,103 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding + +#include "mongo/platform/basic.h" + +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/commands.h" +#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/grid.h" +#include "mongo/s/request_types/set_allow_migrations_gen.h" +#include "mongo/s/request_types/sharded_ddl_commands_gen.h" + +namespace mongo { +namespace { + +class SetAllowMigrationsCmd final : public TypedCommand<SetAllowMigrationsCmd> { +public: + using Request = SetAllowMigrations; + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { + return AllowedOnSecondary::kNever; + } + + bool adminOnly() const override { + return true; + } + + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; + + void typedRun(OperationContext* opCtx) { + const auto& nss = ns(); + + SetAllowMigrationsRequest allowMigrationsRequest; + allowMigrationsRequest.setAllowMigrations(request().getAllowMigrations()); + ShardsvrSetAllowMigrations shardsvrRequest(nss); + shardsvrRequest.setSetAllowMigrationsRequest(allowMigrationsRequest); + + auto catalogCache = Grid::get(opCtx)->catalogCache(); + const auto dbInfo = uassertStatusOK(catalogCache->getDatabase(opCtx, nss.db())); + auto cmdResponse = executeCommandAgainstDatabasePrimary( + opCtx, + nss.db(), + dbInfo, + CommandHelpers::appendMajorityWriteConcern(shardsvrRequest.toBSON({})), + ReadPreferenceSetting(ReadPreference::PrimaryOnly), + Shard::RetryPolicy::kIdempotent); + + const auto remoteResponse = uassertStatusOK(cmdResponse.swResponse); + uassertStatusOK(getStatusFromCommandResult(remoteResponse.data)); + } + + NamespaceString ns() const override { + return request().getCommandParameter(); + } + + // Considering this command will stop migrations, it is reasonable to ensure the same + // permissions as moveChunk. 
+ void doCheckAuthorization(OperationContext* opCtx) const override { + uassert(ErrorCodes::Unauthorized, + "Unauthorized to perform migration operations", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns()), + ActionType::moveChunk)); + } + + bool supportsWriteConcern() const override { + return true; + } + }; +} setAllowMigrationsCmd; + +} // namespace +} // namespace mongo diff --git a/src/mongo/s/request_types/set_allow_migrations.idl b/src/mongo/s/request_types/set_allow_migrations.idl index 515f5cd40d3..c779d64f5c9 100644 --- a/src/mongo/s/request_types/set_allow_migrations.idl +++ b/src/mongo/s/request_types/set_allow_migrations.idl @@ -26,13 +26,13 @@ # it in the license file. # -# configsvrSetAllowMigrations IDL File - global: cpp_namespace: "mongo" imports: - "mongo/idl/basic_types.idl" + - "mongo/db/s/sharding_ddl_coordinator.idl" + - "mongo/s/request_types/sharded_ddl_commands.idl" commands: _configsvrSetAllowMigrations: @@ -52,3 +52,14 @@ commands: type: uuid description: "The uuid of the collection." optional: true # Optional for backwards compatibility + + setAllowMigrations: + command_name: setAllowMigrations + cpp_name: SetAllowMigrations + description: "User-facing setAllowMigrations command" + namespace: type + api_version: "" + type: namespacestring + strict: false + chained_structs: + SetAllowMigrationsRequest: SetAllowMigrationsRequest diff --git a/src/mongo/s/request_types/sharded_ddl_commands.idl b/src/mongo/s/request_types/sharded_ddl_commands.idl index 4da22362366..4306a35e66e 100644 --- a/src/mongo/s/request_types/sharded_ddl_commands.idl +++ b/src/mongo/s/request_types/sharded_ddl_commands.idl @@ -149,6 +149,15 @@ structs: description: "The index specification document to use as the new shard key." 
optional: false + SetAllowMigrationsRequest: + description: "Parameters sent for the set allow migrations command" + strict: false + fields: + allowMigrations: + type: bool + description: "If false, balancer rounds should be disabled and migration commits prohibited." + optional: false + commands: _shardsvrCreateCollection: @@ -224,6 +233,16 @@ commands: chained_structs: RenameCollectionRequest: RenameCollectionRequest + _shardsvrSetAllowMigrations: + command_name: _shardsvrSetAllowMigrations + cpp_name: shardsvrSetAllowMigrations + description: "Internal setAllowMigrations command for a shard." + strict: false + namespace: concatenate_with_db + api_version: "" + chained_structs: + SetAllowMigrationsRequest: SetAllowMigrationsRequest + _configsvrRenameCollectionMetadata: command_name: _configsvrRenameCollectionMetadata cpp_name: ConfigsvrRenameCollectionMetadata |