Diffstat (limited to 'jstests/sharding')
 jstests/sharding/database_versioning_all_commands.js                             |   1 +
 jstests/sharding/ddl_ops_reported_on_current_op_command.js                       |  20 +++-
 jstests/sharding/libs/last_lts_mongos_commands.js                                |   1 +
 jstests/sharding/libs/mongos_api_params_util.js                                  |  17 ++
 jstests/sharding/move_chunk_allowMigrations.js                                   |  35 ----
 jstests/sharding/move_chunk_permitMigrations.js                                  | 162 ++++++++++
 jstests/sharding/read_write_concern_defaults_application.js                      |   2 +
 jstests/sharding/safe_secondary_reads_drop_recreate.js                           |   1 +
 jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js |   1 +
 jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js          |   1 +
 10 files changed, 205 insertions(+), 36 deletions(-)
diff --git a/jstests/sharding/database_versioning_all_commands.js b/jstests/sharding/database_versioning_all_commands.js
index 200c4cbe3fa..686c1877fea 100644
--- a/jstests/sharding/database_versioning_all_commands.js
+++ b/jstests/sharding/database_versioning_all_commands.js
@@ -617,6 +617,7 @@ let testCases = {
saslContinue: {skip: "not on a user database"},
saslStart: {skip: "not on a user database"},
serverStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
+ setAllowMigrations: {skip: "not on a user database"},
setAuditConfig: {skip: "not on a user database", conditional: true},
setDefaultRWConcern: {skip: "always targets the config server"},
setIndexCommitQuorum: {
diff --git a/jstests/sharding/ddl_ops_reported_on_current_op_command.js b/jstests/sharding/ddl_ops_reported_on_current_op_command.js
index 27a3a23db9e..20e75062fe8 100644
--- a/jstests/sharding/ddl_ops_reported_on_current_op_command.js
+++ b/jstests/sharding/ddl_ops_reported_on_current_op_command.js
@@ -59,7 +59,7 @@ let getCurrentOpOfDDL = (ddlOpThread, desc) => {
// refineCollectionShardKey and movePrimary to use the new DDLCoordinator.
if (jsTestOptions().useRandomBinVersionsWithinReplicaSet || jsTestOptions().shardMixedBinVersions) {
jsTest.log(
- "Skipping checking refineCollectionShardKey and movePrimary due to the fact that they're not using a DDLCoordinator on 5.0");
+ "Skipping checking refineCollectionShardKey, movePrimary and setAllowMigrations due to the fact that they're not using a DDLCoordinator on 5.0");
} else {
{
jsTestLog('Check refine collection shard key shows in current op');
@@ -98,6 +98,24 @@ if (jsTestOptions().useRandomBinVersionsWithinReplicaSet || jsTestOptions().shar
assert(currOp[0].command.request.hasOwnProperty('toShardId'));
assert.eq(st.shard0.shardName, currOp[0].command.request.toShardId);
}
+
+ {
+ jsTestLog('Check set allow migrations shows in current op');
+
+ let ddlOpThread = new Thread((mongosConnString, nss) => {
+ let mongos = new Mongo(mongosConnString);
+ mongos.adminCommand({setAllowMigrations: nss, allowMigrations: true});
+ }, st.s0.host, nss);
+
+ let currOp = getCurrentOpOfDDL(ddlOpThread, 'SetAllowMigrationsCoordinator');
+
+ // There must be one operation running with the appropriate ns.
+ assert.eq(1, currOp.length);
+ assert.eq(nss, currOp[0].ns);
+ assert(currOp[0].hasOwnProperty('command'));
+ assert(currOp[0].command.hasOwnProperty('allowMigrations'));
+ assert.eq(true, currOp[0].command.allowMigrations);
+ }
}
{
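A note on the helper this hunk relies on: getCurrentOpOfDDL is defined near the top of the test and is not part of the diff shown. A minimal sketch of what such a helper can look like, assuming it starts the DDL thread and polls $currentOp until an operation with the given desc appears (the real helper may additionally use failpoints to hold the coordinator in place while it is inspected; `st` is the test's ShardingTest):

let getCurrentOpOfDDLSketch = (ddlOpThread, desc) => {
    ddlOpThread.start();
    let currOp;
    // Poll until the DDL coordinator registers itself in $currentOp.
    assert.soon(() => {
        currOp = st.s.getDB('admin')
                     .aggregate([{$currentOp: {allUsers: true}}, {$match: {desc: desc}}])
                     .toArray();
        return currOp.length > 0;
    }, 'timed out waiting for a ' + desc + ' operation to appear in $currentOp');
    return currOp;
};
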
diff --git a/jstests/sharding/libs/last_lts_mongos_commands.js b/jstests/sharding/libs/last_lts_mongos_commands.js
index 66f1ecc871f..84eb7fd8e96 100644
--- a/jstests/sharding/libs/last_lts_mongos_commands.js
+++ b/jstests/sharding/libs/last_lts_mongos_commands.js
@@ -14,6 +14,7 @@ const commandsAddedToMongosSinceLastLTS = [
"configureCollectionAutoSplitter",
"reshardCollection",
"rotateCertificates",
+ "setAllowMigrations",
"testDeprecation",
"testDeprecationInVersion2",
"testRemoval",
diff --git a/jstests/sharding/libs/mongos_api_params_util.js b/jstests/sharding/libs/mongos_api_params_util.js
index ef4ce3012da..e22045f1761 100644
--- a/jstests/sharding/libs/mongos_api_params_util.js
+++ b/jstests/sharding/libs/mongos_api_params_util.js
@@ -1163,6 +1163,23 @@ let MongosAPIParametersUtil = (function() {
skip: "executes locally on mongos (not sent to any remote node)"
},
{
+ commandName: "setAllowMigrations",
+ run: {
+ inAPIVersion1: false,
+ shardCommandName: "_shardsvrSetAllowMigrations",
+ runsAgainstAdminDb: true,
+ permittedInTxn: false,
+ requiresShardedCollection: true,
+ setUp: () => {
+ assert.commandWorked(st.s.adminCommand(
+ {enableSharding: "db", primaryShard: st.shard0.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({shardCollection: "db.collection", key: {_id: 1}}));
+ },
+ command: () => ({setAllowMigrations: "db.collection", allowMigrations: true})
+ }
+ },
+ {
commandName: "setDefaultRWConcern",
run: {
inAPIVersion1: false,
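The entry above tells the API-parameters harness that mongos implements setAllowMigrations by sending _shardsvrSetAllowMigrations to the shards, and the harness checks that API parameters given to mongos are forwarded on that internal command. Roughly, the generated user-facing invocation looks like the sketch below; the exact wiring is handled by MongosAPIParametersUtil, and the parameter values here are illustrative:

// Illustrative: the harness appends API parameters to the command built by
// command() above, then inspects the shard-side _shardsvrSetAllowMigrations
// request for the same parameters.
assert.commandWorked(st.s.adminCommand({
    setAllowMigrations: "db.collection",
    allowMigrations: true,
    apiVersion: "1",
    apiStrict: false,  // must stay false: the command is not in API v1
}));
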
diff --git a/jstests/sharding/move_chunk_allowMigrations.js b/jstests/sharding/move_chunk_allowMigrations.js
index 4db3d97b946..1ef488f7f33 100644
--- a/jstests/sharding/move_chunk_allowMigrations.js
+++ b/jstests/sharding/move_chunk_allowMigrations.js
@@ -62,38 +62,6 @@ const setUpDb = function setUpDatabaseAndEnableSharding() {
assert.eq(false, cachedEntry.allowMigrations);
})();
-// TODO SERVER-61033: remove after permitMigrations have been merged with allowMigrations.
-// Tests that moveChunk does not succeed when {permitMigrations: false}
-(function testPermitMigrationsFalsePreventsMoveChunk() {
- setUpDb();
-
- const collName = "collA";
- const ns = dbName + "." + collName;
-
- assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 0}));
- assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 1}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
-
- // Confirm that an inProgress moveChunk fails once {allowMigrations: false}
- const fp = configureFailPoint(st.shard0, "moveChunkHangAtStep4");
- const awaitResult = startParallelShell(
- funWithArgs(function(ns, toShardName) {
- assert.commandFailedWithCode(
- db.adminCommand({moveChunk: ns, find: {_id: 0}, to: toShardName}),
- ErrorCodes.ConflictingOperationInProgress);
- }, ns, st.shard1.shardName), st.s.port);
- fp.wait();
- assert.commandWorked(configDB.collections.updateOne(
- {_id: ns}, {$set: {permitMigrations: false}}, {writeConcern: {w: "majority"}}));
- fp.off();
- awaitResult();
-
- // {permitMigrations: false} is set, sending a new moveChunk command should also fail.
- assert.commandFailedWithCode(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}),
- ErrorCodes.ConflictingOperationInProgress);
-})();
-
// Tests {allowMigrations: false} disables balancing for collB and does not interfere with balancing
// for collA.
//
@@ -238,9 +206,6 @@ testBalancer(false /* allowMigrations */, {});
testBalancer(false /* allowMigrations */, {noBalance: false});
testBalancer(false /* allowMigrations */, {noBalance: true});
-// TODO SERVER-61033: merge permitMigrations with allowMigrations.
-testBalancer(true /* allowMigrations */, {permitMigrations: false});
-
// Test the _configsvrSetAllowMigrations internal command
testConfigsvrSetAllowMigrationsCommand();
diff --git a/jstests/sharding/move_chunk_permitMigrations.js b/jstests/sharding/move_chunk_permitMigrations.js
new file mode 100644
index 00000000000..9f21ba01673
--- /dev/null
+++ b/jstests/sharding/move_chunk_permitMigrations.js
@@ -0,0 +1,162 @@
+/**
+ * Tests that a collection with permitMigrations: false in config.collections prohibits committing a
+ * moveChunk and disables the balancer.
+ *
+ * @tags: [
+ * does_not_support_stepdowns,
+ * requires_fcv_52,
+ * ]
+ */
+(function() {
+'use strict';
+
+load('jstests/libs/fail_point_util.js');
+load('jstests/libs/parallel_shell_helpers.js');
+load("jstests/sharding/libs/find_chunks_util.js");
+load("jstests/sharding/libs/shard_versioning_util.js");
+
+const st = new ShardingTest({shards: 2});
+const configDB = st.s.getDB("config");
+const dbName = 'AllowMigrations';
+
+// Resets database dbName, enables sharding and establishes shard0 as primary. Test case agnostic.
+const setUpDb = function setUpDatabaseAndEnableSharding() {
+ assert.commandWorked(st.s.getDB(dbName).dropDatabase());
+ assert.commandWorked(
+ st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName}));
+};
+
+// Use the setAllowMigrations command to set the permitMigrations flag in the collection.
+const setAllowMigrations = function(ns, allow) {
+ assert.commandWorked(st.s.adminCommand({setAllowMigrations: ns, allowMigrations: allow}));
+};
+
+// Tests that moveChunk does not succeed when setAllowMigrations is called with a false value.
+(function testSetAllowMigrationsFalsePreventsMoveChunk() {
+ setUpDb();
+
+ const collName = "collA";
+ const ns = dbName + "." + collName;
+
+ assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 0}));
+ assert.commandWorked(st.s.getDB(dbName).getCollection(collName).insert({_id: 1}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+ setAllowMigrations(ns, false);
+
+ // setAllowMigrations was called, so sending a new moveChunk command should fail.
+ assert.commandFailedWithCode(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+})();
+
+// Tests that setAllowMigrations disables balancing for collB and does not interfere with
+// balancing for collA.
+//
+// collBSetNoBalanceParam specifies the field(s) that will be set on collB in config.collections.
+const testBalancer = function(allowMigrations, collBSetNoBalanceParam) {
+ setUpDb();
+
+ const collAName = "collA";
+ const collBName = "collB";
+ const collA = st.s.getCollection(`${dbName}.${collAName}`);
+ const collB = st.s.getCollection(`${dbName}.${collBName}`);
+
+ assert.commandWorked(st.s.adminCommand({shardCollection: collA.getFullName(), key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: collB.getFullName(), key: {_id: 1}}));
+
+ // Split both collections into 4 chunks so balancing can occur.
+ for (let coll of [collA, collB]) {
+ coll.insert({_id: 1});
+ coll.insert({_id: 10});
+ coll.insert({_id: 20});
+ coll.insert({_id: 30});
+
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 10}));
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 20}));
+ assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 30}));
+
+ // Confirm the chunks are initially unbalanced. All chunks should start out on shard0
+ // (primary shard for the database).
+ const balancerStatus = assert.commandWorked(
+ st.s0.adminCommand({balancerCollectionStatus: coll.getFullName()}));
+ assert.eq(balancerStatus.balancerCompliant, false);
+ assert.eq(balancerStatus.firstComplianceViolation, 'chunksImbalance');
+ assert.eq(4,
+ findChunksUtil
+ .findChunksByNs(configDB, coll.getFullName(), {shard: st.shard0.shardName})
+ .count());
+ }
+
+ jsTestLog(`Disabling balancing of ${collB.getFullName()} with allowMigrations ${
+ allowMigrations} and parameters ${tojson(collBSetNoBalanceParam)}`);
+ assert.commandWorked(
+ configDB.collections.update({_id: collB.getFullName()}, {$set: collBSetNoBalanceParam}));
+
+ setAllowMigrations(collB.getFullName(), allowMigrations);
+
+ st.startBalancer();
+ assert.soon(() => {
+ st.awaitBalancerRound();
+ const shard0Chunks =
+ findChunksUtil
+ .findChunksByNs(configDB, collA.getFullName(), {shard: st.shard0.shardName})
+ .itcount();
+ const shard1Chunks =
+ findChunksUtil
+ .findChunksByNs(configDB, collA.getFullName(), {shard: st.shard1.shardName})
+ .itcount();
+ jsTestLog(`shard0 chunks ${shard0Chunks}, shard1 chunks ${shard1Chunks}`);
+ return shard0Chunks == 2 && shard1Chunks == 2;
+ }, `Balancer failed to balance ${collA.getFullName()}`, 1000 * 60 * 10);
+ st.stopBalancer();
+
+ const collABalanceStatus =
+ assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: collA.getFullName()}));
+ assert.eq(collABalanceStatus.balancerCompliant, true);
+
+ // Test that collB remains unbalanced.
+ const collBBalanceStatus =
+ assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: collB.getFullName()}));
+ assert.eq(collBBalanceStatus.balancerCompliant, false);
+ assert.eq(collBBalanceStatus.firstComplianceViolation, 'chunksImbalance');
+ assert.eq(
+ 4,
+ findChunksUtil.findChunksByNs(configDB, collB.getFullName(), {shard: st.shard0.shardName})
+ .count());
+};
+
+const testSetAllowMigrationsCommand = function() {
+ setUpDb();
+
+ const collName = "foo";
+ const ns = dbName + "." + collName;
+
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
+
+ ShardVersioningUtil.assertCollectionVersionEquals(st.shard0, ns, Timestamp(1, 0));
+
+ // Use setAllowMigrations to forbid migrations from happening.
+ setAllowMigrations(ns, false);
+
+ // Check that permitMigrations has been set to 'false' on the configsvr config.collections.
+ assert.eq(false, configDB.collections.findOne({_id: ns}).permitMigrations);
+
+ // Use setAllowMigrations to allow migrations to happen.
+ setAllowMigrations(ns, true);
+
+ // Check that permitMigrations has been unset (which implies migrations are allowed) on the
+ // configsvr config.collections.
+ assert.eq(undefined, configDB.collections.findOne({_id: ns}).permitMigrations);
+};
+
+// Test cases that should disable the balancer.
+testBalancer(false /* allowMigrations */, {});
+testBalancer(false /* allowMigrations */, {noBalance: false});
+testBalancer(false /* allowMigrations */, {noBalance: true});
+
+// Test the setAllowMigrations command.
+testSetAllowMigrationsCommand();
+
+st.stop();
+})();
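Taken together, the new test pins down the user-visible contract: setAllowMigrations toggles the permitMigrations field in config.collections, migrations fail with ConflictingOperationInProgress while it is false, and re-enabling removes the field rather than storing true. A condensed sketch of that contract (namespace illustrative, shard key as in the moveChunk test above):

const ns = "AllowMigrations.foo";
assert.commandWorked(st.s.adminCommand({setAllowMigrations: ns, allowMigrations: false}));
assert.eq(false, st.s.getDB("config").collections.findOne({_id: ns}).permitMigrations);

// Migrations are now refused.
assert.commandFailedWithCode(
    st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}),
    ErrorCodes.ConflictingOperationInProgress);

// Re-enabling removes the field instead of setting permitMigrations: true.
assert.commandWorked(st.s.adminCommand({setAllowMigrations: ns, allowMigrations: true}));
assert.eq(undefined, st.s.getDB("config").collections.findOne({_id: ns}).permitMigrations);
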
diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js
index 54e40386a51..ea0e292f9ca 100644
--- a/jstests/sharding/read_write_concern_defaults_application.js
+++ b/jstests/sharding/read_write_concern_defaults_application.js
@@ -152,6 +152,7 @@ let testCases = {
_shardsvrRenameCollectionParticipantUnblock: {skip: "internal command"},
_shardsvrReshardCollection: {skip: "internal command"},
_shardsvrReshardingOperationTime: {skip: "internal command"},
+ _shardsvrSetAllowMigrations: {skip: "internal command"},
_shardsvrShardCollection:
{skip: "internal command"}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
_transferMods: {skip: "internal command"},
@@ -641,6 +642,7 @@ let testCases = {
saslStart: {skip: "does not accept read or write concern"},
sbe: {skip: "internal command"},
serverStatus: {skip: "does not accept read or write concern"},
+ setAllowMigrations: {skip: "does not accept read or write concern"},
setAuditConfig: {skip: "does not accept read or write concern"},
setCommittedSnapshot: {skip: "internal command"},
setDefaultRWConcern: {skip: "special case (must run after all other commands)"},
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index 98b56fcfcde..bef327e2700 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -299,6 +299,7 @@ let testCases = {
saslStart: {skip: "primary only"},
sbe: {skip: "internal command"},
serverStatus: {skip: "does not return user data"},
+ setAllowMigrations: {skip: "primary only"},
setAuditConfig: {skip: "does not return user data"},
setCommittedSnapshot: {skip: "does not return user data"},
setDefaultRWConcern: {skip: "primary only"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index eb315ccf58c..8b1ba4de4ac 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -368,6 +368,7 @@ let testCases = {
saslStart: {skip: "primary only"},
sbe: {skip: "internal command"},
serverStatus: {skip: "does not return user data"},
+ setAllowMigrations: {skip: "primary only"},
setAuditConfig: {skip: "does not return user data"},
setCommittedSnapshot: {skip: "does not return user data"},
setDefaultRWConcern: {skip: "primary only"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index c52c0ce3c84..02b45219dea 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -304,6 +304,7 @@ let testCases = {
saslStart: {skip: "primary only"},
sbe: {skip: "internal command"},
serverStatus: {skip: "does not return user data"},
+ setAllowMigrations: {skip: "primary only"},
setAuditConfig: {skip: "does not return user data"},
setCommittedSnapshot: {skip: "does not return user data"},
setDefaultRWConcern: {skip: "primary only"},