summary refs log tree commit diff
path: root/jstests/sharding
diff options
context:
space:
mode:
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/migration_ignore_interrupts_1.js           6
-rw-r--r--  jstests/sharding/migration_ignore_interrupts_3.js          12
-rw-r--r--  jstests/sharding/migration_waits_for_majority_commit.js     6
-rw-r--r--  jstests/sharding/movechunk_interrupt_at_primary_stepdown.js 6
-rw-r--r--  jstests/sharding/movechunk_parallel.js                     12
-rw-r--r--  jstests/sharding/txn_writes_during_movechunk.js             6
6 files changed, 24 insertions, 24 deletions
diff --git a/jstests/sharding/migration_ignore_interrupts_1.js b/jstests/sharding/migration_ignore_interrupts_1.js
index d093f0a8f0b..b0a3db9d9e9 100644
--- a/jstests/sharding/migration_ignore_interrupts_1.js
+++ b/jstests/sharding/migration_ignore_interrupts_1.js
@@ -43,10 +43,10 @@ assert.commandWorked(admin.runCommand(
jsTest.log("Set up complete, now proceeding to test that migration interruptions fail.");
// Start a migration between shard0 and shard1 on coll1 and then pause it
-pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(shard1, migrateStepNames.rangeDeletionTaskScheduled);
var joinMoveChunk = moveChunkParallel(
staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
-waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
+waitForMigrateStep(shard1, migrateStepNames.rangeDeletionTaskScheduled);
assert.commandFailedWithCode(
admin.runCommand({moveChunk: ns1, find: {a: -10}, to: st.shard2.shardName}),
@@ -64,7 +64,7 @@ assert.commandFailedWithCode(
"(3) A shard should not be able to be both a donor and recipient of migrations.");
// Finish migration
-unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+unpauseMigrateAtStep(shard1, migrateStepNames.rangeDeletionTaskScheduled);
assert.doesNotThrow(function() {
joinMoveChunk();
});
diff --git a/jstests/sharding/migration_ignore_interrupts_3.js b/jstests/sharding/migration_ignore_interrupts_3.js
index a8138604df2..bea7864275a 100644
--- a/jstests/sharding/migration_ignore_interrupts_3.js
+++ b/jstests/sharding/migration_ignore_interrupts_3.js
@@ -54,11 +54,11 @@ jsTest.log("Set up complete, now proceeding to test that migration interruption
// Start coll1 migration to shard1: pause recipient after delete step, donor before interrupt
// check.
-pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(shard1, migrateStepNames.rangeDeletionTaskScheduled);
pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
const joinMoveChunk = moveChunkParallel(
staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
-waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
+waitForMigrateStep(shard1, migrateStepNames.rangeDeletionTaskScheduled);
// Abort migration on donor side, recipient is unaware.
killRunningMoveChunk(admin);
@@ -69,13 +69,13 @@ assert.throws(function() {
});
// Start coll2 migration to shard2, pause recipient after delete step.
-pauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(shard2, migrateStepNames.rangeDeletionTaskScheduled);
const joinMoveChunk2 = moveChunkParallel(
staticMongod, st.s0.host, {a: 0}, null, coll2.getFullName(), st.shard2.shardName);
-waitForMigrateStep(shard2, migrateStepNames.deletedPriorDataInRange);
+waitForMigrateStep(shard2, migrateStepNames.rangeDeletionTaskScheduled);
jsTest.log('Releasing coll1 migration recipient, whose clone command should fail....');
-unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+unpauseMigrateAtStep(shard1, migrateStepNames.rangeDeletionTaskScheduled);
assert.soon(function() {
// Wait for the destination shard to report that it is not in an active migration.
var res = shard1.adminCommand({'_recvChunkStatus': 1});
@@ -86,7 +86,7 @@ assert.eq(
0, shard1Coll1.find().itcount(), "shard1 cloned documents despite donor migration abortion.");
jsTest.log('Finishing coll2 migration, which should succeed....');
-unpauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
+unpauseMigrateAtStep(shard2, migrateStepNames.rangeDeletionTaskScheduled);
assert.doesNotThrow(function() {
joinMoveChunk2();
});
diff --git a/jstests/sharding/migration_waits_for_majority_commit.js b/jstests/sharding/migration_waits_for_majority_commit.js
index 6581a8da592..eea1592b586 100644
--- a/jstests/sharding/migration_waits_for_majority_commit.js
+++ b/jstests/sharding/migration_waits_for_majority_commit.js
@@ -24,7 +24,7 @@ assert.commandWorked(st.s.adminCommand({split: "test.foo", middle: {_id: 0}}));
assert.eq(1, testDB.foo.find().readConcern("majority").itcount());
// Advance a migration to the beginning of the cloning phase.
-pauseMigrateAtStep(st.rs1.getPrimary(), 2);
+pauseMigrateAtStep(st.rs1.getPrimary(), migrateStepNames.rangeDeletionTaskScheduled);
// For startParallelOps to write its state
let staticMongod = MongoRunner.runMongod({});
@@ -39,7 +39,7 @@ let awaitMigration = moveChunkParallel(staticMongod,
// Wait for the migration to reach the failpoint and allow any writes to become majority committed
// before pausing replication.
-waitForMigrateStep(st.rs1.getPrimary(), 2);
+waitForMigrateStep(st.rs1.getPrimary(), migrateStepNames.rangeDeletionTaskScheduled);
st.rs1.awaitLastOpCommitted();
// Disable replication on the recipient shard's secondary node, so the recipient shard's majority
@@ -50,7 +50,7 @@ assert.commandWorked(
"failed to enable fail point on secondary");
// Allow the migration to begin cloning.
-unpauseMigrateAtStep(st.rs1.getPrimary(), 2);
+unpauseMigrateAtStep(st.rs1.getPrimary(), migrateStepNames.rangeDeletionTaskScheduled);
// The migration should fail to commit without being able to advance the majority commit point.
if (jsTestOptions().mongosBinVersion == "last-stable") {
diff --git a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
index 463f67008d4..a22a9993d35 100644
--- a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
+++ b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
@@ -29,7 +29,7 @@ var coll = mongos.getDB('TestDB').TestColl;
var staticMongod = MongoRunner.runMongod({});
function interruptMoveChunkAndRecover(fromShard, toShard, isJumbo) {
- pauseMigrateAtStep(toShard, migrateStepNames.deletedPriorDataInRange);
+ pauseMigrateAtStep(toShard, migrateStepNames.rangeDeletionTaskScheduled);
var joinMoveChunk = moveChunkParallel(staticMongod,
mongos.host,
@@ -39,7 +39,7 @@ function interruptMoveChunkAndRecover(fromShard, toShard, isJumbo) {
toShard.shardName,
true /* expectSuccess */,
isJumbo);
- waitForMigrateStep(toShard, migrateStepNames.deletedPriorDataInRange);
+ waitForMigrateStep(toShard, migrateStepNames.rangeDeletionTaskScheduled);
// Stepdown the primary in order to force the balancer to stop. Use a timeout of 5 seconds for
// both step down operations, because mongos will retry to find the CSRS primary for up to 20
@@ -65,7 +65,7 @@ function interruptMoveChunkAndRecover(fromShard, toShard, isJumbo) {
// Ensure a new primary is found promptly
st.configRS.getPrimary(30000);
- unpauseMigrateAtStep(toShard, migrateStepNames.deletedPriorDataInRange);
+ unpauseMigrateAtStep(toShard, migrateStepNames.rangeDeletionTaskScheduled);
// Ensure that migration succeeded
joinMoveChunk();
diff --git a/jstests/sharding/movechunk_parallel.js b/jstests/sharding/movechunk_parallel.js
index ca16d4caa8b..f03166a6fa7 100644
--- a/jstests/sharding/movechunk_parallel.js
+++ b/jstests/sharding/movechunk_parallel.js
@@ -41,8 +41,8 @@ assert.eq(2,
.itcount());
// Pause migrations at shards 2 and 3
-pauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
-pauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(st.shard2, migrateStepNames.rangeDeletionTaskScheduled);
+pauseMigrateAtStep(st.shard3, migrateStepNames.rangeDeletionTaskScheduled);
// Both move chunk operations should proceed
var joinMoveChunk1 = moveChunkParallel(
@@ -50,11 +50,11 @@ var joinMoveChunk1 = moveChunkParallel(
var joinMoveChunk2 = moveChunkParallel(
staticMongod, st.s0.host, {Key: 30}, null, 'TestDB.TestColl', st.shard3.shardName);
-waitForMigrateStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
-waitForMigrateStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
+waitForMigrateStep(st.shard2, migrateStepNames.rangeDeletionTaskScheduled);
+waitForMigrateStep(st.shard3, migrateStepNames.rangeDeletionTaskScheduled);
-unpauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
-unpauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
+unpauseMigrateAtStep(st.shard2, migrateStepNames.rangeDeletionTaskScheduled);
+unpauseMigrateAtStep(st.shard3, migrateStepNames.rangeDeletionTaskScheduled);
joinMoveChunk1();
joinMoveChunk2();
diff --git a/jstests/sharding/txn_writes_during_movechunk.js b/jstests/sharding/txn_writes_during_movechunk.js
index 75432ed818c..66cecf302ed 100644
--- a/jstests/sharding/txn_writes_during_movechunk.js
+++ b/jstests/sharding/txn_writes_during_movechunk.js
@@ -17,12 +17,12 @@ assert.commandWorked(coll.insert({_id: 'updateMe'}));
assert.commandWorked(coll.insert({_id: 'deleteMe'}));
assert.commandWorked(coll.insert({_id: 'deleteMeUsingFindAndModify'}));
-pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(st.shard1, migrateStepNames.rangeDeletionTaskScheduled);
let joinMoveChunk =
moveChunkParallel(staticMongod, st.s0.host, {_id: 0}, null, 'test.user', st.shard1.shardName);
-waitForMigrateStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+waitForMigrateStep(st.shard1, migrateStepNames.rangeDeletionTaskScheduled);
let session = st.s.startSession();
let sessionDB = session.getDatabase('test');
@@ -35,7 +35,7 @@ sessionColl.remove({_id: 'deleteMe'});
sessionColl.findAndModify({query: {_id: 'deleteMeUsingFindAndModify'}, remove: true});
pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-unpauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+unpauseMigrateAtStep(st.shard1, migrateStepNames.rangeDeletionTaskScheduled);
waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
let recipientColl = st.rs1.getPrimary().getDB('test').user;