summaryrefslogtreecommitdiff
path: root/jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js
diff options
context:
space:
mode:
authorJason Chan <jason.chan@mongodb.com>2021-02-01 22:12:28 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-02-04 02:16:43 +0000
commit78a69ed20c764fc0f826ffb89ade2772787ee542 (patch)
tree27e1b0322dbaea168955478d00006ddfd58dc0a3 /jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js
parent3a02c6d491ee701bcf6180285a260b5e1e9c9346 (diff)
downloadmongo-78a69ed20c764fc0f826ffb89ade2772787ee542.tar.gz
SERVER-52722 Handle cloning restart in TenantMigrationRecipientService
Diffstat (limited to 'jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js')
-rw-r--r--jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js24
1 file changed, 12 insertions, 12 deletions
diff --git a/jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js b/jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js
index 7f89c304df2..7e9a602f9a5 100644
--- a/jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js
+++ b/jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js
@@ -44,8 +44,9 @@ const docs = [{_id: 0}, {_id: "string"}, {_id: UUID()}, {_id: new Date()}];
tenantMigrationTest.insertDonorDB(dbName, collName, docs);
const migrationId = UUID();
+const migrationIdString = extractUUIDFromObject(migrationId);
const migrationOpts = {
- migrationIdString: extractUUIDFromObject(migrationId),
+ migrationIdString: migrationIdString,
recipientConnString: tenantMigrationTest.getRecipientConnString(),
tenantId: tenantId,
};
@@ -66,31 +67,30 @@ migrationThread.start();
hangDuringCollectionClone.wait();
assert.eq(2, recipientColl.find().itcount());
+// Insert some documents that will be fetched by the recipient. This is to test that on failover,
+// the fetcher will resume fetching from where it left off. The system is expected to crash if
+// the recipient fetches a duplicate oplog entry upon resuming the migration.
+tenantMigrationTest.insertDonorDB(dbName, "aNewColl", [{_id: "docToBeFetched"}]);
+assert.soon(() => {
+ const configDb = recipientPrimary.getDB("config");
+ const oplogBuffer = configDb.getCollection("repl.migration.oplog_" + migrationIdString);
+ return oplogBuffer.find({"entry.o._id": "docToBeFetched"}).count() === 1;
+});
+
// Step up a new node in the recipient set and trigger a failover. The new primary should resume
// cloning starting from the third document.
recipientRst.awaitLastOpCommitted();
const newRecipientPrimary = recipientRst.getSecondaries()[0];
const newRecipientDb = newRecipientPrimary.getDB(dbName);
-const hangAfterStartingOplogFetcher = configureFailPoint(
- newRecipientDb, "fpAfterStartingOplogFetcherMigrationRecipientInstance", {action: "hang"});
assert.commandWorked(newRecipientPrimary.adminCommand({replSetStepUp: 1}));
hangDuringCollectionClone.off();
recipientRst.getPrimary();
-// Wait until the new recipient primary starts the oplog fetcher. This failpoint is needed only
-// because tenant migration didn't fully support resuming after a failover (specifically, not
-// preserving the startApplyingDonorOpTime) when this test was written. Otherwise, we can simply
-// rename the collection on the donor before stepping up the new recipient primary.
-hangAfterStartingOplogFetcher.wait();
-
// Rename the collection on the donor.
const donorColl = donorPrimary.getDB(dbName).getCollection(collName);
const collNameRenamed = collName + "_renamed";
assert.commandWorked(donorColl.renameCollection(collNameRenamed));
-// Resume collection cloning on the new recipient primary.
-hangAfterStartingOplogFetcher.off();
-
// The migration should go through after recipient failover.
assert.commandWorked(migrationThread.returnData());