/**
* Prove that it's possible to run reconfigs during a shard split.
*
* @tags: [requires_fcv_62, serverless]
*/
load("jstests/serverless/libs/shard_split_test.js");
(function() {
"use strict";
const tenantIds = ["tenant1", "tenant2"];
const test = new ShardSplitTest({quickGarbageCollection: true});
test.addRecipientNodes();
test.donor.awaitSecondaryNodes();
const donorPrimary = test.donor.getPrimary();
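
// Pause the shard split after it begins blocking tenant writes, so the reconfig below runs
// while the split operation is still in progress.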
const pauseAfterBlockingFp = configureFailPoint(donorPrimary, "pauseShardSplitAfterBlocking");

const split = test.createSplitOperation(tenantIds);
const commitThread = split.commitAsync();
pauseAfterBlockingFp.wait();

// Prepare a new config which removes all of the recipient nodes.
const config = assert.commandWorked(donorPrimary.adminCommand({replSetGetConfig: 1})).config;
const recipientHosts = test.recipientNodes.map(node => node.host);
config.members = config.members.filter(member => !recipientHosts.includes(member.host));
config.version++;

assert.commandWorked(donorPrimary.adminCommand({replSetReconfig: config}));
pauseAfterBlockingFp.off();
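
// With the recipient nodes removed from the donor's config, the split can no longer complete
// and is expected to abort.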
assert.commandFailedWithCode(commitThread.returnData(), ErrorCodes.TenantMigrationAborted);
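
// Clean up the aborted operation: forget it and wait for its state to be garbage collected.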
split.forget();
test.cleanupSuccesfulAborted(split.migrationId, tenantIds);
test.stop();
})();