/**
 * Test that multiple concurrent tenant migrations are supported.
 *
 * Tenant migrations are not expected to run on servers with ephemeralForTest. In
 * particular, this test fails on ephemeralForTest because the donor must wait for the
 * write that sets the migration state to "committed" or "aborted" to become majority
 * committed, which ephemeralForTest cannot do.
 *
 * Incompatible with shard merge, which can't handle concurrent migrations.
 *
 * @tags: [
 *   incompatible_with_macos,
 *   incompatible_with_shard_merge,
 *   incompatible_with_windows_tls,
 *   requires_majority_read_concern,
 *   requires_persistence,
 *   serverless,
 * ]
 */

(function() {
'use strict';

load("jstests/libs/fail_point_util.js");
load("jstests/libs/parallelTester.js");
load("jstests/libs/uuid_util.js");
load("jstests/replsets/libs/tenant_migration_test.js");
load("jstests/replsets/libs/tenant_migration_util.js");

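// Each replica set uses its own x509 certificate so that the donor and recipient can
// authenticate to each other during the migration.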
const x509Options0 = TenantMigrationUtil.makeX509Options("jstests/libs/rs0.pem");
const x509Options1 = TenantMigrationUtil.makeX509Options("jstests/libs/rs1.pem");
const x509Options2 = TenantMigrationUtil.makeX509Options("jstests/libs/rs2.pem");

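// Three single-node replica sets; each test case below picks its donor and recipient(s)
// from among them.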
const rst0 = new ReplSetTest({nodes: 1, name: 'rst0', nodeOptions: x509Options0});
const rst1 = new ReplSetTest({nodes: 1, name: 'rst1', nodeOptions: x509Options1});
const rst2 = new ReplSetTest({nodes: 1, name: 'rst2', nodeOptions: x509Options2});

rst0.startSet();
rst0.initiate();

rst1.startSet();
rst1.initiate();

rst2.startSet();
rst2.initiate();

const kTenantIdPrefix = "testTenantId";

// Test concurrent outgoing migrations to different recipients.
(() => {
    const tenantMigrationTest0 = new TenantMigrationTest({donorRst: rst0, recipientRst: rst1});
    const tenantMigrationTest1 = new TenantMigrationTest({donorRst: rst0, recipientRst: rst2});
    const tenantId0 = `${kTenantIdPrefix}-ConcurrentOutgoingMigrationsToDifferentRecipient0`;
    const tenantId1 = `${kTenantIdPrefix}-ConcurrentOutgoingMigrationsToDifferentRecipient1`;
    const donorPrimary = rst0.getPrimary();
    const connPoolStatsBefore = assert.commandWorked(donorPrimary.adminCommand({connPoolStats: 1}));
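    // Baseline pool stats; used below to verify that the donor creates one new
    // ReplicaSetMonitor per recipient replica set.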

    const migrationOpts0 = {
        migrationIdString: extractUUIDFromObject(UUID()),
        tenantId: tenantId0,
    };
    const migrationOpts1 = {
        migrationIdString: extractUUIDFromObject(UUID()),
        tenantId: tenantId1,
    };

    assert.commandWorked(tenantMigrationTest0.startMigration(migrationOpts0));
    assert.commandWorked(tenantMigrationTest1.startMigration(migrationOpts1));

    // Wait for both migrations to finish and verify that they succeeded.
    TenantMigrationTest.assertCommitted(
        tenantMigrationTest0.waitForMigrationToComplete(migrationOpts0));
    TenantMigrationTest.assertCommitted(
        tenantMigrationTest1.waitForMigrationToComplete(migrationOpts1));

    const connPoolStatsAfter0 = assert.commandWorked(donorPrimary.adminCommand({connPoolStats: 1}));
    // The donor targeted two different recipient replica sets, so it created two new RSMs.
    assert.eq(connPoolStatsAfter0.numReplicaSetMonitorsCreated,
              connPoolStatsBefore.numReplicaSetMonitorsCreated + 2);
    assert.eq(Object.keys(connPoolStatsAfter0.replicaSets).length, 2);

    assert.commandWorked(tenantMigrationTest0.forgetMigration(migrationOpts0.migrationIdString));
    assert.commandWorked(tenantMigrationTest1.forgetMigration(migrationOpts1.migrationIdString));

    // After the migrations are forgotten, their RSMs are garbage collected.
    const connPoolStatsAfter1 = assert.commandWorked(donorPrimary.adminCommand({connPoolStats: 1}));
    assert.eq(Object.keys(connPoolStatsAfter1.replicaSets).length, 0);

    assert.eq(Object
                  .keys(assert.commandWorked(rst1.getPrimary().adminCommand({connPoolStats: 1}))
                            .replicaSets)
                  .length,
              0);
})();

// Test concurrent incoming migrations from different donors.
(() => {
    const tenantMigrationTest0 = new TenantMigrationTest({donorRst: rst0, recipientRst: rst2});
    const tenantMigrationTest1 = new TenantMigrationTest({donorRst: rst1, recipientRst: rst2});
    const tenantId0 = `${kTenantIdPrefix}-ConcurrentIncomingMigrations0`;
    const tenantId1 = `${kTenantIdPrefix}-ConcurrentIncomingMigrations1`;

    const migrationOpts0 = {
        migrationIdString: extractUUIDFromObject(UUID()),
        tenantId: tenantId0,
    };
    const migrationOpts1 = {
        migrationIdString: extractUUIDFromObject(UUID()),
        tenantId: tenantId1,
    };

    assert.commandWorked(tenantMigrationTest0.startMigration(migrationOpts0));
    assert.commandWorked(tenantMigrationTest1.startMigration(migrationOpts1));

    // Wait for both migrations to finish and verify that they succeeded.
    TenantMigrationTest.assertCommitted(
        tenantMigrationTest0.waitForMigrationToComplete(migrationOpts0));
    TenantMigrationTest.assertCommitted(
        tenantMigrationTest1.waitForMigrationToComplete(migrationOpts1));

    // Cleanup.
    assert.commandWorked(tenantMigrationTest0.forgetMigration(migrationOpts0.migrationIdString));
    assert.commandWorked(tenantMigrationTest1.forgetMigration(migrationOpts1.migrationIdString));

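    // After the migrations are forgotten, each donor should have garbage collected its
    // ReplicaSetMonitor for the shared recipient.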
    const connPoolStatsAfter0 =
        assert.commandWorked(rst0.getPrimary().adminCommand({connPoolStats: 1}));
    assert.eq(Object.keys(connPoolStatsAfter0.replicaSets).length, 0);

    const connPoolStatsAfter1 =
        assert.commandWorked(rst1.getPrimary().adminCommand({connPoolStats: 1}));
    assert.eq(Object.keys(connPoolStatsAfter1.replicaSets).length, 0);
})();

// Test concurrent outgoing migrations to the same recipient. Verify that the tenant
// migration donor removes the ReplicaSetMonitor for a recipient only when the last
// migration to that recipient completes.
(() => {
    const tenantMigrationTest0 = new TenantMigrationTest({donorRst: rst0, recipientRst: rst1});
    const tenantMigrationTest1 = new TenantMigrationTest({donorRst: rst0, recipientRst: rst1});

    const tenantId0 = `${kTenantIdPrefix}-ConcurrentOutgoingMigrationsToSameRecipient0`;
    const tenantId1 = `${kTenantIdPrefix}-ConcurrentOutgoingMigrationsToSameRecipient1`;

    const donorsColl = tenantMigrationTest0.getDonorRst().getPrimary().getCollection(
        TenantMigrationTest.kConfigDonorsNS);

    const migrationOpts0 = {
        migrationIdString: extractUUIDFromObject(UUID()),
        tenantId: tenantId0,
    };
    const migrationOpts1 = {
        migrationIdString: extractUUIDFromObject(UUID()),
        tenantId: tenantId1,
    };

    const donorPrimary = rst0.getPrimary();

    const connPoolStatsBefore = assert.commandWorked(donorPrimary.adminCommand({connPoolStats: 1}));

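    // Hold migration1 in the blocking state so that migration0 can commit and be
    // forgotten while migration1 is still active against the same recipient.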
    let blockFp = configureFailPoint(
        donorPrimary, "pauseTenantMigrationBeforeLeavingBlockingState", {tenantId: tenantId1});
    assert.commandWorked(tenantMigrationTest0.startMigration(migrationOpts0));
    assert.commandWorked(tenantMigrationTest1.startMigration(migrationOpts1));

    // Wait for migration1 to pause in the blocking state and for migration0 to commit.
    blockFp.wait();
    TenantMigrationTest.assertCommitted(
        tenantMigrationTest0.waitForMigrationToComplete(migrationOpts0));

    // Verify that exactly one RSM was created.
    const connPoolStatsDuringMigration =
        assert.commandWorked(donorPrimary.adminCommand({connPoolStats: 1}));
    assert.eq(connPoolStatsDuringMigration.numReplicaSetMonitorsCreated,
              connPoolStatsBefore.numReplicaSetMonitorsCreated + 1);
    assert.eq(Object.keys(connPoolStatsDuringMigration.replicaSets).length, 1);

    // Garbage collect migration0 and verify that the RSM was not removed, since
    // migration1 is still active.
    assert.commandWorked(tenantMigrationTest0.forgetMigration(migrationOpts0.migrationIdString));
    assert.eq(
        Object.keys(assert.commandWorked(donorPrimary.adminCommand({connPoolStats: 1})).replicaSets)
            .length,
        1);

    // Let migration1 finish.
    blockFp.off();
    TenantMigrationTest.assertCommitted(
        tenantMigrationTest1.waitForMigrationToComplete(migrationOpts1));

    // Verify that the RSM is garbage collected once migration1 is cleaned up.
    assert.commandWorked(tenantMigrationTest1.forgetMigration(migrationOpts1.migrationIdString));

    assert.eq(
        Object.keys(assert.commandWorked(donorPrimary.adminCommand({connPoolStats: 1})).replicaSets)
            .length,
        0);
})();

rst0.stopSet();
rst1.stopSet();
rst2.stopSet();
})();