1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
|
/*
* Tests that serverStatus includes a migration status when called on the source shard of an active
* migration.
*
* @tags: [requires_fcv_63]
*/
load('./jstests/libs/chunk_manipulation_util.js');
load("jstests/libs/feature_flag_util.js");
(function() {
'use strict';
// Dedicated mongod used by moveChunkParallel/startParallelOps to host the
// parallel shell that drives the migration.
var staticMongod = MongoRunner.runMongod({});  // For startParallelOps.
var st = new ShardingTest({shards: 2, mongos: 1});
var mongos = st.s0;
var admin = mongos.getDB("admin");
var coll = mongos.getCollection("migration_server_status.coll");
// When this flag is enabled, DDL operations add one extra session oplog entry,
// which shifts the "skipped" counter asserted later by exactly one.
const usingSetClusterParameter =
FeatureFlagUtil.isPresentAndEnabled(st.config, "ClusterCardinalityParameter");
// Shard the collection with shard0 as primary and split at {_id: 0} so the
// chunk [0, MaxKey) can later be moved to shard1.
assert.commandWorked(
admin.runCommand({enableSharding: coll.getDB() + "", primaryShard: st.shard0.shardName}));
assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
// Mimic inserts for a retryable-write session: 5000 docs with _id in
// [-2600, 2400), written under an lsid/txnNumber so each insert produces a
// session oplog entry. 2400 docs land in the chunk that will be migrated and
// 2600 in the chunk that stays behind — these counts are asserted later.
var documents = [];
for (var x = -2600; x < 2400; x++) {
documents.push({_id: x});
}
assert.commandWorked(
mongos.getDB("migration_server_status")
.runCommand(
{insert: "coll", documents: documents, lsid: {id: UUID()}, txnNumber: NumberLong(1)}));
// Pause the migration once it starts on both shards -- somewhat arbitrary pause point.
pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
// Kick off the chunk migration [{_id: 0}, MaxKey) from shard0 to shard1 in a
// parallel shell; joinMoveChunk() blocks until it finishes.
var joinMoveChunk = moveChunkParallel(
staticMongod, st.s0.host, {_id: 1}, null, coll.getFullName(), st.shard1.shardName);
// Verifies that the `sharding.migrations` section of a serverStatus response
// describes the expected active migration: source/destination shards, whether
// this node is the donor, the chunk bounds, and the namespace being migrated.
var assertMigrationStatusOnServerStatus = function(serverStatusResult,
                                                   sourceShard,
                                                   destinationShard,
                                                   isDonorShard,
                                                   minKey,
                                                   maxKey,
                                                   collectionName) {
    var migrations = serverStatusResult.sharding.migrations;
    // [expected, actual] pairs, checked in a fixed order so the first mismatch
    // reported is deterministic.
    var checks = [
        [sourceShard, migrations.source],
        [destinationShard, migrations.destination],
        [isDonorShard, migrations.isDonorShard],
        [minKey, migrations.chunk.min],
        [maxKey, migrations.chunk.max],
        [collectionName, migrations.collection],
    ];
    checks.forEach(function(pair) {
        assert.eq(pair[0], pair[1]);
    });
};
// Checks the donor-side session-migration counters in a serverStatus response.
// A null expected value only verifies the counter is present (background
// operations can make exact counts unpredictable).
// NOTE(review): the null branches use truthiness, so they would also fail on a
// counter that exists with value 0 — presumably never the case at these pause
// points; confirm before relying on it.
var assertSessionMigrationStatusSource = function(
    serverStatusResult, expectedEntriesToBeMigrated, expectedEntriesSkippedLowerBound) {
    var migrations = serverStatusResult.sharding.migrations;

    if (expectedEntriesToBeMigrated != null) {
        assert.eq(migrations.sessionOplogEntriesToBeMigratedSoFar,
                  expectedEntriesToBeMigrated);
    } else {
        assert(migrations.sessionOplogEntriesToBeMigratedSoFar);
    }

    if (expectedEntriesSkippedLowerBound != null) {
        // Running DDL operations increases the skipped count by 1.
        var expectedSkipped = expectedEntriesSkippedLowerBound;
        if (usingSetClusterParameter) {
            expectedSkipped += 1;
        }
        assert.eq(migrations.sessionOplogEntriesSkippedSoFarLowerBound, expectedSkipped);
    } else {
        assert(migrations.sessionOplogEntriesSkippedSoFarLowerBound);
    }
};
// Checks the recipient-side session-migration counter in a serverStatus
// response. A null expected value only verifies the counter is present
// (exact counts can be perturbed by background operations).
var assertSessionMigrationStatusDestination = function(serverStatusResult,
                                                       expectedEntriesMigrated) {
    var migrations = serverStatusResult.sharding.migrations;
    if (expectedEntriesMigrated != null) {
        assert.eq(migrations.sessionOplogEntriesMigrated, expectedEntriesMigrated);
    } else {
        assert(migrations.sessionOplogEntriesMigrated);
    }
};
// Wait until the donor actually reaches the paused step before inspecting
// serverStatus on either shard.
waitForMoveChunkStep(st.shard0, moveChunkStepNames.startedMoveChunk);
// Source shard should return a migration status.
var shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
assert(shard0ServerStatus.sharding.migrations);
assertMigrationStatusOnServerStatus(shard0ServerStatus,
st.shard0.shardName,
st.shard1.shardName,
true,
{"_id": 0},
{"_id": {"$maxKey": 1}},
coll + "");
// Exact counts are not stable this early in the migration, so only check the
// session counters exist.
assertSessionMigrationStatusSource(shard0ServerStatus, null, null);
// Destination shard should return a migration status.
var shard1ServerStatus = st.shard1.getDB('admin').runCommand({serverStatus: 1});
assert(shard1ServerStatus.sharding.migrations);
assertMigrationStatusOnServerStatus(shard1ServerStatus,
st.shard0.shardName,
st.shard1.shardName,
false,
{"_id": 0},
{"_id": {"$maxKey": 1}},
coll + "");
assertSessionMigrationStatusDestination(shard1ServerStatus, null);
// Mongos should never return a migration status.
var mongosServerStatus = st.s0.getDB('admin').runCommand({serverStatus: 1});
assert(!mongosServerStatus.sharding.migrations);
// Pause the migration once chunk data is committed. At this point we know that the sessions
// are fully transferred because chunk migration only happens after session migration is complete.
pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
// Source shard should have the correct server status
shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
assert(shard0ServerStatus.sharding.migrations);
assertMigrationStatusOnServerStatus(shard0ServerStatus,
st.shard0.shardName,
st.shard1.shardName,
true,
{"_id": 0},
{"_id": {"$maxKey": 1}},
coll + "");
// Background metadata operations on the config server can throw off the count, so just assert the
// fields are present for a catalog shard.
// 2400 session entries belong to the migrated chunk [0, MaxKey); the other
// 2600 inserts belong to the chunk that stays on shard0 and are skipped.
const expectedEntriesMigrated = TestData.catalogShard ? undefined : 2400;
const expectedEntriesSkipped = TestData.catalogShard ? undefined : 2600;
assertSessionMigrationStatusSource(
shard0ServerStatus, expectedEntriesMigrated, expectedEntriesSkipped);
// Destination shard should have the correct server status
shard1ServerStatus = st.shard1.getDB('admin').runCommand({serverStatus: 1});
assert(shard1ServerStatus.sharding.migrations);
assertMigrationStatusOnServerStatus(shard1ServerStatus,
st.shard0.shardName,
st.shard1.shardName,
false,
{"_id": 0},
{"_id": {"$maxKey": 1}},
coll + "");
assertSessionMigrationStatusDestination(
shard1ServerStatus, expectedEntriesMigrated, expectedEntriesSkipped);
unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
joinMoveChunk();
// Migration is over, should no longer get a migration status.
shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
assert(!shard0ServerStatus.sharding.migrations);
var shard1ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
assert(!shard1ServerStatus.sharding.migrations);
st.stop();
MongoRunner.stopMongod(staticMongod);
})();
|