/**
* Mongos has special targeting behavior for createIndex, reIndex, dropIndex, and collMod:
*
* - If called on an unsharded collection, the request is routed only to the primary shard.
* - If called on a sharded collection, the request is broadcast to shards with chunks.
*
* This test verifies this behavior.
*/
// This test shuts down a shard's node, so consistency checks cannot be run against that node
// and would otherwise fail. The test also deliberately leaves indexes inconsistent across
// shards, so the index consistency check must be skipped as well.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckingIndexesConsistentAcrossCluster = true;
(function() {
'use strict';
// Helper function that runs listIndexes against shards to check for the existence of an index.
function checkShardIndexes(indexKey, shardsWithIndex, shardsWithoutIndex) {
    function shardHasIndex(indexKey, shard) {
        const res = shard.getDB(dbName).runCommand({listIndexes: collName});
        if (res.code === ErrorCodes.NamespaceNotFound) {
            // The shard does not have the collection at all.
            return [res, false];
        }
        assert.commandWorked(res);
        for (const index of res.cursor.firstBatch) {
            if (index.key.hasOwnProperty(indexKey)) {
                return [res, true];
            }
        }
        return [res, false];
    }

    for (const shard of shardsWithIndex) {
        const [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard);
        assert(foundIndex,
               "expected to see index with key " + indexKey + " in listIndexes response from " +
                   shard + ": " + tojson(listIndexesRes));
    }
    for (const shard of shardsWithoutIndex) {
        const [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard);
        assert(!foundIndex,
               "expected not to see index with key " + indexKey +
                   " in listIndexes response from " + shard + ": " + tojson(listIndexesRes));
    }
}

// Helper function that runs listCollections against shards to check for the existence of a
// collection option.
function checkShardCollOption(optionKey, optionValue, shardsWithOption, shardsWithoutOption) {
    function shardHasOption(optionKey, optionValue, shard) {
        const res =
            shard.getDB(dbName).runCommand({listCollections: 1, filter: {name: collName}});
        assert.commandWorked(res);
        if (res.cursor.firstBatch.length === 0) {
            // The shard does not have the collection at all.
            return [res, false];
        }
        assert.eq(1, res.cursor.firstBatch.length);
        if (friendlyEqual(res.cursor.firstBatch[0].options[optionKey], optionValue)) {
            return [res, true];
        }
        return [res, false];
    }

    for (const shard of shardsWithOption) {
        const [listCollsRes, foundOption] = shardHasOption(optionKey, optionValue, shard);
        assert(foundOption,
               "expected to see option " + optionKey + " in listCollections response from " +
                   shard + ": " + tojson(listCollsRes));
    }
    for (const shard of shardsWithoutOption) {
        const [listCollsRes, foundOption] = shardHasOption(optionKey, optionValue, shard);
        assert(!foundOption,
               "expected not to see option " + optionKey + " in listCollections response from " +
                   shard + ": " + tojson(listCollsRes));
    }
}

const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;
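// Create a cluster with three single-node shards and a three-node config server replica set.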
var st = new ShardingTest(
    {shards: {rs0: {nodes: 1}, rs1: {nodes: 1}, rs2: {nodes: 1}}, other: {config: 3}});
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.name);
// When creating index or setting a collection option on an unsharded collection, only the
// primary shard is affected.
assert.commandWorked(st.s.getDB(dbName).getCollection(collName).createIndex({"idx1": 1}));
checkShardIndexes("idx1", [st.shard0], [st.shard1, st.shard2]);
const validationOption1 = {
    dummyField1: {$type: "string"}
};
assert.commandWorked(st.s.getDB(dbName).runCommand({
    collMod: collName,
    validator: validationOption1,
    validationLevel: "moderate",
    validationAction: "warn"
}));
checkShardCollOption("validator", validationOption1, [st.shard0], [st.shard1, st.shard2]);
// After sharding the collection but before any migrations, only the primary shard has the
// index and collection option.
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
checkShardIndexes("idx1", [st.shard0], [st.shard1, st.shard2]);
checkShardCollOption("validator", validationOption1, [st.shard0], [st.shard1, st.shard2]);
// After a migration, the recipient shard also has the index and collection option. (The donor,
// shard0, retains its local copy of the collection even though it no longer owns any chunks.)
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName}));
checkShardIndexes("idx1", [st.shard0, st.shard1], [st.shard2]);
checkShardCollOption("validator", validationOption1, [st.shard0, st.shard1], [st.shard2]);
// Starting in v4.4, createIndex, reIndex, dropIndex, and collMod only target the shards that own
// chunks for the collection (as opposed to all shards in previous versions). The commands
// retry on shard version errors and only report overall success. That is, IndexNotFound
// errors from shards are ignored and not included in the 'raw' shard responses.
var res;
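// Each targeted shard's response appears in the 'raw' field of the mongos response, keyed by
// the shard's host; shards that were not targeted do not appear at all.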
// createIndex
res = st.s.getDB(dbName).getCollection(collName).createIndex({"idx2": 1});
assert.commandWorked(res);
assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
assert.eq(undefined, res.raw[st.shard2.host], tojson(res));
checkShardIndexes("idx2", [st.shard1], [st.shard2]);
// dropIndex
res = st.s.getDB(dbName).getCollection(collName).dropIndex("idx1_1");
assert.commandWorked(res);
assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
assert.eq(undefined, res.raw[st.shard2.host], tojson(res));
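// shard0 is omitted below: it still has idx1 locally because the dropIndex was not targeted at it.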
checkShardIndexes("idx1", [], [st.shard1, st.shard2]);
// collMod
const validationOption2 = {
    dummyField2: {$type: "string"}
};
res = st.s.getDB(dbName).runCommand({
    collMod: collName,
    validator: validationOption2,
    validationLevel: "moderate",
    validationAction: "warn"
});
assert.commandWorked(res);
assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
assert.eq(undefined, res.raw[st.shard2.host], tojson(res));
checkShardCollOption("validator", validationOption2, [st.shard1], [st.shard2]);
// Check that errors from shards are aggregated correctly.
// If no shard returns success, then errors that are usually ignored should be reported.
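// 'unshardedColl' does not exist, so the primary shard responds with NamespaceNotFound.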
res = st.s.getDB(dbName).getCollection("unshardedColl").dropIndex("nonexistentIndex");
assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
assert.eq(res.code, ErrorCodes.NamespaceNotFound, tojson(res));
assert.eq("NamespaceNotFound", res.codeName, tojson(res));
assert.neq(null, res.errmsg, tojson(res));
// If all shards report the same error, the overall command error should be set to that error.
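// An empty index spec is invalid; the one targeted shard (shard1, which owns the chunk)
// returns CannotCreateIndex.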
res = st.s.getDB(dbName).getCollection(collName).createIndex({});
assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
assert.eq(undefined, res.raw[st.shard2.host], tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
assert.eq(res.code, res.raw[st.shard1.host].code, tojson(res));
assert.eq(res.codeName, res.raw[st.shard1.host].codeName, tojson(res));
assert.eq(res.code, ErrorCodes.CannotCreateIndex, tojson(res));
assert.eq("CannotCreateIndex", res.codeName, tojson(res));
assert.neq(null, res.errmsg, tojson(res));
// If all the non-ignorable errors reported by shards are the same, the overall command error
// should be set to that error.
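// A unique index whose key is not prefixed by the shard key cannot be created on a sharded
// collection, so shard1 returns CannotCreateIndex.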
res = st.s.getDB(dbName).getCollection(collName).createIndex({z: 1}, {unique: true});
assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
assert.eq(undefined, res.raw[st.shard2.host], tojson(res));
assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard1.host].code, tojson(res));
assert.eq("CannotCreateIndex", res.raw[st.shard1.host].codeName, tojson(res));
assert.eq(res.code, ErrorCodes.CannotCreateIndex, tojson(res));
assert.eq("CannotCreateIndex", res.codeName, tojson(res));
assert.neq(null, res.errmsg, tojson(res));
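// Shut down the primary shard so that commands targeting it fail with a network error.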
st.rs0.stopSet();
// If we receive a non-ignorable error, it should be reported as the command error.
res = st.s.getDB(dbName).getCollection("unshardedColl").createIndex({"validIdx": 1});
assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
// We might see 'HostUnreachable' the first time if the mongos's ReplicaSetMonitor does not yet
// know that the shard is down.
assert(res.code === ErrorCodes.HostUnreachable ||
           res.code === ErrorCodes.FailedToSatisfyReadPreference,
       tojson(res));
assert(res.codeName === "HostUnreachable" || res.codeName === "FailedToSatisfyReadPreference",
       tojson(res));
// The sharded collection's chunks all live on shard1, so the command targets only shard1 and
// succeeds even though the primary shard (shard0) is down.
res = st.s.getDB(dbName).getCollection(collName).createIndex({"validIdx": 1});
assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
assert.eq(res.ok, 1, tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res)); // gets created on shard that owns chunks
assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // shard does not own chunks
st.stop();
})();