path: root/jstests/sharding/replmonitor_bad_seed.js
/**
 * This test checks that a ReplicaSetMonitor initialized with a replica set
 * seed list in which none of the nodes is up is able to recover once the
 * replica set comes back up.
 *
 * The ReplicaSetMonitor is tested indirectly through mongos. Creating a
 * connection with the Mongo constructor won't work here because the shell
 * throws an exception before the connection object is bound to the js
 * environment (in simple terms, the connection object is never returned
 * when the shell cannot connect). Another reason for using mongos is that
 * connPoolStats can be used to synchronize the test and make sure the
 * monitor was able to refresh before proceeding with the checks.
 *
 * Any test that restarts a shard mongod and sends sharding requests to it after the restart
 * cannot use an in-memory storage engine for the shard, since the shardIdentity document would
 * be lost after the restart.
 *
 * @tags: [requires_persistence]
 */
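// For illustration only (not executed; hypothetical hosts): a direct shell connection attempt
// against a seed list with no reachable nodes simply throws, and the connection object is
// never bound, which is why the monitor is exercised through mongos instead:
//
//   var conn = new Mongo('rs0/host1:27017,host2:27017');  // throws; `conn` is never assigned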
(function() {
'use strict';
load("jstests/replsets/rslib.js");

var st = new ShardingTest({shards: 1, rs: {oplogSize: 10}});
var replTest = st.rs0;

assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));

// The cluster now has the shard information. Kill the replica set so that when mongos restarts
// and tries to create a ReplicaSetMonitor for that shard, it will not be able to connect to any
// of the seed servers.
// Don't clear the data directory so that the shardIdentity document is not deleted.
replTest.stopSet(undefined /* send default signal */, true /* don't clear data directory */);

st.restartMongos(0);

replTest.startSet({restart: true, noCleanData: true});
replTest.awaitSecondaryNodes();

// Verify that the ReplicaSetMonitor can reach the restarted set.
awaitRSClientHosts(st.s0, replTest.nodes, {ok: true});
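
// Illustrative only (a sketch of what awaitRSClientHosts from rslib.js polls, not required by
// the test): the mongos-side ReplicaSetMonitor state is exposed through the connPoolStats
// command; the `replicaSets` field (assumed output shape) lists the hosts the monitor
// currently considers up.
var poolStats = assert.commandWorked(st.s0.adminCommand({connPoolStats: 1}));
printjson(poolStats.replicaSets);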
replTest.awaitNodesAgreeOnPrimary();

assert.commandWorked(st.s0.getDB('test').user.insert({x: 1}));

st.stop();
})();