path: root/jstests/sharding/startup_with_all_configs_down.js
// Tests that mongos and shard mongods can both be started up successfully when there is no config
// server, and that they will wait until there is a config server online before handling any
// sharding operations.
//
// This test involves restarting a shard, so it cannot run on ephemeral storage engines:
// a restarted node loses all of its data when using an ephemeral storage engine.
// @tags: [requires_persistence]

// The UUID consistency check uses connections to shards cached on the ShardingTest object, but this
// test restarts a shard, so the cached connection is not usable.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;

(function() {
"use strict";

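// Start a cluster with two replica set shards, a config server replica set, and one
// mongos (st.s). st.rs0/st.rs1 refer to the shard replica sets used below.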
var st = new ShardingTest({shards: 2});

jsTestLog("Setting up initial data");

for (var i = 0; i < 100; i++) {
    assert.commandWorked(st.s.getDB('test').foo.insert({_id: i}));
}

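// Enable sharding on the 'test' database and pin its primary to shard0, so the
// split/moveChunk below deterministically places one chunk on each shard.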
assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
st.ensurePrimaryShard('test', st.shard0.shardName);

assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
assert.commandWorked(st.s0.adminCommand({split: 'test.foo', find: {_id: 50}}));
assert.commandWorked(
    st.s0.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: st.shard1.shardName}));

// Make sure the pre-existing mongos already has the routing information loaded into memory
assert.eq(100, st.s.getDB('test').foo.find().itcount());

jsTestLog("Shutting down all config servers");
st.configRS.nodes.forEach((config) => {
    st.stopConfigServer(config);
});

jsTestLog("Starting a new mongos when there are no config servers up");
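// waitForConnect: false makes runMongos return as soon as the process is spawned,
// rather than failing because the new mongos cannot be connected to yet.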
var newMongosInfo = MongoRunner.runMongos({configdb: st._configDB, waitForConnect: false});
// The new mongos won't accept any new connections, but it should stay up and continue trying
// to contact the config servers to finish startup.
assert.throws(function() {
    new Mongo(newMongosInfo.host);
});

jsTestLog("Restarting a shard while there are no config servers up");
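// stopSet(signal, forRestart) with forRestart=true keeps each node's dbpath so its
// data survives; startSet(options, restart=true) then relaunches the same nodes.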
st.rs1.stopSet(undefined, true);
st.rs1.startSet({waitForConnect: false}, true);

jsTestLog("Queries should fail because the shard can't initialize sharding state");
var error = assert.throws(function() {
    st.s.getDB('test').foo.find().itcount();
});

assert.contains(error.code,
                [
                    ErrorCodes.ReplicaSetNotFound,
                    ErrorCodes.ExceededTimeLimit,
                    ErrorCodes.HostUnreachable,
                    ErrorCodes.FailedToSatisfyReadPreference
                ],
                'Query failed with unexpected error: ' + tojson(error));

jsTestLog("Restarting the config servers");
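// restartConfigServer brings each config node back up with its original options and data.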
st.configRS.nodes.forEach((config) => {
    st.restartConfigServer(config);
});

jsTestLog("Sleeping for 60 seconds to let the shards' ReplicaSetMonitors discover that " +
          "the config servers are back up");
sleep(60000);

jsTestLog("Queries against the original mongos should work again");
assert.eq(100, st.s.getDB('test').foo.find().itcount());

jsTestLog("Should now be possible to connect to the mongos that was started while the config " +
          "servers were down");
var newMongosConn = null;
var caughtException = null;
assert.soon(function() {
    try {
        newMongosConn = new Mongo(newMongosInfo.host);
        return true;
    } catch (e) {
        caughtException = e;
        return false;
    }
}, function() {
    // Build the message lazily so it reports the last exception actually caught, rather
    // than the null value that caughtException holds when assert.soon is first called.
    return "Failed to connect to mongos after config servers were restarted: " +
        tojson(caughtException);
});

assert.eq(100, newMongosConn.getDB('test').foo.find().itcount());

st.stop();
MongoRunner.stopMongos(newMongosInfo);
}());