path: root/jstests/sharding/shard_identity_config_update.js
/**
 * Tests that the config server connection string in the shard identity document of both the
 * primary and secondary will get updated whenever the config server membership changes.
 * @tags: [requires_persistence]
 */
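// For reference, the shardIdentity document inspected below lives in admin.system.version.
// An illustrative (not exhaustive) shape, with placeholder values:
//   {
//       _id: 'shardIdentity',
//       shardName: '<shardName>',
//       clusterId: ObjectId('...'),
//       configsvrConnectionString: '<configReplSetName>/<host1>:<port1>,<host2>:<port2>,...'
//   }
// Only configsvrConnectionString is checked by this test.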

// Checking UUID consistency involves talking to a shard node, which in this test is shut down
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;

(function() {
"use strict";

load('jstests/replsets/rslib.js');

var st = new ShardingTest({shards: {rs0: {nodes: 2}}});

var shardPri = st.rs0.getPrimary();

// Note: Adding new replica set member by hand because of SERVER-24011.

var newNode =
    MongoRunner.runMongod({configsvr: '', replSet: st.configRS.name, storageEngine: 'wiredTiger'});
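// The node is started with --configsvr and the wiredTiger storage engine (required for config
// server replica set members) so that it can be added to the config replSet below.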

var replConfig = st.configRS.getReplSetConfigFromNode();
replConfig.version += 1;
replConfig.members.push({_id: 3, host: newNode.host});

reconfig(st.configRS, replConfig);
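// reconfig() (from rslib.js) applies the updated config via replSetReconfig. Once the shard
// observes the config server membership change, its shardIdentity document should list the
// new member as well, which the assert.soon checks below verify.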

/**
 * Returns true if the configsvrConnectionString in the shardIdentity document of the given
 * connection contains every replica set member node listed in expectedConfigStr.
 */
var checkConfigStrUpdated = function(conn, expectedConfigStr) {
    var shardIdentity = conn.getDB('admin').system.version.findOne({_id: 'shardIdentity'});

    var shardConfigsvrStr = shardIdentity.configsvrConnectionString;
    var shardConfigReplName = shardConfigsvrStr.split('/')[0];
    var expectedReplName = expectedConfigStr.split('/')[0];

    assert.eq(expectedReplName, shardConfigReplName);

    var expectedHostList = expectedConfigStr.split('/')[1].split(',');
    var shardConfigHostList = shardConfigsvrStr.split('/')[1].split(',');

    if (expectedHostList.length != shardConfigHostList.length) {
        return false;
    }

    for (var x = 0; x < expectedHostList.length; x++) {
        if (shardConfigsvrStr.indexOf(expectedHostList[x]) == -1) {
            return false;
        }
    }

    return true;
};

var origConfigConnStr = st.configRS.getURL();
var expectedConfigStr = origConfigConnStr + ',' + newNode.host;
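// For example, if origConfigConnStr is 'test-configRS/h1:20000,h2:20001' and the new node
// listens on h3:20002, then expectedConfigStr is 'test-configRS/h1:20000,h2:20001,h3:20002'
// (host names and ports here are illustrative only).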
assert.soon(function() {
    return checkConfigStrUpdated(st.rs0.getPrimary(), expectedConfigStr);
});

var secConn = st.rs0.getSecondary();
secConn.setSlaveOk(true);
assert.soon(function() {
    return checkConfigStrUpdated(secConn, expectedConfigStr);
});

//
// Remove the newly added member from the config replSet while the shard nodes are down.
// Check that the shard identity document will be updated with the new replSet connection
// string when they come back up.
//

st.rs0.stop(0);
st.rs0.stop(1);

MongoRunner.stopMongod(newNode);

replConfig = st.configRS.getReplSetConfigFromNode();
replConfig.version += 1;
replConfig.members.pop();

reconfig(st.configRS, replConfig);

st.rs0.restart(0, {shardsvr: ''});
st.rs0.restart(1, {shardsvr: ''});

st.rs0.waitForPrimary();
st.rs0.awaitSecondaryNodes();

assert.soon(function() {
    return checkConfigStrUpdated(st.rs0.getPrimary(), origConfigConnStr);
});

secConn = st.rs0.getSecondary();
secConn.setSlaveOk(true);
assert.soon(function() {
    return checkConfigStrUpdated(secConn, origConfigConnStr);
});

st.stop();
})();