summaryrefslogtreecommitdiff
path: root/jstests/sharding/implicit_default_write_concern_add_shard.js
blob: 6cab41aca96a73e80fbc4e89ef8c367cb81461b5 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
/**
 * Tests adding shard to sharded cluster will fail if the implicitDefaultWriteConcern is
 * w:1 and CWWC is not set.
 */

(function() {
"use strict";

// Adds a shard near the end of the test that won't have metadata for the sessions collection during
// test shutdown. This is only a problem with a config shard because otherwise there are no shards
// so the sessions collection can't be created.
// NOTE(review): TestData.configShard is set by the test suite; when falsy this disables nothing.
TestData.skipCheckShardFilteringMetadata = TestData.configShard;

load("jstests/replsets/rslib.js");  // For reconfig and isConfigCommitted.

// Adds one data-bearing (non-arbiter) member with the given _id to the replica
// set `rst`, then waits until the new configuration is committed, replicated,
// and the member has finished its "newlyAdded" phase so that it can vote.
function addNonArbiterNode(nodeId, rst) {
    const newConfig = rst.getReplSetConfigFromNode();
    const newMember = {_id: nodeId, host: rst.add().host};
    newConfig.members.push(newMember);
    reconfig(rst, newConfig);

    assert.soon(() => isConfigCommitted(rst.getPrimary()));
    rst.waitForConfigReplication(rst.getPrimary());
    rst.awaitReplication();

    // When we add a new node to a replica set, we temporarily add the "newlyAdded" field so that it
    // is non-voting until it completes initial sync.
    // This waits for the primary to see that the node has transitioned to a secondary, recovering,
    // or rollback state to ensure that we can do the automatic reconfig to remove the "newlyAdded"
    // field so that the node can actually vote so replication coordinator can update implicit
    // default write-concern depending on the newly added voting member.
    rst.waitForAllNewlyAddedRemovals();
}

// Spins up a replica-set shard and a sharded cluster, then verifies addShard
// behavior for the given combination:
//   - CWWCSet:     whether a cluster-wide write concern is set before addShard.
//   - isPSASet:    whether the shard is a PSA (primary-secondary-arbiter) set,
//                  whose implicit default write concern is w:1.
//   - fixAddShard: when the initial addShard is expected to fail (!CWWCSet &&
//                  isPSASet), the remediation to apply: "setCWWC" or "reconfig".
function testAddShard(CWWCSet, isPSASet, fixAddShard) {
    jsTestLog("Running sharding test with CWWCSet: " + tojson(CWWCSet) +
              ", isPSASet: " + tojson(isPSASet) + ", fixAddShard: " + tojson(fixAddShard));
    // An arbiter makes the implicit default write concern w:1 instead of w:"majority".
    const replSetNodes = isPSASet ? [{}, {}, {arbiter: true}] : [{}, {}];

    const shardServer = new ReplSetTest(
        {name: "shardServer", nodes: replSetNodes, nodeOptions: {shardsvr: ""}, useHostName: true});
    shardServer.startSet();
    shardServer.initiate();

    const st = new ShardingTest({
        shards: TestData.configShard ? 1 : 0,
        mongos: 1,
    });
    const admin = st.getDB('admin');

    if (CWWCSet) {
        jsTestLog("Setting the CWWC before adding shard.");
        assert.commandWorked(st.s.adminCommand(
            {setDefaultRWConcern: 1, defaultWriteConcern: {w: "majority", wtimeout: 0}}));
    }

    jsTestLog("Attempting to add shard to the cluster");
    if (!CWWCSet && isPSASet) {
        // No CWWC + PSA shard: the implicit default write concern would be w:1,
        // so addShard must be rejected until one of the fixes below is applied.
        jsTestLog("Adding shard to the cluster should fail.");
        assert.commandFailed(admin.runCommand({addshard: shardServer.getURL()}));

        if (fixAddShard === "setCWWC") {
            jsTestLog("Setting the CWWC to fix addShard.");
            assert.commandWorked(st.s.adminCommand(
                {setDefaultRWConcern: 1, defaultWriteConcern: {w: "majority", wtimeout: 0}}));
        } else {
            // Adding two voting data-bearing members changes the shard's
            // implicit default write concern away from w:1.
            jsTestLog("Reconfig shardServer to fix addShard.");
            addNonArbiterNode(3, shardServer);
            addNonArbiterNode(4, shardServer);
        }
    }

    jsTestLog("Adding shard to the cluster should succeed.");
    assert.commandWorked(admin.runCommand({addshard: shardServer.getURL()}));

    st.stop();
    shardServer.stopSet();
}

for (const CWWCSet of [true, false]) {
    for (const isPSASet of [false, true]) {
        if (!CWWCSet && isPSASet) {
            for (const fixAddShard of ["setCWWC", "reconfig"]) {
                testAddShard(CWWCSet, isPSASet, fixAddShard);
            }
        } else {
            testAddShard(CWWCSet, isPSASet);
        }
    }
}
})();