// Tests the state of sharding data after restarts and stepdowns of the shard primaries
//
// This test involves restarting single-node replica sets, so it cannot be run on ephemeral
// storage engines. A single-node replica set using an ephemeral engine has no knowledge of
// the replica set configuration once restarted, so it will not elect itself primary.
// @tags: [requires_persistence]

(function() {

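// Two shards, each backed by a single-node replica set, plus a single mongos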
var st = new ShardingTest({ shards: 2,
                            mongos: 1,
                            other: {
                                rs: true,
                                rsOptions: { nodes : 1 }
                            }
                          });

var mongos = st.s0;
var admin = mongos.getDB("admin");
var shards = mongos.getCollection("config.shards").find().toArray();

var coll = mongos.getCollection("foo.bar");
var collSharded = mongos.getCollection("foo.barSharded");

assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" }));
printjson(admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }));
assert.commandWorked(admin.runCommand({ shardCollection : collSharded.toString(),
                                        key : { _id : 1 } }));
assert.commandWorked(admin.runCommand({ moveChunk : collSharded.toString(),
                                        find : { _id : 0 },
                                        to : shards[1]._id }));

assert.writeOK(coll.insert({ some : "data" }));
assert.writeOK(collSharded.insert({ some : "data" }));
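// The shardCollection above created the collection at major version 1 and the moveChunk
// bumped it to 2; version.t is the major component of the shard version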
assert.eq(2, mongos.adminCommand({ getShardVersion : collSharded.toString() }).version.t);

st.printShardingStatus();

// Restart both primaries to reset our sharding data
var restartPrimaries = function() {
    var rs0Primary = st.rs0.getPrimary();
    var rs1Primary = st.rs1.getPrimary();

    st.rs0.stop(rs0Primary);
    st.rs1.stop(rs1Primary);

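    // Wait for mongos' replica set monitors to notice that both primaries are down before
    // restarting them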
    ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { ok : false });

    st.rs0.start(rs0Primary, { restart : true });
    st.rs1.start(rs1Primary, { restart : true });

    ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { ismaster : true });
};

restartPrimaries();

// Sharding data gets initialized either when shards are hit by an unsharded query or, on CSRS
// deployments only, when a metadata operation run before the restart wrote a minOpTime
// recovery record that is replayed at startup. In this case the moveChunk above (from shard0
// to shard1) ran on shard0, so shard0 will have this record.
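// As a sanity check, the recovery record can be inspected directly. A minimal sketch,
// assuming the record lives in admin.system.version under _id "minOpTimeRecovery" (an
// internal detail, so treat the location and document shape as assumptions):
if (st.configRS) {
    printjson(st.rs0.getPrimary().getDB("admin")
                    .getCollection("system.version")
                    .findOne({ _id : "minOpTimeRecovery" }));
}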
if (st.configRS) {
    assert.neq("",
               st.rs0.getPrimary().adminCommand({ getShardVersion: coll.toString() }).configServer);
}
else {
    assert.eq("",
              st.rs0.getPrimary().adminCommand({ getShardVersion: coll.toString() }).configServer);
}
assert.eq("",
          st.rs1.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);

// Doing a find on the unsharded collection only accesses the primary shard (rs0), which has
// already recovered its sharding state. Ensure that the other shard (rs1) still has no
// sharding knowledge.
assert.neq(null, coll.findOne({}));
assert.eq("",
          st.rs1.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);

//
//
// Sharding data initialized when shards are hit by a sharded query
assert.neq(null, collSharded.findOne({}));
assert.neq("",
           st.rs0.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);
assert.neq("",
           st.rs1.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);


// Stepdown both primaries to reset our sharding data
var stepDownPrimaries = function() {

    var rs0Primary = st.rs0.getPrimary();
    var rs1Primary = st.rs1.getPrimary();

    try {
        rs0Primary.adminCommand({ replSetStepDown : 1000 * 1000, force : true });
        assert(false, "replSetStepDown should have thrown");
    }
    catch(ex) {
        // Expected; replSetStepDown closes all open connections, so the shell sees a
        // network error even when the stepdown succeeds. The stepdown is verified below.
    }

    try {
        rs1Primary.adminCommand({ replSetStepDown : 1000 * 1000, force : true });
        assert(false, "replSetStepDown should have thrown");
    }
    catch(ex) {
        // Expected; replSetStepDown closes all open connections, so the shell sees a
        // network error even when the stepdown succeeds. The stepdown is verified below.
    }

    ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { secondary : true });
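    // replSetStepDown leaves the nodes frozen for the stepdown period; unfreeze them so
    // that, as the only electable node in each set, they can become primary again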

    assert.commandWorked(new Mongo(rs0Primary.host).adminCommand({ replSetFreeze : 0 }));
    assert.commandWorked(new Mongo(rs1Primary.host).adminCommand({ replSetFreeze : 0 }));

    rs0Primary = st.rs0.getPrimary();
    rs1Primary = st.rs1.getPrimary();

    // Flush connections to avoid transient issues with conn pooling
    assert.commandWorked(rs0Primary.adminCommand({ connPoolSync : true }));
    assert.commandWorked(rs1Primary.adminCommand({ connPoolSync : true }));

    ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { ismaster : true });
};

stepDownPrimaries();

//
//
// No sharding metadata until shards are hit by a metadata operation
assert.eq({},
          st.rs0.getPrimary().adminCommand(
            { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
assert.eq({},
          st.rs1.getPrimary().adminCommand(
            { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);

//
//
// Metadata commands should enable sharding data implicitly, but only on the shard that owns
// chunks of the collection. The single chunk currently lives on shard1, so the split
// initializes rs1 but leaves rs0 untouched.
assert.commandWorked(mongos.adminCommand({ split : collSharded.toString(), middle : { _id : 0 }}));
assert.eq({},
          st.rs0.getPrimary().adminCommand(
            { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
assert.neq({},
           st.rs1.getPrimary().adminCommand(
            { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);

//
//
// MoveChunk should enable sharding data implicitly on the TO-shard (rs0); rs1, the donor,
// was already initialized by the split above
assert.commandWorked(mongos.adminCommand({ moveChunk : collSharded.toString(), find : { _id : 0 },
                                           to : shards[0]._id }));
assert.neq({},
           st.rs0.getPrimary().adminCommand(
                { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
assert.neq({},
           st.rs1.getPrimary().adminCommand(
                { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);

jsTest.log("DONE!");

st.stop();

})();