/*
* Basic test of successful rollback in replica sets.
*
* This test sets up a 3-node set, with an arbiter and 2 data-bearing nodes, A and B.
* A is the initial primary node.
*
* The test inserts 3 documents into A, and waits for them to replicate to B. Then, it partitions A
* from the other nodes, causing it to step down and causing B to be elected primary.
*
 * Next, 97 more documents are inserted into B, and B is partitioned from the arbiter.
*
* Next, A is allowed to connect to the arbiter again, and gets reelected primary. Because the
 * arbiter doesn't know about the writes that B accepted, A becomes primary and we insert 2 new
* documents. Now, A and B have diverged. We heal the remaining network partition, bringing B back
* into the network.
*
 * Finally, we expect B to roll back its 97 divergent documents and acquire A's two new
 * documents, so that both data-bearing nodes converge on A's version of the data.
*/
load("jstests/replsets/rslib.js");
(function() {
"use strict";
// helper function for verifying contents at the end of the test
// Verifies that a node has converged on the post-rollback data set: exactly
// the five surviving documents q = 1, 2, 3, 7, 8 in the bar collection.
var checkFinalResults = function(db) {
    var docs = db.bar.find().sort({q: 1}).toArray();
    assert.eq(5, docs.length, "incorrect number of documents found. Docs found: " + tojson(docs));
    var expectedQs = [1, 2, 3, 7, 8];
    for (var idx = 0; idx < expectedQs.length; idx++) {
        assert.eq(expectedQs[idx], docs[idx].q);
    }
};
// Start a 3-node set: node 0 (A) gets priority 3 so it wins elections whenever
// it can see a majority; node 2 is a vote-only arbiter. useBridge enables
// per-connection network partitioning via disconnect()/reconnect() below.
var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3, oplogSize: 1, useBridge: true});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
var r = replTest.initiate({
"_id": "unicomplex",
"members": [
{"_id": 0, "host": nodes[0], "priority": 3},
{"_id": 1, "host": nodes[1]},
{"_id": 2, "host": nodes[2], arbiterOnly: true}
]
});
// Make sure we have a master
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
var master = replTest.getPrimary();
var a_conn = conns[0];
var A = a_conn.getDB("admin");
var b_conn = conns[1];
// Allow reads from these connections even while the node is a secondary;
// needed for the itcount() checks performed after the failovers below.
a_conn.setSlaveOk();
b_conn.setSlaveOk();
var B = b_conn.getDB("admin");
// The priority-3 config should make node 0 the initial primary.
assert(master == conns[0], "conns[0] assumed to be master");
assert(a_conn == master);
// Wait for initial replication
var a = a_conn.getDB("foo");
var b = b_conn.getDB("foo");
/*
 * Force the oplog to roll over (only on even-millisecond start times, so the
 * rollback path is exercised both with and without a wrapped oplog across
 * runs). oplogSize is 1MB, so repeated 1000-op bulk updates cycle it quickly.
 */
if (new Date() % 2 == 0) {
    jsTest.log("ROLLING OPLOG AS PART OF TEST (we only do this sometimes)");
    var pass = 1;
    // Remember the current oldest oplog entry; once the oldest entry changes,
    // the original head has been truncated, i.e. the oplog has wrapped.
    var first = a.getSisterDB("local").oplog.rs.find().sort({$natural: 1}).limit(1)[0];
    // Check the write result, matching the assert.writeOK convention used for
    // every other insert in this test.
    assert.writeOK(a.roll.insert({x: 1}));
    while (1) {
        var bulk = a.roll.initializeUnorderedBulkOp();
        for (var i = 0; i < 1000; i++) {
            bulk.find({}).update({$inc: {x: 1}});
        }
        // unlikely secondary isn't keeping up, but let's avoid possible intermittent
        // issues with that.
        bulk.execute({w: 2});
        var op = a.getSisterDB("local").oplog.rs.find().sort({$natural: 1}).limit(1)[0];
        // Compare entry hashes: a different oldest entry means the oplog rolled.
        if (tojson(op.h) != tojson(first.h)) {
            printjson(op);
            printjson(first);
            break;
        }
        pass++;
    }
    jsTest.log("PASSES FOR OPLOG ROLL: " + pass);
} else {
    jsTest.log("NO ROLL");
}
// Seed the collection on A. The final insert uses w:2, so all three documents
// are known to have replicated to B before we partition the set.
assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}, {writeConcern: {w: 2}}));
assert.eq(a.bar.find().itcount(), 3, "a.count");
assert.eq(b.bar.find().itcount(), 3, "b.count");
// Isolate A from both B and the arbiter. B still sees the arbiter, so B can
// reach a majority and get elected primary.
conns[0].disconnect(conns[1]);
conns[0].disconnect(conns[2]);
assert.soon(function() {
try {
// isMaster may throw while connections are being torn down/re-established.
return B.isMaster().ismaster;
} catch (e) {
return false;
}
});
// These 97 documents will be rolled back eventually: they are accepted only by
// B, which is about to lose its majority, so no other data-bearing node has them.
for (var i = 4; i <= 100; i++) {
    // Check each write result, matching the assert.writeOK convention used for
    // every other insert in this test.
    assert.writeOK(b.bar.insert({q: i}));
}
assert.eq(100, b.bar.find().itcount(), "u.count");
// a should not have the new data as it was partitioned.
// Cut B off from the arbiter so B loses its majority and must step down.
conns[1].disconnect(conns[2]);
jsTest.log("*************** wait for server to reconnect ****************");
// Reconnect A to the arbiter; with B isolated, A regains a majority and, with
// priority 3, gets re-elected primary.
conns[0].reconnect(conns[2]);
jsTest.log("*************** B ****************");
assert.soon(function() {
    try {
        // Wait for B to step down from primary.
        return !B.isMaster().ismaster;
    } catch (e) {
        return false;
    }
});
jsTest.log("*************** A ****************");
assert.soon(function() {
    try {
        // Wait for A to be re-elected primary.
        return A.isMaster().ismaster;
    } catch (e) {
        return false;
    }
});
// A never saw B's 97 writes, so it still has only the original three documents.
assert.eq(3, a.bar.find().itcount(), "t is 3");
assert.writeOK(a.bar.insert({q: 7}));
assert.writeOK(a.bar.insert({q: 8}));
// A is 1 2 3 7 8
// B is 1 2 3 4 5 6 ... 100
// Snapshot the primary's connection counter so we can bound how many
// connections the rollback procedure opens against its sync source.
var connectionsCreatedOnPrimaryBeforeRollback = a.serverStatus().connections.totalCreated;
// bring B back online
conns[0].reconnect(conns[1]);
conns[1].reconnect(conns[2]);
// Wait for B to reach A's latest optime (helper from rslib.js); by then B has
// rolled back its divergent documents and synced A's {q: 7} and {q: 8}.
awaitOpTime(b.getMongo(), getLatestOp(a_conn).ts);
replTest.awaitSecondaryNodes();
replTest.awaitReplication();
// Both nodes must now agree on A's version of the data: q = 1, 2, 3, 7, 8.
checkFinalResults(a);
checkFinalResults(b);
var connectionsCreatedOnPrimaryAfterRollback = a.serverStatus().connections.totalCreated;
var connectionsCreatedOnPrimaryDuringRollback =
connectionsCreatedOnPrimaryAfterRollback - connectionsCreatedOnPrimaryBeforeRollback;
jsTest.log('connections created during rollback = ' +
connectionsCreatedOnPrimaryDuringRollback);
// Regression check: rollback should not open an excessive number of
// connections to its sync source; 50 is a generous upper bound.
assert.lt(connectionsCreatedOnPrimaryDuringRollback,
50,
'excessive number of connections made by secondary to primary during rollback');
replTest.stopSet(15);
}());