-rw-r--r--  jstests/multiVersion/2_test_launching_cluster.js                     |   5
-rw-r--r--  jstests/multiVersion/downgrade_replset.js                            |   4
-rw-r--r--  jstests/multiVersion/initialsync.js                                  |   7
-rw-r--r--  jstests/multiVersion/mixed_storage_version_replication.js            |   3
-rw-r--r--  jstests/multiVersion/upgrade_downgrade_mongod.js                     |   4
-rw-r--r--  jstests/multiVersion/wt_index_option_defaults_replset.js             |   1
-rw-r--r--  jstests/noPassthrough/initial_sync_cloner_dups.js                    |   4
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl.js                          |   7
-rw-r--r--  jstests/replsets/bulk_api_wc.js                                      |   2
-rw-r--r--  jstests/replsets/election_not_blocked.js                             |   3
-rw-r--r--  jstests/replsets/initial_sync1.js                                    |  37
-rw-r--r--  jstests/replsets/initial_sync2.js                                    |  22
-rw-r--r--  jstests/replsets/maintenance.js                                      |   5
-rw-r--r--  jstests/replsets/oplog_note_cmd.js                                   |   2
-rw-r--r--  jstests/replsets/protocol_version_upgrade_downgrade.js               |   7
-rw-r--r--  jstests/replsets/read_after_optime.js                                |   8
-rw-r--r--  jstests/replsets/remove1.js                                          |  27
-rw-r--r--  jstests/replsets/replset1.js                                         |   3
-rw-r--r--  jstests/replsets/tags.js                                             |   3
-rw-r--r--  jstests/sharding/csrs_upgrade_during_migrate.js                      |  13
-rw-r--r--  jstests/sharding/diffservers1.js                                     |   1
-rw-r--r--  jstests/slow1/replsets_priority1.js                                  |   2
-rw-r--r--  jstests/tool/dumprestore10.js                                        |   4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp                   |   5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp        |   5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp    |  41
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp |   4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp     |  18
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_test.cpp              | 117
-rw-r--r--  src/mongo/db/repl/replication_coordinator_test_fixture.cpp           |  19
-rw-r--r--  src/mongo/db/repl/replication_coordinator_test_fixture.h             |  18
-rw-r--r--  src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp              |   6
-rw-r--r--  src/mongo/shell/replsettest.js                                       |  24
-rw-r--r--  src/mongo/shell/shardingtest.js                                      |  18
34 files changed, 253 insertions(+), 196 deletions(-)
diff --git a/jstests/multiVersion/2_test_launching_cluster.js b/jstests/multiVersion/2_test_launching_cluster.js
index 6f00facaea3..014bf98fb6e 100644
--- a/jstests/multiVersion/2_test_launching_cluster.js
+++ b/jstests/multiVersion/2_test_launching_cluster.js
@@ -53,7 +53,7 @@ st = new ShardingTest({ shards : 2,
mongosOptions : { binVersion : versionsToCheck },
configOptions : { binVersion : versionsToCheck },
shardOptions : { binVersion : versionsToCheck }
-
+
} });
shards = [ st.shard0, st.shard1 ];
@@ -98,8 +98,7 @@ st = new ShardingTest({ shards : 2,
mongosOptions : { binVersion : versionsToCheck },
configOptions : { binVersion : versionsToCheck },
- rsOptions : { binVersion : versionsToCheck }
-
+ rsOptions : { binVersion : versionsToCheck, protocolVersion: 0 }
} });
var nodesA = st.rs0.nodes;
diff --git a/jstests/multiVersion/downgrade_replset.js b/jstests/multiVersion/downgrade_replset.js
index 17581827f11..6dd492efab3 100644
--- a/jstests/multiVersion/downgrade_replset.js
+++ b/jstests/multiVersion/downgrade_replset.js
@@ -14,7 +14,9 @@ var nodes = {n1: {binVersion: newVersion},
var rst = new ReplSetTest({name: name, nodes: nodes, nodeOptions: {storageEngine: 'mmapv1'}});
rst.startSet();
-rst.initiate();
+var replSetConfig = rst.getReplSetConfig();
+replSetConfig.protocolVersion = 0;
+rst.initiate(replSetConfig);
var primary = rst.getPrimary();
var coll = "test.foo";
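The change above is the recurring pattern in this patch: fetch the config that ReplSetTest generates, pin protocolVersion to 0, then pass the result to initiate(). A minimal standalone sketch of that pattern (the set name and node count here are illustrative):

    // Sketch only: initiate a ReplSetTest explicitly on protocol version 0.
    var rst = new ReplSetTest({name: "pv0_example", nodes: 2});  // illustrative name/size
    rst.startSet();
    var cfg = rst.getReplSetConfig();
    cfg.protocolVersion = 0;   // keep PV0 so downlevel binaries can participate
    rst.initiate(cfg);
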
diff --git a/jstests/multiVersion/initialsync.js b/jstests/multiVersion/initialsync.js
index 2fad997bf66..dac71ff24f4 100644
--- a/jstests/multiVersion/initialsync.js
+++ b/jstests/multiVersion/initialsync.js
@@ -14,7 +14,12 @@ var multitest = function(replSetVersion, newNodeVersion) {
print("Start up a two-node " + replSetVersion + " replica set.");
var rst = new ReplSetTest({name: name, nodes: nodes});
rst.startSet();
- rst.initiate();
+ var config = rst.getReplSetConfig();
+ // Set protocol version to 0 for 3.2 replset.
+ if (replSetVersion == newVersion) {
+ config.protocolVersion = 0;
+ }
+ rst.initiate(config);
// Wait for a primary node.
var primary = rst.getPrimary();
diff --git a/jstests/multiVersion/mixed_storage_version_replication.js b/jstests/multiVersion/mixed_storage_version_replication.js
index 888b4465389..a3a6f36c520 100644
--- a/jstests/multiVersion/mixed_storage_version_replication.js
+++ b/jstests/multiVersion/mixed_storage_version_replication.js
@@ -612,7 +612,8 @@ function doMultiThreadedWork(primary, numThreads) {
// Make sure everyone is syncing from the primary, to ensure we have all combinations of
// primary/secondary syncing.
config.settings = {chainingAllowed: false};
- replTest.initiate();
+ config.protocolVersion = 0;
+ replTest.initiate(config);
// Ensure all are synced.
replTest.awaitSecondaryNodes(120000);
var primary = replTest.getPrimary();
diff --git a/jstests/multiVersion/upgrade_downgrade_mongod.js b/jstests/multiVersion/upgrade_downgrade_mongod.js
index 244ea79d71e..35c93318d5e 100644
--- a/jstests/multiVersion/upgrade_downgrade_mongod.js
+++ b/jstests/multiVersion/upgrade_downgrade_mongod.js
@@ -258,7 +258,9 @@
function init_replication(conn){
var testDB = conn.getDB('test');
var testName = this.name;
- var rsconf = {_id: 'oplog', members: [ {_id: 0, host: 'localhost:' + conn.port}]};
+ var rsconf = {_id: 'oplog',
+ members: [ {_id: 0, host: 'localhost:' + conn.port}],
+ protocolVersion: 0};
assert.commandWorked(testDB.adminCommand({replSetInitiate : rsconf}),
testName + ' replSetInitiate');
diff --git a/jstests/multiVersion/wt_index_option_defaults_replset.js b/jstests/multiVersion/wt_index_option_defaults_replset.js
index a5c9c2cb248..451c56f807c 100644
--- a/jstests/multiVersion/wt_index_option_defaults_replset.js
+++ b/jstests/multiVersion/wt_index_option_defaults_replset.js
@@ -24,6 +24,7 @@
// Rig the election so that the 3.2 node becomes the primary.
var replSetConfig = rst.getReplSetConfig();
replSetConfig.members[1].priority = 0;
+ replSetConfig.protocolVersion = 0;
rst.initiate(replSetConfig);
diff --git a/jstests/noPassthrough/initial_sync_cloner_dups.js b/jstests/noPassthrough/initial_sync_cloner_dups.js
index 554f88e1ca5..602b6af9f4b 100644
--- a/jstests/noPassthrough/initial_sync_cloner_dups.js
+++ b/jstests/noPassthrough/initial_sync_cloner_dups.js
@@ -75,6 +75,10 @@ jsTestLog("add a new secondary");
var secondary = replTest.add({});
replTest.reInitiate(4*60*1000);
secondary.setSlaveOk();
+// Wait for the secondary to get ReplSetInitiate command.
+replTest.waitForState(secondary,
+ [replTest.STARTUP2, replTest.RECOVERING, replTest.SECONDARY],
+ 60 * 1000);
// This fail point will cause the first intial sync to fail, and leave an op in the buffer to
// verify the fix from SERVER-17807
diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js
index 791dc217b44..f56134f5008 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl.js
@@ -5,6 +5,8 @@
* Part 3: Change the TTL expireAfterSeconds field and check successful propogation to secondary.
*/
+load("jstests/replsets/rslib.js");
+
var rt = new ReplSetTest( { name : "ttl_repl" , nodes: 2 } );
/******** Part 1 ***************/
@@ -64,8 +66,9 @@ assert.eq( 6 , slave1col.count() , "docs not deleted on secondary" );
// add a new secondary, wait for it to fully join
var slave = rt.add();
-rt.reInitiate();
-rt.awaitSecondaryNodes();
+var config = rt.getReplSetConfig();
+config.version = 2;
+reconfig(rt, config);
var slave2col = slave.getDB( 'd' )[ 'c' ];
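Instead of re-running reInitiate and waiting for secondaries, the test now applies a bumped config through rslib's reconfig() helper, which handles the reconnects a reconfig can trigger (behavior inferred from its use here). A sketch of the pattern, mirroring the lines above:

    // Sketch: add a member, then apply the bumped config via the rslib reconfig helper.
    load("jstests/replsets/rslib.js");
    var newSecondary = rt.add();
    var cfg = rt.getReplSetConfig();  // regenerated config includes the added node
    cfg.version = 2;
    reconfig(rt, cfg);
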
diff --git a/jstests/replsets/bulk_api_wc.js b/jstests/replsets/bulk_api_wc.js
index 05f28d573ab..86348943644 100644
--- a/jstests/replsets/bulk_api_wc.js
+++ b/jstests/replsets/bulk_api_wc.js
@@ -4,7 +4,7 @@
jsTest.log("Starting bulk api write concern tests...");
-// Start a single-node replica set with no journal
+// Start a 2-node replica set with no journal
//Allows testing immediate write concern failures and wc application failures
var rst = new ReplSetTest({ nodes : 2 });
rst.startSet({ nojournal : "" });
diff --git a/jstests/replsets/election_not_blocked.js b/jstests/replsets/election_not_blocked.js
index f15efa1635b..20c2ff7cc59 100644
--- a/jstests/replsets/election_not_blocked.js
+++ b/jstests/replsets/election_not_blocked.js
@@ -20,7 +20,8 @@
{_id: 1, host: host+":"+port[1]},
{_id: 2, host: host+":"+port[2], hidden: true, priority: 0},
],
- // vetos only exist in protocol version 0, so we use PV0 explicitly here.
+ // In PV1, a voter writes the last vote to disk before granting the vote,
+ // so it cannot vote while fsync locked in PV1. Use PV0 explicitly here.
protocolVersion: 0});
replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000);
var master = replTest.getMaster();
diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js
index f55ae0b4910..a3ecaf5db68 100644
--- a/jstests/replsets/initial_sync1.js
+++ b/jstests/replsets/initial_sync1.js
@@ -20,7 +20,7 @@ print("1. Bring up set");
// SERVER-7455, this test is called from ssl/auth_x509.js
var x509_options1;
var x509_options2;
-var replTest = new ReplSetTest({name: basename,
+var replTest = new ReplSetTest({name: basename,
nodes : {node0 : x509_options1, node1 : x509_options2}});
var conns = replTest.startSet();
@@ -61,7 +61,7 @@ var admin_s2 = slave2.getDB("admin");
var config = replTest.getReplSetConfig();
config.version = 2;
-config.members.push({_id:2, host:hostname+":"+slave2.port});
+config.members.push({_id:2, host: slave2.host});
try {
admin.runCommand({replSetReconfig:config});
}
@@ -82,37 +82,20 @@ wait(function() {
(config3 && config3.version == config.version);
});
-wait(function() {
- var status = admin_s2.runCommand({replSetGetStatus:1});
- printjson(status);
- return status.members &&
- (status.members[2].state == 3 || status.members[2].state == 2);
- });
+replTest.waitForState(slave2, [replTest.SECONDARY, replTest.RECOVERING], 60 * 1000);
+print("7. Kill the secondary in the middle of syncing");
+replTest.stop(slave1);
-print("7. Kill #2 in the middle of syncing");
-replTest.stop(1);
-
-print("8. Eventually it should become a secondary");
+print("8. Eventually the new node should become a secondary");
print("if initial sync has started, this will cause it to fail and sleep for 5 minutes");
-wait(function() {
- var status = admin_s2.runCommand({replSetGetStatus:1});
- occasionally(function() { printjson(status); });
- return status.members[2].state == 2;
- }, 350);
+replTest.waitForState(slave2, replTest.SECONDARY, 60 * 1000);
-
-print("9. Bring #2 back up");
-replTest.start(1, {}, true);
+print("9. Bring the secondary back up");
+replTest.start(slave1, {}, true);
reconnect(slave1);
-wait(function() {
- var status = admin_s1.runCommand({replSetGetStatus:1});
- printjson(status);
- return status.ok === 1 && status.members && status.members.length >= 2 &&
- (status.members[1].state === 2 || status.members[1].state === 1);
- });
-
+replTest.waitForState(slave1, [replTest.PRIMARY, replTest.SECONDARY], 60 * 1000);
print("10. Insert some stuff");
master = replTest.getMaster();
diff --git a/jstests/replsets/initial_sync2.js b/jstests/replsets/initial_sync2.js
index 1e519048fae..03cf5b4e6f2 100644
--- a/jstests/replsets/initial_sync2.js
+++ b/jstests/replsets/initial_sync2.js
@@ -147,27 +147,7 @@ for (var i=0; i<10000; i++) {
print("12. Everyone happy eventually");
-// if 3 is master...
-if (master+"" != origMaster+"") {
- print("3 is master");
- slave2 = origMaster;
-}
-
-wait(function() {
- var op1 = getLatestOp(master);
- var op2 = getLatestOp(slave1);
- var op3 = getLatestOp(slave2);
-
- occasionally(function() {
- print("latest ops:");
- printjson(op1);
- printjson(op2);
- printjson(op3);
- });
-
- return friendlyEqual(getLatestOp(master), getLatestOp(slave1)) &&
- friendlyEqual(getLatestOp(master), getLatestOp(slave2));
- });
+replTest.awaitReplication();
replTest.stopSet();
};
diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js
index 506e885e0c4..34c0e83993b 100644
--- a/jstests/replsets/maintenance.js
+++ b/jstests/replsets/maintenance.js
@@ -2,7 +2,10 @@
var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 2} );
var conns = replTest.startSet({ verbose: 1 });
-replTest.initiate();
+var config = replTest.getReplSetConfig();
+config.members[0].priority = 2;
+replTest.initiate(config);
+replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60000);
// Make sure we have a master
var master = replTest.getMaster();
diff --git a/jstests/replsets/oplog_note_cmd.js b/jstests/replsets/oplog_note_cmd.js
index 25e60e0a94c..4a501211cd6 100644
--- a/jstests/replsets/oplog_note_cmd.js
+++ b/jstests/replsets/oplog_note_cmd.js
@@ -12,7 +12,7 @@ db.foo.insert({a:1});
var statusBefore = db.runCommand({replSetGetStatus: 1});
assert.commandWorked(db.runCommand({appendOplogNote: 1, data: {a: 1}}));
var statusAfter = db.runCommand({replSetGetStatus: 1});
-assert.lt(statusBefore.members[0].optime, statusAfter.members[0].optime);
+assert.lt(statusBefore.members[0].optime.ts, statusAfter.members[0].optime.ts);
// Make sure note written successfully
var op = db.getSiblingDB('local').oplog.rs.find().sort({$natural: -1}).limit(1).next();
diff --git a/jstests/replsets/protocol_version_upgrade_downgrade.js b/jstests/replsets/protocol_version_upgrade_downgrade.js
index 801eb9d0956..34928b4adc6 100644
--- a/jstests/replsets/protocol_version_upgrade_downgrade.js
+++ b/jstests/replsets/protocol_version_upgrade_downgrade.js
@@ -37,7 +37,8 @@ assert.writeOK(primaryColl.bar.insert({x: 1}, {writeConcern: {w: 3}}));
// Check optime format in protocol version 0, which is a Timestamp.
var res = primary.adminCommand({replSetGetStatus: 1});
assert.commandWorked(res);
-assert.eq(res.members[0].optime.term, null);
+// Check the optime is a Timestamp { t: ..., i: ...} , not an OpTime { ts: ..., t: ... }
+assert.eq(res.members[0].optime.ts, null);
//
// Upgrade protocol version
@@ -58,7 +59,7 @@ assert.writeOK(primaryColl.bar.insert({x: 2}, {writeConcern: {w: 3}}));
// Check optime format in protocol version 1, which is an object including the term.
res = primary.adminCommand({replSetGetStatus: 1});
assert.commandWorked(res);
-assert.eq(res.members[0].optime.term, NumberLong(0));
+assert.eq(res.members[0].optime.t, NumberLong(0));
//
// Downgrade protocol version
@@ -76,6 +77,6 @@ assert.writeOK(primaryColl.bar.insert({x: 3}, {writeConcern: {w: 3}}));
// Check optime format in protocol version 0, which is a Timestamp.
res = primary.adminCommand({replSetGetStatus: 1});
assert.commandWorked(res);
-assert.eq(res.members[0].optime.term, null);
+assert.eq(res.members[0].optime.ts, null);
})();
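The assertions above rely on the optime field changing shape with the protocol version: PV0 reports a bare Timestamp, while PV1 reports an OpTime document carrying ts and t. A hedged sketch of that check (assumes a connection named primary):

    // Sketch: distinguish PV0 from PV1 by the optime shape in replSetGetStatus.
    var status = primary.adminCommand({replSetGetStatus: 1});
    var optime = status.members[0].optime;
    // PV1 optimes are documents with ts and t; PV0 optimes are plain Timestamps, so ts is absent.
    var isPV1 = (optime.ts !== undefined);
    print("running under protocol version " + (isPV1 ? 1 : 0));
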
diff --git a/jstests/replsets/read_after_optime.js b/jstests/replsets/read_after_optime.js
index cb19940ee05..c73ab4574a8 100644
--- a/jstests/replsets/read_after_optime.js
+++ b/jstests/replsets/read_after_optime.js
@@ -14,15 +14,15 @@ var runTest = function(testDB, primaryConn) {
var localDB = primaryConn.getDB('local');
- var oplogTS = localDB.oplog.rs.find().sort({ $natural: -1 }).limit(1).next().ts;
- var twoSecTS = new Timestamp(oplogTS.getTime() + 2, 0);
+ var oplogTS = localDB.oplog.rs.find().sort({ $natural: -1 }).limit(1).next();
+ var twoSecTS = new Timestamp(oplogTS.ts.getTime() + 2, 0);
// Test timeout with maxTimeMS
var res = assert.commandFailed(testDB.runCommand({
find: 'user',
filter: { x: 1 },
readConcern: {
- afterOpTime: { ts: twoSecTS, t: 0 }
+ afterOpTime: { ts: twoSecTS, t: oplogTS.t }
},
maxTimeMS: 1000
}));
@@ -40,7 +40,7 @@ var runTest = function(testDB, primaryConn) {
find: 'user',
filter: { x: 1 },
readConcern: {
- afterOpTime: { ts: twoSecTS, t: 0 },
+ afterOpTime: { ts: twoSecTS, t: oplogTS.t },
maxTimeMS: 10 * 1000
}
}));
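The readConcern.afterOpTime document now has to carry the real term from the latest oplog entry rather than a hard-coded 0. A sketch of issuing such a read, reusing the test's own variable names (collection and filter are illustrative):

    // Sketch: read that waits until the node has replicated past a given optime.
    var last = primaryConn.getDB('local').oplog.rs.find().sort({$natural: -1}).limit(1).next();
    var res = testDB.runCommand({
        find: 'user',
        filter: {x: 1},
        readConcern: {afterOpTime: {ts: last.ts, t: last.t}},
        maxTimeMS: 10 * 1000
    });
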
diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js
index ca2055bf566..b70de7c257d 100644
--- a/jstests/replsets/remove1.js
+++ b/jstests/replsets/remove1.js
@@ -17,6 +17,7 @@ var replTest = new ReplSetTest( {name: name, nodes: 2} );
var nodes = replTest.startSet();
replTest.initiate();
var master = replTest.getMaster();
+var secondary = replTest.getSecondary();
print("Initial sync");
master.getDB("foo").bar.baz.insert({x:1});
@@ -25,11 +26,17 @@ replTest.awaitReplication();
print("Remove secondary");
var config = replTest.getReplSetConfig();
-
-config.members.pop();
+for (var i = 0; i < config.members.length; i++) {
+ if (config.members[i].host == secondary.host) {
+ config.members.splice(i, 1);
+ break;
+ }
+};
config.version = 2;
-assert.eq(replTest.nodes[1].getDB("admin").runCommand({ping:1}).ok, 1, "we are connected to node[1]");
+assert.eq(secondary.getDB("admin").runCommand({ping:1}).ok,
+ 1,
+ "we should be connected to the secondary");
try {
master.getDB("admin").runCommand({replSetReconfig:config});
@@ -38,11 +45,11 @@ catch(e) {
print(e);
}
-// This test that nodes[1] disconnects us when it picks up the new config
+// This tests that the secondary disconnects us when it picks up the new config.
assert.soon(
function() {
try {
- replTest.nodes[1].getDB("admin").runCommand({ping:1});
+ secondary.getDB("admin").runCommand({ping:1});
} catch (e) {
return true;
}
@@ -50,9 +57,9 @@ assert.soon(
}
);
-// Now we should successfully reconnect to nodes[1]
-assert.eq(replTest.nodes[1].getDB("admin").runCommand({ping:1}).ok, 1,
- "we are connected to node[1]");
+// Now we should successfully reconnect to the secondary.
+assert.eq(secondary.getDB("admin").runCommand({ping:1}).ok, 1,
+ "we aren't connected to the secondary");
reconnect(master);
@@ -62,7 +69,7 @@ assert.soon(function() {
});
print("Add it back as a secondary");
-config.members.push({_id:1, host : host+":"+replTest.getPort(1)});
+config.members.push({_id:2, host : secondary.host});
config.version = 3;
printjson(config);
wait(function() {
@@ -105,7 +112,7 @@ wait(function() {
} , "wait2" );
print("reconfig with minority");
-replTest.stop(1);
+replTest.stop(secondary);
assert.soon(function() {
try {
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
index 4688b8d7e04..6a4b1063de9 100644
--- a/jstests/replsets/replset1.js
+++ b/jstests/replsets/replset1.js
@@ -132,7 +132,8 @@ var doTest = function( signal ) {
printjson(result);
var lastOp = result.lastOp;
var lastOplogOp = master.getDB("local").oplog.rs.find().sort({$natural : -1}).limit(1).next();
- assert.eq(lastOplogOp['ts'], lastOp);
+ assert.eq(lastOplogOp['ts'], lastOp['ts']);
+ assert.eq(lastOplogOp['t'], lastOp['t']);
ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } );
diff --git a/jstests/replsets/tags.js b/jstests/replsets/tags.js
index 2a1f77608a6..f58e255b53a 100644
--- a/jstests/replsets/tags.js
+++ b/jstests/replsets/tags.js
@@ -164,6 +164,9 @@ replTest.partition(2, 0);
replTest.partition(2, 1);
replTest.stop(2);
+// Node 1 with slightly higher priority will take over.
+replTest.waitForState(nodes[1], replTest.PRIMARY, 60 * 1000);
+
myprint("1 must become primary here because otherwise the other members will take too long " +
"timing out their old sync threads");
master = replTest.getMaster();
diff --git a/jstests/sharding/csrs_upgrade_during_migrate.js b/jstests/sharding/csrs_upgrade_during_migrate.js
index 0d717cb7554..1f0da2dc34d 100644
--- a/jstests/sharding/csrs_upgrade_during_migrate.js
+++ b/jstests/sharding/csrs_upgrade_during_migrate.js
@@ -36,9 +36,13 @@ var st;
var addSlaveDelay = function(rst) {
var conf = rst.getMaster().getDB('local').system.replset.findOne();
conf.version++;
- conf.members[1].priority = 0;
- conf.members[1].hidden = true;
- conf.members[1].slaveDelay = 30;
+ var secondaryIndex = 0;
+ if (conf.members[secondaryIndex].host === rst.getMaster().host) {
+ secondaryIndex = 1;
+ }
+ conf.members[secondaryIndex].priority = 0;
+ conf.members[secondaryIndex].hidden = true;
+ conf.members[secondaryIndex].slaveDelay = 30;
reconfig(rst, conf);
}
@@ -92,9 +96,6 @@ var st;
version: 1,
configsvr: true,
members: [ { _id: 0, host: st.c0.name }],
- settings: {
- protocolVersion :1
- }
};
assert.commandWorked(st.c0.adminCommand({replSetInitiate: csrsConfig}));
var csrs = [];
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 0d477147be8..f2c16664398 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -1,7 +1,6 @@
var s = new ShardingTest( "diffservers1" , 2 );
assert.eq( 2 , s.config.shards.count() , "server count wrong" );
-assert.eq( 2 , s._configServers[0].getDB( "config" ).shards.count() , "where are servers!" );
assert.eq( 0 , s._shardServers[0].getDB( "config" ).shards.count() , "shouldn't be here" );
assert.eq( 0 , s._shardServers[1].getDB( "config" ).shards.count() , "shouldn't be here" );
diff --git a/jstests/slow1/replsets_priority1.js b/jstests/slow1/replsets_priority1.js
index db28539ebe4..c9ec08fd78f 100644
--- a/jstests/slow1/replsets_priority1.js
+++ b/jstests/slow1/replsets_priority1.js
@@ -33,7 +33,7 @@ var everyoneOkSoon = function() {
var checkPrimaryIs = function (node) {
- print("nreplsets_priority1.js checkPrimaryIs(" + node + ")");
+ print("nreplsets_priority1.js checkPrimaryIs(" + node.host + ")");
var status;
diff --git a/jstests/tool/dumprestore10.js b/jstests/tool/dumprestore10.js
index f59b131bb05..858032827a7 100644
--- a/jstests/tool/dumprestore10.js
+++ b/jstests/tool/dumprestore10.js
@@ -33,7 +33,7 @@ step("mongodump from replset");
var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
-runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--out", data );
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+master.port, "--out", data );
{
@@ -48,7 +48,7 @@ runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--out",
step("try mongorestore with write concern");
-runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+replTest.ports[0], "--dir", data );
+runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+master.port, "--dir", data );
var x = 0;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index d6de5b44011..031558554c4 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -1891,7 +1891,7 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* txn,
if (args.force) {
newConfigObj = incrementConfigVersionByRandom(newConfigObj);
}
- Status status = newConfig.initialize(newConfigObj);
+ Status status = newConfig.initialize(newConfigObj, oldConfig.getProtocolVersion() == 1);
if (!status.isOK()) {
error() << "replSetReconfig got " << status << " while parsing " << newConfigObj;
return Status(ErrorCodes::InvalidReplicaSetConfig, status.reason());
@@ -2003,11 +2003,10 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn,
}
ReplicaSetConfig newConfig;
- Status status = newConfig.initialize(configObj);
+ Status status = newConfig.initialize(configObj, true);
if (!status.isOK()) {
error() << "replSet initiate got " << status << " while parsing " << configObj;
return Status(ErrorCodes::InvalidReplicaSetConfig, status.reason());
- ;
}
if (replEnabled && newConfig.getReplSetName() != _settings.ourSetName()) {
str::stream errmsg;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
index 1eb90c747ac..e75ad1a675c 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
@@ -54,10 +54,15 @@ using executor::RemoteCommandResponse;
class ReplCoordElectTest : public ReplCoordTest {
protected:
+ void assertStartSuccess(const BSONObj& configDoc, const HostAndPort& selfHost);
void simulateEnoughHeartbeatsForElectability();
void simulateFreshEnoughForElectability();
};
+void ReplCoordElectTest::assertStartSuccess(const BSONObj& configDoc, const HostAndPort& selfHost) {
+ ReplCoordTest::assertStartSuccess(addProtocolVersion(configDoc, 0), selfHost);
+}
+
void ReplCoordElectTest::simulateEnoughHeartbeatsForElectability() {
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
index fa9843b15ad..0c630f772a5 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
@@ -53,12 +53,17 @@ using executor::RemoteCommandResponse;
class ReplCoordHBTest : public ReplCoordTest {
protected:
+ void assertStartSuccess(const BSONObj& configDoc, const HostAndPort& selfHost);
void assertMemberState(MemberState expected, std::string msg = "");
ReplSetHeartbeatResponse receiveHeartbeatFrom(const ReplicaSetConfig& rsConfig,
int sourceId,
const HostAndPort& source);
};
+void ReplCoordHBTest::assertStartSuccess(const BSONObj& configDoc, const HostAndPort& selfHost) {
+ ReplCoordTest::assertStartSuccess(addProtocolVersion(configDoc, 0), selfHost);
+}
+
void ReplCoordHBTest::assertMemberState(const MemberState expected, std::string msg) {
const MemberState actual = getReplCoord()->getMemberState();
ASSERT(expected == actual) << "Expected coordinator to report state " << expected.toString()
@@ -83,15 +88,15 @@ ReplSetHeartbeatResponse ReplCoordHBTest::receiveHeartbeatFrom(const ReplicaSetC
TEST_F(ReplCoordHBTest, JoinExistingReplSet) {
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig =
- assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1"))));
+ ReplicaSetConfig rsConfig = assertMakeRSConfigV0(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -146,15 +151,15 @@ TEST_F(ReplCoordHBTest, DoNotJoinReplSetIfNotAMember) {
// Tests that a node in RS_STARTUP will not transition to RS_REMOVED if it receives a
// configuration that does not contain it.
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig =
- assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1"))));
+ ReplicaSetConfig rsConfig = assertMakeRSConfigV0(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))));
init("mySet");
addSelf(HostAndPort("h4", 1));
const Date_t startDate = getNet()->now();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index f7a70005a7f..9fc73d5ec61 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -94,7 +94,7 @@ TEST_F(ReplCoordHBV1Test, JoinExistingReplSet) {
<< BSON("_id" << 2 << "host"
<< "h2:1") << BSON("_id" << 3 << "host"
<< "h3:1"))
- << "settings" << BSON("protocolVersion" << 1)));
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -157,7 +157,7 @@ TEST_F(ReplCoordHBV1Test, DoNotJoinReplSetIfNotAMember) {
<< BSON("_id" << 2 << "host"
<< "h2:1") << BSON("_id" << 3 << "host"
<< "h3:1"))
- << "settings" << BSON("protocolVersion" << 1)));
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h4", 1));
const Date_t startDate = getNet()->now();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index e35a58eaae9..d75b2c847fa 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -100,7 +100,7 @@ TEST_F(ReplCoordTest, ReconfigWithUninitializableConfig) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
BSONObjBuilder result;
ReplSetReconfigArgs args;
@@ -133,7 +133,7 @@ TEST_F(ReplCoordTest, ReconfigWithWrongReplSetName) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
BSONObjBuilder result;
ReplSetReconfigArgs args;
@@ -164,7 +164,7 @@ TEST_F(ReplCoordTest, ReconfigValidateFails) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
BSONObjBuilder result;
ReplSetReconfigArgs args;
@@ -227,7 +227,7 @@ TEST_F(ReplCoordTest, ReconfigQuorumCheckFails) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
Status status(ErrorCodes::InternalError, "Not Set");
stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
@@ -265,7 +265,7 @@ TEST_F(ReplCoordTest, ReconfigStoreLocalConfigDocumentFails) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
Status status(ErrorCodes::InternalError, "Not Set");
getExternalState()->setStoreLocalConfigDocumentStatus(
@@ -290,7 +290,7 @@ TEST_F(ReplCoordTest, ReconfigWhileReconfiggingFails) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
Status status(ErrorCodes::InternalError, "Not Set");
// first reconfig
@@ -367,7 +367,7 @@ TEST_F(ReplCoordTest, ReconfigSuccessful) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
Status status(ErrorCodes::InternalError, "Not Set");
stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
@@ -406,7 +406,7 @@ TEST_F(ReplCoordTest, ReconfigDuringHBReconfigFails) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
// set hbreconfig to hang while in progress
@@ -461,7 +461,7 @@ TEST_F(ReplCoordTest, HBReconfigDuringReconfigFails) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
// start reconfigThread
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index cce2003c814..fe2811a8127 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/read_concern_response.h"
#include "mongo/db/repl/repl_set_heartbeat_args.h"
+#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replica_set_config.h"
#include "mongo/db/repl/replication_coordinator.h" // ReplSetReconfigArgs
@@ -91,6 +92,18 @@ struct OpTimeWithTermZero {
Timestamp timestamp;
};
+void runSingleNodeElection(ReplicationCoordinatorImpl* replCoord) {
+ replCoord->setMyLastOptime(OpTime(Timestamp(1, 0), 0));
+ ASSERT(replCoord->setFollowerMode(MemberState::RS_SECONDARY));
+ replCoord->waitForElectionFinish_forTest();
+
+ ASSERT(replCoord->isWaitingForApplierToDrain());
+ ASSERT(replCoord->getMemberState().primary()) << replCoord->getMemberState().toString();
+
+ OperationContextReplMock txn;
+ replCoord->signalDrainComplete(&txn);
+}
+
TEST_F(ReplCoordTest, StartupWithValidLocalConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
@@ -681,7 +694,7 @@ TEST_F(ReplCoordTest, AwaitReplicationReplSetBaseCases) {
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
statusAndDur = getReplCoord()->awaitReplication(&txn, time, writeConcern);
ASSERT_OK(statusAndDur.status);
@@ -708,7 +721,7 @@ TEST_F(ReplCoordTest, AwaitReplicationNumberOfNodesNonBlocking) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
OpTimeWithTermZero time1(100, 1);
OpTimeWithTermZero time2(100, 2);
@@ -796,7 +809,7 @@ TEST_F(ReplCoordTest, AwaitReplicationNamedModesNonBlocking) {
HostAndPort("node0"));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
OpTime time1(Timestamp(100, 1), 1);
OpTime time2(Timestamp(100, 2), 1);
@@ -964,7 +977,7 @@ TEST_F(ReplCoordTest, AwaitReplicationNumberOfNodesBlocking) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
ReplicationAwaiter awaiter(getReplCoord(), &txn);
@@ -1020,7 +1033,7 @@ TEST_F(ReplCoordTest, AwaitReplicationTimeout) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
ReplicationAwaiter awaiter(getReplCoord(), &txn);
@@ -1058,7 +1071,7 @@ TEST_F(ReplCoordTest, AwaitReplicationShutdown) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
ReplicationAwaiter awaiter(getReplCoord(), &txn);
@@ -1099,7 +1112,7 @@ TEST_F(ReplCoordTest, AwaitReplicationStepDown) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
ReplicationAwaiter awaiter(getReplCoord(), &txn);
@@ -1137,7 +1150,7 @@ TEST_F(ReplCoordTest, AwaitReplicationInterrupt) {
HostAndPort("node1"));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
ReplicationAwaiter awaiter(getReplCoord(), &txn);
@@ -1257,7 +1270,7 @@ TEST_F(StepDownTest, StepDownTimeoutAcquiringGlobalLock) {
ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
// Make sure stepDown cannot grab the global shared lock
Lock::GlobalWrite lk(txn.lockState());
@@ -1275,7 +1288,7 @@ TEST_F(StepDownTest, StepDownNoWaiting) {
ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
enterNetwork();
getNet()->runUntil(getNet()->now() + Seconds(2));
@@ -1283,7 +1296,7 @@ TEST_F(StepDownTest, StepDownNoWaiting) {
NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
RemoteCommandRequest request = noi->getRequest();
log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
if (hbArgs.initialize(request.cmdObj).isOK()) {
ReplSetHeartbeatResponse hbResp;
hbResp.setSetName(hbArgs.getSetName());
@@ -1321,9 +1334,7 @@ TEST_F(ReplCoordTest, StepDownAndBackUpSingleNode) {
<< "test1:1234"))),
HostAndPort("test1", 1234));
OperationContextReplMock txn;
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
-
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+ runSingleNodeElection(getReplCoord());
ASSERT_OK(getReplCoord()->stepDown(&txn, true, Milliseconds(0), Milliseconds(1000)));
getNet()->enterNetwork(); // Must do this before inspecting the topocoord
Date_t stepdownUntil = getNet()->now() + Seconds(1);
@@ -1419,7 +1430,7 @@ TEST_F(StepDownTest, StepDownNotCaughtUp) {
runner.setWaitTime(Milliseconds(0));
runner.setStepDownTime(Milliseconds(1000));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
runner.start(&txn);
Status status = runner.getResult();
@@ -1460,7 +1471,7 @@ TEST_F(StepDownTest, StepDownCatchUp) {
runner.setWaitTime(Milliseconds(10000));
runner.setStepDownTime(Milliseconds(60000));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
runner.start(&txn);
@@ -1471,7 +1482,7 @@ TEST_F(StepDownTest, StepDownCatchUp) {
NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
RemoteCommandRequest request = noi->getRequest();
log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
if (hbArgs.initialize(request.cmdObj).isOK()) {
ReplSetHeartbeatResponse hbResp;
hbResp.setSetName(hbArgs.getSetName());
@@ -1509,7 +1520,7 @@ TEST_F(StepDownTest, InterruptStepDown) {
runner.setWaitTime(Milliseconds(10000));
runner.setStepDownTime(Milliseconds(60000));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
runner.start(&txn);
@@ -1592,13 +1603,14 @@ TEST_F(ReplCoordTest, TestPrepareReplSetUpdatePositionCommand) {
memberIds.insert(memberId);
if (memberId == 0) {
// TODO(siyuan) Update when we change replSetUpdatePosition format
- ASSERT_EQUALS(optime1.timestamp, entry["optime"].timestamp());
+ ASSERT_EQUALS(optime1.timestamp, entry["optime"]["ts"].timestamp());
} else if (memberId == 1) {
- ASSERT_EQUALS(optime2.timestamp, entry["optime"].timestamp());
+ ASSERT_EQUALS(optime2.timestamp, entry["optime"]["ts"].timestamp());
} else {
ASSERT_EQUALS(2, memberId);
- ASSERT_EQUALS(optime3.timestamp, entry["optime"].timestamp());
+ ASSERT_EQUALS(optime3.timestamp, entry["optime"]["ts"].timestamp());
}
+ ASSERT_EQUALS(0, entry["optime"]["t"].Number());
}
ASSERT_EQUALS(3U, memberIds.size()); // Make sure we saw all 3 nodes
}
@@ -1916,7 +1928,7 @@ TEST_F(ReplCoordTest, UpdatePositionWithConfigVersionAndMemberIdTest) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
OpTimeWithTermZero time1(100, 1);
OpTimeWithTermZero time2(100, 2);
@@ -2016,7 +2028,7 @@ TEST_F(ReplCoordTest, AwaitReplicationReconfigSimple) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 2));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
OpTimeWithTermZero time(100, 2);
@@ -2078,7 +2090,7 @@ TEST_F(ReplCoordTest, AwaitReplicationReconfigNodeCountExceedsNumberOfNodes) {
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 2));
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
OpTimeWithTermZero time(100, 2);
@@ -2127,8 +2139,8 @@ TEST_F(ReplCoordTest, AwaitReplicationReconfigToSmallerMajority) {
<< "_id" << 4))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 1), 0));
- simulateSuccessfulElection();
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 1));
+ simulateSuccessfulV1Election();
OpTime time(Timestamp(100, 2), 1);
@@ -2193,7 +2205,7 @@ TEST_F(ReplCoordTest, AwaitReplicationMajority) {
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime time(Timestamp(100, 0), 1);
getReplCoord()->setMyLastOptime(time);
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
WriteConcernOptions majorityWriteConcern;
majorityWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
@@ -2244,7 +2256,7 @@ TEST_F(ReplCoordTest, LastCommittedOpTime) {
OpTime zero(Timestamp(0, 0), 0);
OpTime time(Timestamp(100, 0), 1);
getReplCoord()->setMyLastOptime(time);
- simulateSuccessfulElection();
+ simulateSuccessfulV1Election();
ASSERT_EQUALS(zero, getReplCoord()->getLastCommittedOpTime());
ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time));
@@ -2389,7 +2401,7 @@ TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
<< "node1:12345"
<< "_id" << 0))),
HostAndPort("node1", 12345));
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+ runSingleNodeElection(getReplCoord());
getReplCoord()->setMyLastOptime(OpTime(Timestamp(10, 0), 0));
@@ -2410,7 +2422,7 @@ TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) {
<< "node1:12345"
<< "_id" << 0))),
HostAndPort("node1", 12345));
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+ runSingleNodeElection(getReplCoord());
getReplCoord()->setMyLastOptime(OpTime(Timestamp(10, 0), 0));
@@ -2431,12 +2443,12 @@ TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
<< "node1:12345"
<< "_id" << 0))),
HostAndPort("node1", 12345));
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+ runSingleNodeElection(getReplCoord());
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- getReplCoord()->onSnapshotCreate(OpTime(Timestamp(100, 0), 0), SnapshotName(1));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 1));
+ getReplCoord()->onSnapshotCreate(OpTime(Timestamp(100, 0), 1), SnapshotName(1));
auto result = getReplCoord()->waitUntilOpTime(
- &txn, ReadConcernArgs(OpTime(Timestamp(50, 0), 0), ReadConcernLevel::kMajorityReadConcern));
+ &txn, ReadConcernArgs(OpTime(Timestamp(50, 0), 1), ReadConcernLevel::kMajorityReadConcern));
ASSERT_TRUE(result.didWait());
ASSERT_OK(result.getStatus());
@@ -2450,10 +2462,8 @@ TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
<< "node1:12345"
<< "_id" << 0))),
HostAndPort("node1", 12345));
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
-
-
- OpTime time(Timestamp(100, 0), 0);
+ runSingleNodeElection(getReplCoord());
+ OpTime time(Timestamp(100, 0), 1);
getReplCoord()->setMyLastOptime(time);
getReplCoord()->onSnapshotCreate(time, SnapshotName(1));
auto result = getReplCoord()->waitUntilOpTime(
@@ -2471,10 +2481,9 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
<< "node1:12345"
<< "_id" << 0))),
HostAndPort("node1", 12345));
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
-
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(0, 0), 0));
- OpTime committedOpTime(Timestamp(200, 0), 0);
+ runSingleNodeElection(getReplCoord());
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(0, 0), 1));
+ OpTime committedOpTime(Timestamp(200, 0), 1);
auto pseudoLogOp =
stdx::async(stdx::launch::async,
[this, &committedOpTime]() {
@@ -2485,7 +2494,7 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
auto result = getReplCoord()->waitUntilOpTime(
&txn,
- ReadConcernArgs(OpTime(Timestamp(100, 0), 0), ReadConcernLevel::kMajorityReadConcern));
+ ReadConcernArgs(OpTime(Timestamp(100, 0), 1), ReadConcernLevel::kMajorityReadConcern));
pseudoLogOp.get();
ASSERT_TRUE(result.didWait());
@@ -2500,11 +2509,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
<< "node1:12345"
<< "_id" << 0))),
HostAndPort("node1", 12345));
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
-
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(0, 0), 0));
+ runSingleNodeElection(getReplCoord());
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(0, 0), 1));
- OpTime opTimeToWait(Timestamp(100, 0), 0);
+ OpTime opTimeToWait(Timestamp(100, 0), 1);
auto pseudoLogOp =
stdx::async(stdx::launch::async,
@@ -2703,12 +2711,13 @@ TEST_F(ReplCoordTest, CancelAndRescheduleElectionTimeout) {
TEST_F(ReplCoordTest, CancelAndRescheduleElectionTimeoutWhenNotProtocolVersion1) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1))),
+ << "protocolVersion" << 0 << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ASSERT_TRUE(replCoord->setFollowerMode(MemberState::RS_SECONDARY));
@@ -2914,7 +2923,7 @@ TEST_F(ReplCoordTest, SnapshotCommitting) {
<< "test1:1234"))),
HostAndPort("test1", 1234));
OperationContextReplMock txn;
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+ runSingleNodeElection(getReplCoord());
OpTime time1(Timestamp(100, 1), 1);
OpTime time2(Timestamp(100, 2), 1);
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 7f0b3799217..738626eabdc 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -61,6 +61,18 @@ ReplicaSetConfig ReplCoordTest::assertMakeRSConfig(const BSONObj& configBson) {
return config;
}
+ReplicaSetConfig ReplCoordTest::assertMakeRSConfigV0(const BSONObj& configBson) {
+ return assertMakeRSConfig(addProtocolVersion(configBson, 0));
+}
+
+BSONObj ReplCoordTest::addProtocolVersion(const BSONObj& configDoc, int protocolVersion) {
+ BSONObjBuilder builder;
+ builder << "protocolVersion" << protocolVersion;
+ builder.appendElementsUnique(configDoc);
+ return builder.obj();
+}
+
+
void ReplCoordTest::setUp() {
_settings.replSet = "mySet/node1:12345,node2:54321";
}
@@ -149,7 +161,12 @@ void ReplCoordTest::start(const HostAndPort& selfHost) {
}
void ReplCoordTest::assertStartSuccess(const BSONObj& configDoc, const HostAndPort& selfHost) {
- start(configDoc, selfHost);
+ // Set default protocol version to 1.
+ if (!configDoc.hasField("protocolVersion")) {
+ start(addProtocolVersion(configDoc, 1), selfHost);
+ } else {
+ start(configDoc, selfHost);
+ }
ASSERT_NE(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
}
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.h b/src/mongo/db/repl/replication_coordinator_test_fixture.h
index 7d64c936fd0..a4023857091 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.h
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.h
@@ -67,12 +67,24 @@ public:
* Constructs a ReplicaSetConfig from the given BSON, or raises a test failure exception.
*/
static ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBSON);
+ static ReplicaSetConfig assertMakeRSConfigV0(const BSONObj& configBson);
+
+ /**
+ * Adds { protocolVersion: 0 or 1 } to the config.
+ */
+ static BSONObj addProtocolVersion(const BSONObj& configDoc, int protocolVersion);
protected:
virtual void setUp();
virtual void tearDown();
/**
+ * Asserts that calling start(configDoc, selfHost) successfully initiates the
+ * ReplicationCoordinator under test.
+ */
+ virtual void assertStartSuccess(const BSONObj& configDoc, const HostAndPort& selfHost);
+
+ /**
* Gets the network mock.
*/
executor::NetworkInterfaceMock* getNet() {
@@ -184,12 +196,6 @@ protected:
void simulateSuccessfulV1Election();
/**
- * Asserts that calling start(configDoc, selfHost) successfully initiates the
- * ReplicationCoordinator under test.
- */
- void assertStartSuccess(const BSONObj& configDoc, const HostAndPort& selfHost);
-
- /**
* Shuts down the objects under test.
*/
void shutdown();
diff --git a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
index 63f0c80532e..0b0e87e7a70 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
@@ -2307,12 +2307,14 @@ TEST_F(HeartbeatResponseTestV1, ShouldRespondNegativelyToPV0ElectionCommands) {
ASSERT_EQUALS("replset: incompatible replset protocol version: 1", status.reason());
ASSERT_TRUE(responseBuilder.obj().isEmpty());
+ BSONObjBuilder electResponseBuilder;
ReplicationCoordinator::ReplSetElectArgs electArgs;
status = internalErrorStatus;
- getTopoCoord().prepareElectResponse(electArgs, Date_t(), OpTime(), &responseBuilder, &status);
+ getTopoCoord().prepareElectResponse(
+ electArgs, Date_t(), OpTime(), &electResponseBuilder, &status);
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_EQUALS("replset: incompatible replset protocol version: 1", status.reason());
- ASSERT_TRUE(responseBuilder.obj().isEmpty());
+ ASSERT_TRUE(electResponseBuilder.obj().isEmpty());
}
TEST_F(HeartbeatResponseTestV1, ShouldChangeSyncSourceFresherMemberIsDown) {
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 2547db80a52..548eb0c434a 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -472,13 +472,23 @@ ReplSetTest.prototype.initiate = function( cfg , initCmd , timeout ) {
}
}
-ReplSetTest.prototype.reInitiate = function(timeout) {
- var master = this.nodes[0];
- var c = master.getDB("local")['system.replset'].findOne();
- var config = this.getReplSetConfig();
- var timeout = timeout || 60000;
- config.version = c.version + 1;
- this.initiate( config , 'replSetReconfig', timeout );
+ReplSetTest.prototype.reInitiate = function() {
+ "use strict";
+
+ var master = this.getMaster();
+ var res = master.adminCommand({ replSetGetConfig: 1 });
+ assert.commandWorked(res);
+ var config = this.getReplSetConfig();
+ config.version = res.config.version + 1;
+
+ try {
+ assert.commandWorked(master.adminCommand({replSetReconfig: config}));
+ }
+ catch (e) {
+ if (tojson(e).indexOf("error doing query: failed") < 0) {
+ throw e;
+ }
+ }
}
ReplSetTest.prototype.getLastOpTimeWritten = function() {
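With this rewrite, reInitiate() reads the live config from the primary via replSetGetConfig and submits a version-bumped replSetReconfig, so callers no longer pass a timeout. A usage sketch (variable names assumed):

    // Sketch: add a node, then push the new membership with reInitiate().
    var newNode = replTest.add({});
    replTest.reInitiate();  // reconfigs using the primary's current config version + 1
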
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index 99ed173c615..e794e02c874 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -19,6 +19,8 @@
* contain:
* {
* nodes {number}: number of replica members. Defaults to 3.
+ * protocolVersion {number}: protocol version of replset used by the
+ * replset initiation.
* For other options, @see ReplSetTest#start
* }
*
@@ -64,10 +66,10 @@
* specify options that are common all replica members.
* useHostname {boolean}: if true, use hostname of machine,
* otherwise use localhost
- * numReplicas {number}
+ * numReplicas {number}
* }
* }
- *
+ *
* Member variables:
* s {Mongo} - connection to the first mongos
* s0, s1, ... {Mongo} - connection to different mongos
@@ -193,8 +195,10 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
rsDefaults = Object.merge( rsDefaults, otherParams["rs" + i] )
rsDefaults.nodes = rsDefaults.nodes || otherParams.numReplicas
- var numReplicas = rsDefaults.nodes || 3
- delete rsDefaults.nodes
+ var numReplicas = rsDefaults.nodes || 3;
+ delete rsDefaults.nodes;
+ var protocolVersion = rsDefaults.protocolVersion;
+ delete rsDefaults.protocolVersion;
print( "Replica set test!" )
@@ -209,7 +213,11 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
nodes : rs.startSet(rsDefaults),
url : rs.getURL() };
- rs.initiate();
+ var config = rs.getReplSetConfig();
+ if (protocolVersion !== undefined && protocolVersion !== null) {
+ config.protocolVersion = protocolVersion;
+ }
+ rs.initiate(config);
this["rs" + i] = rs
this._rsObjects[i] = rs
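
The new protocolVersion entry in rsOptions lets ShardingTest callers pin the election protocol of replica-set shards. A sketch of how the multiversion tests above exercise it; the option nesting and the rs flag follow those tests and are otherwise assumptions:

    // Sketch: sharded cluster whose replica-set shards initiate with protocolVersion 0.
    var st = new ShardingTest({ shards: 2,
                                other: { rs: true,  // assumed flag to make shards replica sets
                                         rsOptions: { protocolVersion: 0 } } });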