summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorScott Hernandez <scotthernandez@gmail.com>2014-09-09 08:01:12 -0400
committerScott Hernandez <scotthernandez@gmail.com>2014-09-12 08:35:39 -0400
commit41fcb70192d773987cc9635d4f125b6afbf54e1f (patch)
tree3c4dae3a7c83107bede21a102bfa4d0f08e80e41
parentc9eab15c54d135dcf3357802402b1d8d2855b212 (diff)
downloadmongo-41fcb70192d773987cc9635d4f125b6afbf54e1f.tar.gz
rewrite fastsync jstest, and slow down resync_with_write_load
-rw-r--r--jstests/replsets/fastsync.js233
-rw-r--r--jstests/replsets/resync_with_write_load.js31
-rw-r--r--jstests/replsets/sync_fastsync.js58
-rw-r--r--src/mongo/shell/servers_misc.js11
4 files changed, 94 insertions, 239 deletions
diff --git a/jstests/replsets/fastsync.js b/jstests/replsets/fastsync.js
deleted file mode 100644
index bd615675904..00000000000
--- a/jstests/replsets/fastsync.js
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * 1. insert 10000 objects
- * 2. export to two dbpaths
- * 3. add one node w/fastsync
- * 4. check that we never get "errmsg" : "initial sync cloning db: whatever"
- * 5. check writes are replicated
- */
-
-var w = 0;
-var wait = function(f) {
- w++;
- var n = 0;
- while (!f()) {
- if( n % 4 == 0 )
- print("toostale.js waiting " + w);
- if (++n == 4) {
- print("" + f);
- }
- assert(n < 200, 'tried 200 times, giving up');
- sleep(1000);
- }
-}
-
-var reconnect = function(a) {
- wait(function() {
- try {
- a.getDB("foo").bar.stats();
- return true;
- } catch(e) {
- print(e);
- return false;
- }
- });
-};
-
-ports = allocatePorts( 4 );
-
-var basename = "jstests_fastsync";
-var basePath = MongoRunner.dataPath + basename;
-var hostname = getHostName();
-
-var pargs = new MongodRunner( ports[ 0 ], basePath + "-p", false, false,
- ["--replSet", basename, "--oplogSize", 2],
- {no_bind : true} );
-p = pargs.start();
-
-var admin = p.getDB("admin");
-var foo = p.getDB("foo");
-var local = p.getDB("local");
-
-var config = {_id : basename, members : [{_id : 0, host : hostname+":"+ports[0], priority:2}]};
-printjson(config);
-var result = admin.runCommand({replSetInitiate : config});
-print("result:");
-printjson(result);
-
-var count = 0;
-while (count < 10 && result.ok != 1) {
- count++;
- sleep(2000);
- result = admin.runCommand({replSetInitiate : config});
-}
-
-assert(result.ok, tojson(result));
-assert.soon(function() { result = false;
- try {
- result = admin.runCommand({isMaster:1}).ismaster;
- }
- catch(e) {
- print(e);
- return false;
- }
- return result;
- });
-
-print("1");
-var bulk = foo.bar.initializeUnorderedBulkOp();
-for (var i=0; i<10000; i++) {
- bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" });
-}
-assert.writeOK(bulk.execute());
-print("total in foo: "+foo.bar.count());
-
-print("2");
-admin.runCommand( {fsync:1,lock:1} );
-copyDbpath( basePath + "-p", basePath + "-s"+1 );
-admin.$cmd.sys.unlock.findOne();
-
-print("3");
-var startSlave = function(n) {
- var sargs = new MongodRunner( ports[ n ], basePath + "-s"+n, false, false,
- ["--replSet", basename, "--fastsync",
- "--oplogSize", 2], {no_bind : true} );
- var reuseData = true;
- var conn = sargs.start(reuseData);
-
- config = local.system.replset.findOne();
- config.version++;
- config.members.push({_id:n, host:hostname+":"+ports[n]});
-
- // When the slave is started, it'll try to load the config and find that it's
- // not in the config and close all connections in preparation for transitioning
- // to "removed" state. If the reconfig adding it to the set happens to occur at
- // this point, the heartbeat request's connection will be cut off, causing the
- // reconfig to fail..
- assert.soon(function() {
- try {
- result = admin.runCommand({replSetReconfig : config});
- }
- catch (e) {
- print("failed to reconfig: "+e);
- return false;
- }
- return result.ok;
- });
- reconnect(p);
-
- print("4");
- var status = admin.runCommand({replSetGetStatus : 1});
- var count = 0;
- while (status.members[n].state != 2 && count < 200) {
- print("not a secondary yet");
- if (count % 10 == 0) {
- printjson(status);
- }
- assert(!status.members[n].errmsg || !status.members[n].errmsg.match("^initial sync cloning db"));
-
- sleep(1000);
-
- // disconnection could happen here
- try {
- status = admin.runCommand({replSetGetStatus : 1});
- }
- catch (e) {
- print(e);
- }
- count++;
- }
-
- assert.eq(status.members[n].state, 2);
-
- assert.soon(function() {
- return admin.runCommand({isMaster : 1}).ismaster;
- });
-
- admin.foo.insert({x:1});
- assert.soon(function() {
- try {
- var last = local.oplog.rs.find().sort({$natural:-1}).limit(1).next();
- var cur = conn.getDB("local").oplog.rs.find().sort({$natural:-1}).limit(1).next();
- print("last: "+tojson(last)+" cur: "+tojson(cur));
- return cur != null && last != null && cur.ts.t == last.ts.t && cur.ts.i == last.ts.i;
- }
- catch (e) {
- print(e);
- }
- return false;
- });
-
- return conn;
-};
-
-var s1 = startSlave(1);
-
-var me1 = null;
-
-// local.me will not be populated until the secondary reports back to the
-// primary that it is syncing
-assert.soon(function() {
- me1 = s1.getDB("local").me.findOne();
- if (me1 == null) {
- return false;
- }
-
- print("me: " +me1._id);
- return me1._id != null;
-});
-
-print("5");
-s1.getDB("admin").runCommand( {fsync:1,lock:1} );
-copyDbpath( basePath + "-s1", basePath + "-s2" );
-s1.getDB("admin").$cmd.sys.unlock.findOne();
-
-var s2 = startSlave(2);
-
-var me2 = s2.getDB("local").me.findOne();
-
-print("me: " +me2._id);
-assert(me1._id != me2._id);
-
-print("restart member with a different port and make it a new set");
-try {
- p.getDB("admin").runCommand({shutdown:1});
-}
-catch(e) {
- print("good, shutting down: " +e);
-}
-sleep(10000);
-
-pargs = new MongodRunner( ports[ 3 ], basePath + "-p", false, false,
- ["--replSet", basename, "--oplogSize", 2],
- {no_bind : true} );
-pargs.start(true);
-
-p = new Mongo("localhost:"+ports[3]);
-
-// initFromConfig will keep closing sockets, so we'll a couple of times
-assert.soon(function() {
- try {
- p.getDB("admin").runCommand({replSetReconfig : {
- _id : basename,
- members : [{_id:0, host : hostname+":"+ports[3]}]
- }, force : true});
- }
- catch (e) {
- print(e);
- return false;
- }
-
- return true;
-});
-
-print("start waiting for primary...");
-assert.soon(function() {
- try {
- return p.getDB("admin").runCommand({isMaster : 1}).ismaster;
- }
- catch(e) {
- print(e);
- }
- return false;
- }, "waiting for master", 60000);
-
diff --git a/jstests/replsets/resync_with_write_load.js b/jstests/replsets/resync_with_write_load.js
index aaf53c91dd8..0e93e0300e7 100644
--- a/jstests/replsets/resync_with_write_load.js
+++ b/jstests/replsets/resync_with_write_load.js
@@ -5,11 +5,12 @@
* We cannot test each phase of the initial sync directly but by providing constant writes we can
* assume that each individual phase will have data to work with, and therefore tested.
*/
-var replTest = new ReplSetTest({name: 'resync', nodes: 2, oplogSize: 100});
+var testName = "resync_with_write_load"
+var replTest = new ReplSetTest({name: testName, nodes: 2, oplogSize: 100});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-var config = { "_id": "resync",
+var config = { "_id": testName,
"members": [
{"_id": 0, "host": nodes[0], priority:4},
{"_id": 1, "host": nodes[1]}]
@@ -34,20 +35,38 @@ assert.writeOK( A.foo.insert({ x: 1 }, { writeConcern: { w: 1, wtimeout: 60000 }
replTest.stop(BID);
print("******************** starting load for 30 secs *********************");
-var work = 'var start=new Date().getTime(); db.timeToStartTrigger.insert({_id:1}); while(true) {for(x=0;x<1000;x++) {db["a" + x].insert({a:x})};sleep(1); if((new Date().getTime() - start) > 30000) break; }';
+var work = function() {
+ print("starting loadgen");
+ var start=new Date().getTime();
+ db.timeToStartTrigger.insert({_id:1});
+ while (true) {
+ for (x=0; x < 100; x++) {
+ db["a" + x].insert({a:x});
+ };
+
+ var runTime = (new Date().getTime() - start);
+ if (runTime > 30000)
+ break;
+ else if (runTime < 5000) // back-off more during first 5 seconds
+ sleep(50);
+ else
+ sleep(1);
+ };
+ print("finshing loadgen");
+ };
//insert enough that resync node has to go through oplog replay in each step
-var loadGen = startParallelShell( work, replTest.ports[0] );
+var loadGen = startParallelShell(work, replTest.ports[0]);
// wait for document to appear to continue
assert.soon(function() {
try {
- return 1 == a_conn.getDB("test")["timeToStartTrigger"].count();
+ return 1 == master.getDB("test")["timeToStartTrigger"].count();
} catch ( e ) {
print( e );
return false;
}
-}, "waited too long for start trigger");
+}, "waited too long for start trigger", 90 * 1000 /* 90 secs */ );
print("*************** STARTING node without data ***************");
replTest.start(BID);
diff --git a/jstests/replsets/sync_fastsync.js b/jstests/replsets/sync_fastsync.js
new file mode 100644
index 00000000000..5bc2351a734
--- /dev/null
+++ b/jstests/replsets/sync_fastsync.js
@@ -0,0 +1,58 @@
+/*
+ * Create a replica set where a member has the --fastsync option,
+ * then write some docs and ensure they
+ * are on the newly restarted member (with --fastsync) but older writes aren't.
+ */
+
+var testName = "sync_fastsync"
+var replTest = new ReplSetTest({name: testName,
+ nodes: {n0:{}, n1:{}, n2:{fastsync:""}},
+ oplogSize: 2});
+var nodes = replTest.nodeList();
+var conns = replTest.startSet();
+var config = { "_id": testName,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], priority:0}]
+ };
+var r = replTest.initiate(config);
+var master = replTest.getMaster();
+var mColl = master.getDB("test")[testName];
+var sColl = conns[1].getDB("test")[testName];
+var fsColl = conns[2].getDB("test")[testName];
+
+mColl.save({_id:1}, {writeConcern:{w:3}});
+
+// Ensure everyone has the same doc, and replication is working normally.
+assert.eq({_id:1}, mColl.findOne());
+assert.eq({_id:1}, sColl.findOne());
+assert.eq({_id:1}, fsColl.findOne());
+
+// Stop fastsync member to test, 3rd node which is prio:0
+replTest.stop(2);
+
+// Do write to the other two members, and check it.
+mColl.save({_id:2}, {writeConcern:{w:2}});
+assert.eq({_id:2}, mColl.findOne({_id:2}));
+assert.eq({_id:2}, sColl.findOne({_id:2}));
+
+// Start node without data
+replTest.start(2);
+assert.soon(function() {
+ try{
+ return fsColl.getDB().runCommand("isMaster").secondary;
+ } catch (e) {
+ printjson(e);
+ return false;
+ }
+})
+
+// Make sure only the tail of the oplog is on the fastsync member, plus new write.
+mColl.save({_id:3}, {writeConcern:{w:3}});
+assert.eq(null, fsColl.findOne({_id:1}), tojson(fsColl.find().toArray()));
+assert.eq({_id:2}, fsColl.findOne({_id:2}), tojson(fsColl.find().toArray()));
+assert.eq({_id:3}, fsColl.findOne({_id:3}), tojson(fsColl.find().toArray()));
+
+print("****** Test Completed *******")
+replTest.stopSet(); \ No newline at end of file
diff --git a/src/mongo/shell/servers_misc.js b/src/mongo/shell/servers_misc.js
index 12274368111..85d1cbec337 100644
--- a/src/mongo/shell/servers_misc.js
+++ b/src/mongo/shell/servers_misc.js
@@ -289,6 +289,17 @@ function startParallelShell( jsCode, port, noConnect ){
var args = ["mongo"];
+ // Convert function into call-string
+ if (typeof(jsCode) == "function") {
+ var id = Math.floor(Math.random() * 100000);
+ jsCode = "var f" + id + " = " + jsCode.toString() + ";f" + id + "();";
+ }
+ else if(typeof(jsCode) == "string") {}
+ // do nothing
+ else {
+ throw Error("bad first argument to startParallelShell");
+ }
+
if (noConnect) {
args.push("--nodb");
} else if (typeof(db) == "object") {