Diffstat (limited to 'jstests/master_slave')
-rw-r--r--  jstests/master_slave/basic1.js  173
-rw-r--r--  jstests/master_slave/batch_write_command_wc_repl.js  71
-rw-r--r--  jstests/master_slave/block1.js  19
-rw-r--r--  jstests/master_slave/block2.js  29
-rw-r--r--  jstests/master_slave/initial_sync_id_index.js  48
-rw-r--r--  jstests/master_slave/initial_sync_wc2.js  22
-rw-r--r--  jstests/master_slave/master1.js  55
-rw-r--r--  jstests/master_slave/mod_move.js  58
-rw-r--r--  jstests/master_slave/repair.js  15
-rw-r--r--  jstests/master_slave/repl1.js  64
-rw-r--r--  jstests/master_slave/repl10.js  41
-rw-r--r--  jstests/master_slave/repl12.js  51
-rw-r--r--  jstests/master_slave/repl13.js  66
-rw-r--r--  jstests/master_slave/repl14.js  68
-rw-r--r--  jstests/master_slave/repl16.js  40
-rw-r--r--  jstests/master_slave/repl2.js  69
-rw-r--r--  jstests/master_slave/repl3.js  51
-rw-r--r--  jstests/master_slave/repl4.js  41
-rw-r--r--  jstests/master_slave/repl5.js  38
-rw-r--r--  jstests/master_slave/repl6.js  82
-rw-r--r--  jstests/master_slave/repl7.js  56
-rw-r--r--  jstests/master_slave/repl8.js  44
-rw-r--r--  jstests/master_slave/repl9.js  61
-rw-r--r--  jstests/master_slave/repl_sync_only_db_with_special_chars.js  25
-rw-r--r--  jstests/master_slave/snapshot1.js  47
25 files changed, 0 insertions, 1334 deletions
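All of the deleted files drove the legacy ReplTest master/slave harness. For orientation, a minimal sketch of the ReplSetTest-based pattern used by the surviving replication jstests; this is illustrative only (the set name and database are hypothetical), assuming a shell where ReplSetTest is available:

(function() {
    "use strict";

    // Two-node replica set in place of the old ReplTest(true)/ReplTest(false) pair.
    const rst = new ReplSetTest({name: "master_slave_replacement", nodes: 2});
    rst.startSet();
    rst.initiate();

    const primaryDB = rst.getPrimary().getDB("foo");
    const secondaryDB = rst.getSecondary().getDB("foo");

    // A w:2 write concern plays the role of the old {getlasterror: 1, w: 2} block() helper.
    assert.writeOK(primaryDB.a.insert({x: 1}, {writeConcern: {w: 2}}));

    // dbhash comparison, as the deleted basic1.js check() did between master and slave.
    rst.awaitReplication();
    assert.eq(primaryDB.runCommand("dbhash").md5, secondaryDB.runCommand("dbhash").md5);

    rst.stopSet();
})();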
diff --git a/jstests/master_slave/basic1.js b/jstests/master_slave/basic1.js
deleted file mode 100644
index ff286b23721..00000000000
--- a/jstests/master_slave/basic1.js
+++ /dev/null
@@ -1,173 +0,0 @@
-
-// test repl basics
-// data on master/slave is the same
-
-var rt = new ReplTest("basic1");
-
-m = rt.start(true);
-s = rt.start(false);
-
-function block() {
- am.runCommand({getlasterror: 1, w: 2, wtimeout: 3000});
-}
-
-am = m.getDB("foo");
-as = s.getDB("foo");
-
-function check(note) {
- var start = new Date();
- var x, y;
- while ((new Date()).getTime() - start.getTime() < 30000) {
- x = am.runCommand("dbhash");
- y = as.runCommand("dbhash");
- if (x.md5 == y.md5)
- return;
- sleep(200);
- }
- lastOpLogEntry =
- m.getDB("local").oplog.$main.find({op: {$ne: "n"}}).sort({$natural: -1}).limit(-1).next();
- note = note + tojson(am.a.find().toArray()) + " != " + tojson(as.a.find().toArray()) +
- "last oplog:" + tojson(lastOpLogEntry);
- assert.eq(x.md5, y.md5, note);
-}
-
-am.a.save({x: 1});
-check("A");
-
-am.a.save({x: 5});
-
-am.a.update({}, {$inc: {x: 1}});
-check("B");
-
-am.a.update({}, {$inc: {x: 1}}, false, true);
-check("C");
-
-// ----- check features -------
-
-// map/reduce
-assert.writeOK(am.mr.insert({tags: ["a"]}));
-assert.writeOK(am.mr.insert({tags: ["a", "b"]}));
-check("mr setup");
-
-m = function() {
- for (var i = 0; i < this.tags.length; i++) {
- print("\t " + i);
- emit(this.tags[i], 1);
- }
-};
-
-r = function(key, v) {
- return Array.sum(v);
-};
-
-correct = {
- a: 2,
- b: 1
-};
-
-function checkMR(t) {
- var res = t.mapReduce(m, r, {out: {inline: 1}});
- assert.eq(correct, res.convertToSingleObject(), "checkMR: " + tojson(t));
-}
-
-function checkNumCollections(msg, diff) {
- if (!diff)
- diff = 0;
- var m = am.getCollectionNames();
- var s = as.getCollectionNames();
- assert.eq(m.length + diff, s.length, msg + " lengths bad \n" + tojson(m) + "\n" + tojson(s));
-}
-
-checkNumCollections("MR1");
-checkMR(am.mr);
-checkMR(as.mr);
-checkNumCollections("MR2");
-
-block();
-checkNumCollections("MR3");
-
-var res = am.mr.mapReduce(m, r, {out: "xyz"});
-block();
-
-checkNumCollections("MR4");
-
-var t = am.rpos;
-var writeOption = {writeConcern: {w: 2, wtimeout: 3000}};
-t.insert({_id: 1, a: [{n: "a", c: 1}, {n: "b", c: 1}, {n: "c", c: 1}], b: [1, 2, 3]}, writeOption);
-check("after pos 1 ");
-
-t.update({"a.n": "b"}, {$inc: {"a.$.c": 1}}, writeOption);
-check("after pos 2 ");
-
-t.update({b: 2}, {$inc: {"b.$": 1}}, writeOption);
-check("after pos 3 ");
-
-t.update({b: 3}, {$set: {"b.$": 17}}, writeOption);
-check("after pos 4 ");
-
-printjson(am.rpos.findOne());
-printjson(as.rpos.findOne());
-
-// am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().limit(10).sort( { $natural : -1 }
-// ).forEach( printjson )
-
-t = am.b;
-var updateOption = {upsert: true, multi: false, writeConcern: {w: 2, wtimeout: 3000}};
-t.update({_id: "fun"}, {$inc: {"a.b.c.x": 6743}}, updateOption);
-check("b 1");
-
-t.update({_id: "fun"}, {$inc: {"a.b.c.x": 5}}, updateOption);
-check("b 2");
-
-t.update({_id: "fun"}, {$inc: {"a.b.c.x": 100, "a.b.c.y": 911}}, updateOption);
-assert.eq({_id: "fun", a: {b: {c: {x: 6848, y: 911}}}}, as.b.findOne(), "b 3");
-check("b 4");
-
-// lots of indexes
-
-am.lotOfIndexes.insert({x: 1});
-for (i = 0; i < 200; i++) {
- var idx = {};
- idx["x" + i] = 1;
- am.lotOfIndexes.ensureIndex(idx);
-}
-
-assert.soon(function() {
- return am.lotOfIndexes.getIndexes().length == as.lotOfIndexes.getIndexes().length;
-}, "lots of indexes a");
-
-assert.eq(
- am.lotOfIndexes.getIndexes().length, as.lotOfIndexes.getIndexes().length, "lots of indexes b");
-
-// multi-update with $inc
-
-am.mu1.update({_id: 1, $atomic: 1}, {$inc: {x: 1}}, true, true);
-x = {
- _id: 1,
- x: 1
-};
-assert.eq(x, am.mu1.findOne(), "mu1");
-assert.soon(function() {
- z = as.mu1.findOne();
- printjson(z);
- return friendlyEqual(x, z);
-}, "mu2");
-
-// profiling - this should be last
-
-am.setProfilingLevel(2);
-am.foo.insert({x: 1}, writeOption);
-am.foo.findOne();
-assert.eq(2, am.system.profile.count(), "P1");
-assert.eq(0, as.system.profile.count(), "P2");
-
-assert.eq(1, as.foo.findOne().x, "P3");
-assert.eq(0, as.system.profile.count(), "P4");
-
-assert(as.getCollectionNames().indexOf("system.profile") < 0, "P4.5");
-
-as.setProfilingLevel(2);
-as.foo.findOne();
-assert.eq(1, as.system.profile.count(), "P5");
-
-rt.stop();
diff --git a/jstests/master_slave/batch_write_command_wc_repl.js b/jstests/master_slave/batch_write_command_wc_repl.js
deleted file mode 100644
index 167182ed6ae..00000000000
--- a/jstests/master_slave/batch_write_command_wc_repl.js
+++ /dev/null
@@ -1,71 +0,0 @@
-//
-// Tests write-concern-related batch write protocol functionality for master/slave replication
-// More general write concern tests in replsets/batch_write_command_wc.js
-//
-
-var request;
-var result;
-
-// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
-
-jsTest.log("Starting legacy repl tests...");
-
-// Start a master node
-// Allows testing legacy repl failures
-var mongod = MongoRunner.runMongod({master: "", oplogSize: 40, smallfiles: ""});
-var coll = mongod.getCollection("test.batch_write_command_wc_repl");
-
-//
-// Basic insert, default WC
-coll.remove({});
-printjson(request = {
- insert: coll.getName(),
- documents: [{a: 1}]
-});
-printjson(result = coll.runCommand(request));
-assert(result.ok);
-assert.eq(1, result.n);
-assert.eq(1, coll.count());
-
-//
-// Basic insert, majority WC
-coll.remove({});
-printjson(request = {
- insert: coll.getName(),
- documents: [{a: 1}],
- writeConcern: {w: 'majority'}
-});
-printjson(result = coll.runCommand(request));
-assert(result.ok);
-assert.eq(1, result.n);
-assert.eq(1, coll.count());
-
-//
-// Basic insert, immediate bad wMode error
-coll.remove({});
-printjson(request = {
- insert: coll.getName(),
- documents: [{a: 1}],
- writeConcern: {w: 'invalid'}
-});
-printjson(result = coll.runCommand(request));
-assert(!result.ok);
-assert.eq(0, coll.count());
-
-//
-// Basic insert, error on WC with wtimeout
-coll.remove({});
-printjson(request = {
- insert: coll.getName(),
- documents: [{a: 1}],
- writeConcern: {w: 2, wtimeout: 1}
-});
-printjson(result = coll.runCommand(request));
-assert(result.ok);
-assert.eq(1, result.n);
-assert(result.writeConcernError);
-assert(result.writeConcernError.errInfo.wtimeout);
-assert.eq(1, coll.count());
-
-jsTest.log("DONE legacy repl tests");
-MongoRunner.stopMongod(mongod);
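The file's own header points at replsets/batch_write_command_wc.js for the replica-set counterparts. As a rough, hedged sketch, the wtimeout case above translates to requesting more acknowledgements than the set has members (collection name is illustrative):

// Illustrative only: provoke a writeConcernError with wtimeout on a two-node replica set.
const rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();

const coll = rst.getPrimary().getCollection("test.batch_write_command_wc");
const result = coll.runCommand({
    insert: coll.getName(),
    documents: [{a: 1}],
    writeConcern: {w: 3, wtimeout: 1}  // w exceeds the member count, so the concern times out
});
assert(result.ok);                 // the insert itself is applied
assert.eq(1, result.n);
assert(result.writeConcernError);  // but the write concern is reported as unsatisfied

rst.stopSet();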
diff --git a/jstests/master_slave/block1.js b/jstests/master_slave/block1.js
deleted file mode 100644
index ab427270340..00000000000
--- a/jstests/master_slave/block1.js
+++ /dev/null
@@ -1,19 +0,0 @@
-
-var rt = new ReplTest("block1");
-
-m = rt.start(true);
-s = rt.start(false);
-
-dbm = m.getDB("foo");
-dbs = s.getDB("foo");
-
-tm = dbm.bar;
-ts = dbs.bar;
-
-for (var i = 0; i < 1000; i++) {
- tm.insert({_id: i}, {writeConcern: {w: 2}});
- assert.eq(i + 1, ts.count(), "A" + i);
- assert.eq(i + 1, tm.count(), "B" + i);
-}
-
-rt.stop();
diff --git a/jstests/master_slave/block2.js b/jstests/master_slave/block2.js
deleted file mode 100644
index 6b6e1357aec..00000000000
--- a/jstests/master_slave/block2.js
+++ /dev/null
@@ -1,29 +0,0 @@
-
-var rt = new ReplTest("block1");
-
-m = rt.start(true);
-s = rt.start(false);
-
-function setup() {
- dbm = m.getDB("foo");
- dbs = s.getDB("foo");
-
- tm = dbm.bar;
- ts = dbs.bar;
-}
-setup();
-
-function check(msg) {
- assert.eq(tm.count(), ts.count(), "check: " + msg);
-}
-
-check("A");
-
-assert.writeOK(tm.insert({x: 1}, {writeConcern: {w: 2, wtimeout: ReplTest.kDefaultTimeoutMS}}));
-assert.writeOK(tm.insert({x: 2}, {writeConcern: {w: 2, wtimeout: ReplTest.kDefaultTimeoutMS}}));
-
-rt.stop(false);
-assert.writeError(tm.insert({x: 3}, {writeConcern: {w: 2, wtimeout: 3000}}));
-assert.eq(3, tm.count(), "D1");
-
-rt.stop();
diff --git a/jstests/master_slave/initial_sync_id_index.js b/jstests/master_slave/initial_sync_id_index.js
deleted file mode 100644
index 0b15a04be71..00000000000
--- a/jstests/master_slave/initial_sync_id_index.js
+++ /dev/null
@@ -1,48 +0,0 @@
-// Tests that the _id index spec is copied exactly during initial sync.
-(function() {
- "use strict";
-
- load("jstests/libs/get_index_helpers.js");
-
- const rt = new ReplTest();
- const master = rt.start(true);
- const masterDB = master.getDB("test");
-
- // Create a collection with a v=2 _id index.
- assert.commandWorked(
- masterDB.createCollection("collV2", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}}));
- let spec = GetIndexHelpers.findByName(masterDB.collV2.getIndexes(), "_id_");
- assert.neq(spec, null);
- assert.eq(spec.v, 2);
-
- // Create a collection with a v=1 _id index.
- assert.commandWorked(
- masterDB.createCollection("collV1", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
- spec = GetIndexHelpers.findByName(masterDB.collV1.getIndexes(), "_id_");
- assert.neq(spec, null);
- assert.eq(spec.v, 1);
-
- // Initial sync a slave.
- const slave = rt.start(false);
- const slaveDB = slave.getDB("test");
-
- // Wait for the slave to sync the collections.
- assert.soon(function() {
- var res = slaveDB.runCommand({listCollections: 1, filter: {name: "collV2"}});
- return res.cursor.firstBatch.length === 1;
- }, "Collection with v:2 _id index failed to sync on slave");
- assert.soon(function() {
- var res = slaveDB.runCommand({listCollections: 1, filter: {name: "collV1"}});
- return res.cursor.firstBatch.length === 1;
- }, "Collection with v:1 _id index failed to sync on slave");
-
- // Check _id index versions on slave.
- spec = GetIndexHelpers.findByName(slaveDB.collV2.getIndexes(), "_id_");
- assert.neq(spec, null);
- assert.eq(spec.v, 2);
- spec = GetIndexHelpers.findByName(slaveDB.collV1.getIndexes(), "_id_");
- assert.neq(spec, null);
- assert.eq(spec.v, 1);
-
- rt.stop();
-})();
\ No newline at end of file
diff --git a/jstests/master_slave/initial_sync_wc2.js b/jstests/master_slave/initial_sync_wc2.js
deleted file mode 100644
index abcc490f781..00000000000
--- a/jstests/master_slave/initial_sync_wc2.js
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * This test ensures that the w:2 write correctly waits until initial sync is done.
- * Before SERVER-25618 the w:2 write could return during initial sync since the slave reported
- * progress during initial sync.
- */
-var rt = new ReplTest("initial_sync_wc2");
-
-// The database name needs to be at the top of the list or at least before the "test" db.
-var dbToCloneName = "a_toclone";
-
-// Start the master and insert some data to ensure that the slave has to clone a database.
-var master = rt.start(true);
-assert.writeOK(master.getDB(dbToCloneName).mycoll.insert({a: 1}));
-assert.eq(1, master.getDB(dbToCloneName).mycoll.find({}).itcount());
-
-// Start the slave.
-var slave = rt.start(false);
-
-// Perform a w=2 write to ensure that slave can be read from, and initial sync is complete.
-assert.writeOK(master.getDB("test").mycoll.insert({}, {writeConcern: {w: 2}}));
-assert.eq(1, slave.getDB("test").mycoll.find({}).itcount());
-rt.stop();
\ No newline at end of file
diff --git a/jstests/master_slave/master1.js b/jstests/master_slave/master1.js
deleted file mode 100644
index 2292a061939..00000000000
--- a/jstests/master_slave/master1.js
+++ /dev/null
@@ -1,55 +0,0 @@
-// Test handling of clock skew and optimes across mongod instances
-//
-// There is no automatic fail-over in a master/slave deployment, so if the master goes down, no new
-// master will be elected. Therefore if the master is using an ephemeral storage engine, it cannot
-// be restarted without losing all data. This test expects that restarting the master will maintain
-// the node's data, so cannot be run with ephemeral storage engines.
-// @tags: [requires_persistence]
-
-var baseName = "jstests_repl_master1test";
-
-oplog = function() {
- return m.getDB("local").oplog.$main;
-};
-
-lastop = function() {
- return oplog().find().sort({$natural: -1}).next();
-};
-
-am = function() {
- return m.getDB(baseName).a;
-};
-
-rt = new ReplTest(baseName);
-
-m = rt.start(true);
-
-am().save({});
-assert.eq("i", lastop().op);
-
-op = lastop();
-printjson(op);
-op.ts.t = op.ts.t + 600000; // 10 minutes
-assert.commandWorked(m.getDB("local").runCommand({godinsert: "oplog.$main", obj: op}));
-
-rt.stop(true);
-m = rt.start(true, null, true);
-
-assert.eq(op.ts.t, lastop().ts.t, tojson({lastop: lastop()}));
-am().save({});
-assert.eq(op.ts.t, lastop().ts.t);
-assert.eq(op.ts.i + 1, lastop().ts.i);
-
-op = lastop();
-printjson(op);
-op.ts.i = Math.pow(2, 31) - 1;
-printjson(op);
-assert.commandWorked(m.getDB("local").runCommand({godinsert: "oplog.$main", obj: op}));
-
-rt.stop(true);
-m = rt.start(true, null, true);
-assert.eq(op.ts.i, lastop().ts.i);
-
-am().save({});
-
-assert.eq(0, rt.stop(true));
diff --git a/jstests/master_slave/mod_move.js b/jstests/master_slave/mod_move.js
deleted file mode 100644
index 2ec8996dce3..00000000000
--- a/jstests/master_slave/mod_move.js
+++ /dev/null
@@ -1,58 +0,0 @@
-
-// test repl basics
-// data on master/slave is the same
-
-var rt = new ReplTest("mod_move");
-
-m = rt.start(true, {oplogSize: 50});
-
-am = m.getDB("foo");
-
-function check(note) {
- var start = new Date();
- var x, y;
- while ((new Date()).getTime() - start.getTime() < 5 * 60 * 1000) {
- x = am.runCommand("dbhash");
- y = as.runCommand("dbhash");
- if (x.md5 == y.md5)
- return;
- sleep(200);
- }
- assert.eq(x.md5, y.md5, note);
-}
-
-// insert a lot of 'big' docs
-// so when we delete them the small docs move here
-
-BIG = 100000;
-N = BIG * 2;
-
-var bulk = am.a.initializeUnorderedBulkOp();
-for (var i = 0; i < BIG; i++) {
- bulk.insert({_id: i, s: 1, x: 1});
-}
-for (; i < N; i++) {
- bulk.insert({_id: i, s: 1});
-}
-for (i = 0; i < BIG; i++) {
- bulk.find({_id: i}).remove();
-}
-assert.writeOK(bulk.execute());
-assert.eq(BIG, am.a.count());
-
-if (am.serverStatus().storageEngine.name == "mmapv1") {
- assert.eq(1, am.a.stats().paddingFactor, "A2");
-}
-
-// start slave
-s = rt.start(false);
-as = s.getDB("foo");
-bulk = am.a.initializeUnorderedBulkOp();
-for (i = N - 1; i >= BIG; i--) {
- bulk.find({_id: i}).update({$set: {x: 1}});
-}
-assert.writeOK(bulk.execute());
-
-check("B");
-
-rt.stop();
diff --git a/jstests/master_slave/repair.js b/jstests/master_slave/repair.js
deleted file mode 100644
index bb5185d326a..00000000000
--- a/jstests/master_slave/repair.js
+++ /dev/null
@@ -1,15 +0,0 @@
-// Test repair on master
-
-var baseName = "jstests_repl_repair";
-
-rt = new ReplTest(baseName);
-
-m = rt.start(true);
-
-m.getDB(baseName)[baseName].save({});
-var c = m.getDB('local').oplog.$main.count();
-assert.automsg("c > 0");
-
-assert.commandWorked(m.getDB("local").repairDatabase());
-assert.automsg("c <= m.getDB( 'local' ).oplog.$main.count()");
-rt.stop();
\ No newline at end of file
diff --git a/jstests/master_slave/repl1.js b/jstests/master_slave/repl1.js
deleted file mode 100644
index ea8f85a6260..00000000000
--- a/jstests/master_slave/repl1.js
+++ /dev/null
@@ -1,64 +0,0 @@
-// Test basic replication functionality
-//
-// There is no automatic fail-over in a master/slave deployment, so if the master goes down, no new
-// master will be elected. Therefore if the master is using an ephemeral storage engine, it cannot
-// be restarted without losing all data. This test expects that restarting the master will maintain
-// the node's data, so cannot be run with ephemeral storage engines.
-// @tags: [requires_persistence]
-
-var baseName = "jstests_repl1test";
-
-soonCount = function(count) {
- assert.soon(function() {
- // print( "check count" );
- // print( "count: " + s.getDB( baseName ).z.find().count() );
- return s.getDB(baseName).a.find().count() == count;
- });
-};
-
-doTest = function(signal) {
-
- rt = new ReplTest("repl1tests");
-
- m = rt.start(true);
- s = rt.start(false);
-
- am = m.getDB(baseName).a;
-
- for (i = 0; i < 1000; ++i)
- am.save({_id: new ObjectId(), i: i});
-
- soonCount(1000);
- as = s.getDB(baseName).a;
- assert.eq(1, as.find({i: 0}).count());
- assert.eq(1, as.find({i: 999}).count());
-
- rt.stop(false, signal);
-
- for (i = 1000; i < 1010; ++i)
- am.save({_id: new ObjectId(), i: i});
-
- s = rt.start(false, null, true);
- soonCount(1010);
- as = s.getDB(baseName).a;
- assert.eq(1, as.find({i: 1009}).count());
-
- rt.stop(true, signal);
-
- m = rt.start(true, null, true);
- am = m.getDB(baseName).a;
-
- for (i = 1010; i < 1020; ++i)
- am.save({_id: new ObjectId(), i: i});
-
- assert.soon(function() {
- return as.find().count() == 1020;
- });
- assert.eq(1, as.find({i: 1019}).count());
-
- assert.automsg("m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0");
-
- rt.stop();
-};
-
-doTest(15); // SIGTERM
diff --git a/jstests/master_slave/repl10.js b/jstests/master_slave/repl10.js
deleted file mode 100644
index 10a05d3131a..00000000000
--- a/jstests/master_slave/repl10.js
+++ /dev/null
@@ -1,41 +0,0 @@
-// Test slave delay
-(function() {
- "use strict";
-
- var baseName = "jstests_repl10test";
-
- var soonCount = function(s, count) {
- assert.soon(function() {
- // print( "check count" );
- // print( "count: " + s.getDB( baseName ).z.find().count() );
- return s.getDB(baseName).a.find().count() == count;
- });
- };
-
- var doTest = function(signal) {
-
- var rt = new ReplTest("repl10tests");
-
- var m = rt.start(true);
- var s = rt.start(false, {"slavedelay": "30"});
-
- var am = m.getDB(baseName).a;
-
- am.save({i: 1});
-
- soonCount(s, 1);
-
- am.save({i: 2});
- assert.eq(2, am.count());
- sleep(2000);
- assert.eq(1, s.getDB(baseName).a.count());
-
- soonCount(s, 2);
-
- rt.stop();
- };
-
- print("repl10.js dotest(15)");
- doTest(15); // SIGTERM
- print("repl10.js SUCCESS");
-}());
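repl10.js exercised the slave-side "slavedelay" option. In replica-set terms the same behaviour comes from a delayed member in the config; a hedged sketch under that assumption:

// Illustrative only: a hidden, priority-0 member with slaveDelay is the replica-set
// analogue of starting the slave with {"slavedelay": "30"}.
const rst = new ReplSetTest({nodes: 2});
rst.startSet();
const conf = rst.getReplSetConfig();
conf.members[1].priority = 0;
conf.members[1].hidden = true;
conf.members[1].slaveDelay = 30;  // seconds
rst.initiate(conf);
// ... writes to rst.getPrimary() now take roughly 30s to appear on the delayed member ...
rst.stopSet();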
diff --git a/jstests/master_slave/repl12.js b/jstests/master_slave/repl12.js
deleted file mode 100644
index c3adac0ddbe..00000000000
--- a/jstests/master_slave/repl12.js
+++ /dev/null
@@ -1,51 +0,0 @@
-// SERVER-1626
-// check for initial sync of multiple db's
-
-function debug(x) {
- print("DEBUG:" + tojson(x));
-}
-
-rt = new ReplTest("repl12tests");
-
-m = rt.start(true);
-
-usedDBs = [];
-
-a = "a";
-for (i = 0; i < 3; ++i) {
- usedDBs.push(a);
- m.getDB(a).c.save({});
- a += "a";
-}
-
-// print("\n\n\n DB NAMES MASTER:");
-// printjson(m.getDBNames());
-
-var z = 10500;
-print("sleeping " + z + "ms");
-sleep(z);
-
-s = rt.start(false);
-
-function countHave() {
- var have = 0;
- for (var i = 0; i < usedDBs.length; i++) {
- if (s.getDB(usedDBs[i]).c.findOne())
- have++;
- }
- return have;
-}
-
-assert.soon(function() {
- try {
- var c = countHave();
- debug("count: " + c);
- return c == 3;
- } catch (e) {
- printjson(e);
- return false;
- }
-});
-
-// printjson(s.getDBNames());
-rt.stop();
\ No newline at end of file
diff --git a/jstests/master_slave/repl13.js b/jstests/master_slave/repl13.js
deleted file mode 100644
index b892de8a3d4..00000000000
--- a/jstests/master_slave/repl13.js
+++ /dev/null
@@ -1,66 +0,0 @@
-// Test update modifier uassert during initial sync. SERVER-4781
-var debuggingEnabled = false;
-
-function debug(x) {
- if (debuggingEnabled) {
- printjson(x);
- }
-}
-
-rt = new ReplTest("repl13tests");
-
-m = rt.start(true);
-mc = m.getDB('d')['c'];
-
-// Insert some documents with a:{} fields.
-var bulk = mc.initializeUnorderedBulkOp();
-for (var i = 0; i < 100000; ++i) {
- bulk.insert({_id: i, a: {}});
-}
-assert.writeOK(bulk.execute());
-
-s = rt.start(false);
-sc = s.getDB('d')['c'];
-
-// Wait for the initial clone to begin.
-assert.soon(function() {
- debug(sc.count());
- return sc.count() > 0;
-});
-
-// Update documents that will be cloned last with the intent that an updated version will be cloned.
-// This may cause an assertion when an update that was successfully applied to the original version
-// of a document is replayed against an updated version of the same document.
-bulk = mc.initializeUnorderedBulkOp();
-for (i = 99999; i >= 90000; --i) {
- // If the document is cloned as {a:1}, the {$set:{'a.b':1}} modifier will uassert.
- bulk.find({_id: i}).update({$set: {'a.b': 1}});
- bulk.find({_id: i}).update({$set: {a: 1}});
-}
-assert.writeOK(bulk.execute());
-
-// The initial sync completes and subsequent writes succeed, in spite of any assertions that occur
-// when the update operations above are replicated.
-mc.save({});
-assert.eq(100001, mc.count());
-assert.soon(function() {
- return sc.count() == 100001;
-});
-mc.save({});
-assert.eq(100002, mc.count());
-assert.soon(function() {
- return sc.count() == 100002;
-});
-
-debug(sc.findOne({_id: 99999}));
-debug(sc.findOne({_id: 90000}));
-
-assert.eq(1, sc.findOne({_id: 99999}).a);
-assert.eq(1, sc.findOne({_id: 90000}).a);
-
-m_hash = m.getDB("d").runCommand("dbhash");
-s_hash = s.getDB("d").runCommand("dbhash");
-
-assert.eq(
- m_hash.collections.c, s_hash.collections.c, "sad " + tojson(m_hash) + " " + tojson(s_hash));
-rt.stop();
\ No newline at end of file
diff --git a/jstests/master_slave/repl14.js b/jstests/master_slave/repl14.js
deleted file mode 100644
index e897d9376f1..00000000000
--- a/jstests/master_slave/repl14.js
+++ /dev/null
@@ -1,68 +0,0 @@
-// Test replication of an array by $push-ing to a missing field in the presence of a sparse index on
-// the field. SERVER-4907
-
-function testWithCollectionIndexIds(capped, sparse, useIds) {
- printjson({capped: capped, sparse: sparse, useIds: useIds});
-
- rt = new ReplTest("repl14tests");
-
- m = rt.start(true); // master
- if (capped) {
- m.getDB('d').createCollection('c', {capped: true, size: 5 * 1024});
- }
- mc = m.getDB('d')['c']; // master collection
-
- mc.ensureIndex({a: 1}, {sparse: sparse});
- toInsert = {};
- if (capped) {
- // Add a singleton array as padding, so the push later on will not change document size.
- toInsert = {p: [1]};
- }
- if (useIds) { // Insert with an auto-generated _id.
- mc.insert(toInsert);
- } else { // Otherwise avoid the auto generated _id.
- mc._mongo.insert(mc._fullName, toInsert, 0);
- }
- assert.eq(mc.count(), 1);
-
- s = rt.start(false); // slave
- sc = s.getDB('d')['c']; // slave collection
-
- // Wait for the document to be cloned.
- assert.soon(function() {
- return sc.count() > 0;
- }, "doc not replicated soon enough", 60 * 1000);
-
- modifiers = {$push: {a: 1}};
- if (capped) {
- // Delete our singleton array to balance the new singleton array we're going to create.
- modifiers['$unset'] = {p: 1};
- }
- assert.writeOK(mc.update({}, modifiers));
-
- // Wait for the update to be replicated.
- assert.soon(function() {
- return sc.count({a: 1}) > 0;
- });
-
- rt.stop();
-}
-
-function testWithCollectionIndex(capped, sparse) {
- testWithCollectionIndexIds(capped, sparse, true);
- if (capped) {
- testWithCollectionIndexIds(capped, sparse, false);
- }
-}
-
-function testWithCollection(capped) {
- testWithCollectionIndex(capped, true);
- testWithCollectionIndex(capped, false);
-}
-
-function test() {
- testWithCollection(true);
- testWithCollection(false);
-}
-
-test();
diff --git a/jstests/master_slave/repl16.js b/jstests/master_slave/repl16.js
deleted file mode 100644
index 84d0073eff8..00000000000
--- a/jstests/master_slave/repl16.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Test deduping of new documents without an _id index
-// SERVER-14132
-
-if (0) {
- function doTest(insert) {
- rt = new ReplTest("repl16tests");
- master = rt.start(true);
- master.getDB('d').createCollection('c', {capped: true, size: 5 * 1024, autoIndexId: false});
- mc = master.getDB('d')['c'];
-
- insert({_id: 1});
- insert({_id: 2});
-
- slave = rt.start(false);
- sc = slave.getDB('d')['c'];
-
- // Wait for the slave to copy the documents.
- assert.soon(function() {
- return sc.count() == 2;
- });
-
- insert({_id: 1});
- insert({_id: 2});
- insert({_id: 3});
- assert.eq(5, mc.count());
-
- // Wait for the slave to apply the operations.
- assert.soon(function() {
- return sc.count() == 5;
- });
-
- rt.stop();
- }
-
- function insertWithIds(obj) {
- mc.insert(obj);
- }
-
- doTest(insertWithIds);
-}
diff --git a/jstests/master_slave/repl2.js b/jstests/master_slave/repl2.js
deleted file mode 100644
index 4c0bc468e4d..00000000000
--- a/jstests/master_slave/repl2.js
+++ /dev/null
@@ -1,69 +0,0 @@
-// Test resync command:
-// 1. Start master/slave deployment.
-// 2. Insert a document to seed the oplog.
-// 3. Assert that the resync command fails when the slave is caught up to the master.
-// 4. Stop the slave.
-// 5. Insert enough documents to rotate the oplog.
-// 6. Restart the slave.
-// 7. Assert the resync command now works on the slave.
-// 8. Assert the slave eventually has the same data.
-// 9. Assert the slave now rejects the resync command.
-//
-// This test cannot be run on ephemeral storage engines, because after restarting, at step 6, the
-// slave will not have any data and will start an initial sync, rejecting the resync command.
-// @tags: [requires_persistence]
-
-soonCount = function(count) {
- assert.soon(function() {
- // print( "check count" );
- // print( "count: " + s.getDB( baseName ).z.find().count() );
- return s.getDB("foo").a.find().count() == count;
- });
-};
-
-doTest = function(signal, extraOpts) {
- print("signal: " + signal);
-
- var rt = new ReplTest("repl2tests");
-
- // implicit small oplog makes slave get out of sync
- m = rt.start(true, {oplogSize: "1"});
- s = rt.start(false, extraOpts);
-
- am = m.getDB("foo").a;
-
- am.save({_id: new ObjectId()});
- soonCount(1);
- assert.eq(0, s.getDB("admin").runCommand({"resync": 1}).ok);
- rt.stop(false, signal);
-
- big = new Array(2000).toString();
- for (i = 0; i < 1000; ++i)
- am.save({_id: new ObjectId(), i: i, b: big});
-
- s = rt.start(false, extraOpts, true);
-
- print("earliest op in master: " +
- tojson(m.getDB("local").oplog.$main.find().sort({$natural: 1}).limit(1).next()));
- print("latest op on slave: " + tojson(s.getDB("local").sources.findOne()));
-
- assert.soon(function() {
- var result = s.getDB("admin").runCommand({"resync": 1});
- print("resync says: " + tojson(result));
- return result.ok == 1;
- });
-
- soonCount(1001);
- assert.automsg("m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0");
-
- as = s.getDB("foo").a;
- assert.eq(1, as.find({i: 0}).count());
- assert.eq(1, as.find({i: 999}).count());
-
- assert.eq(0, s.getDB("admin").runCommand({"resync": 1}).ok);
-
- rt.stop();
-
-};
-
-doTest(15, {"vv": null}); // SIGTERM
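The resync command tested here was specific to master/slave. The closest replica-set flow is an empty member performing initial sync; a hedged sketch of that (database and document shape are illustrative):

// Illustrative only: a freshly added, empty member initial-syncs from the primary,
// roughly the path the master/slave resync command forced.
const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();

const primaryColl = rst.getPrimary().getDB("foo").a;
for (let i = 0; i < 1000; ++i) {
    assert.writeOK(primaryColl.insert({_id: i}));
}

rst.add();         // bring up a new, empty member
rst.reInitiate();  // reconfigure the set so the new member joins and initial-syncs
rst.awaitSecondaryNodes();
rst.awaitReplication();

const secondary = rst.getSecondary();
secondary.setSlaveOk();
assert.eq(1000, secondary.getDB("foo").a.find().itcount());

rst.stopSet();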
diff --git a/jstests/master_slave/repl3.js b/jstests/master_slave/repl3.js
deleted file mode 100644
index ce1c355342f..00000000000
--- a/jstests/master_slave/repl3.js
+++ /dev/null
@@ -1,51 +0,0 @@
-// Test autoresync
-
-var baseName = "jstests_repl3test";
-
-soonCount = function(count) {
- assert.soon(function() {
- // print( "check count" );
- // print( "count: " + s.getDB( baseName ).z.find().count() + ", expected: " +
- // count );
- return s.getDB(baseName).a.find().itcount() == count;
- });
-};
-
-doTest = function(signal) {
-
- print("repl3.js doTest(" + signal + ")");
-
- rt = new ReplTest("repl3tests");
-
- m = rt.start(true);
- s = rt.start(false);
-
- am = m.getDB(baseName).a;
-
- am.save({_id: new ObjectId()});
- soonCount(1);
- rt.stop(false, signal);
-
- big = new Array(2000).toString();
- for (i = 0; i < 1000; ++i)
- am.save({_id: new ObjectId(), i: i, b: big});
-
- s = rt.start(false, {autoresync: null}, true);
-
- // after SyncException, mongod waits 10 secs.
- sleep(15000);
-
- // Need the 2 additional seconds timeout, since commands don't work on an 'allDead' node.
- soonCount(1001);
- as = s.getDB(baseName).a;
- assert.eq(1, as.find({i: 0}).count());
- assert.eq(1, as.find({i: 999}).count());
-
- assert.commandFailed(s.getDB("admin").runCommand({"resync": 1}));
-
- rt.stop();
-};
-
-doTest(15); // SIGTERM
-
-print("repl3.js OK");
diff --git a/jstests/master_slave/repl4.js b/jstests/master_slave/repl4.js
deleted file mode 100644
index e28986e1573..00000000000
--- a/jstests/master_slave/repl4.js
+++ /dev/null
@@ -1,41 +0,0 @@
-// Test replication 'only' mode
-
-soonCount = function(db, coll, count) {
- assert.soon(function() {
- return s.getDB(db)[coll].find().count() == count;
- });
-};
-
-doTest = function() {
-
- rt = new ReplTest("repl4tests");
-
- m = rt.start(true);
- s = rt.start(false, {only: "c"});
-
- cm = m.getDB("c").c;
- bm = m.getDB("b").b;
-
- cm.save({x: 1});
- bm.save({x: 2});
-
- soonCount("c", "c", 1);
- assert.eq(1, s.getDB("c").c.findOne().x);
- sleep(10000);
- printjson(s.getDBNames());
- assert.eq(-1, s.getDBNames().indexOf("b"));
- assert.eq(0, s.getDB("b").b.find().count());
-
- rt.stop(false);
-
- cm.save({x: 3});
- bm.save({x: 4});
-
- s = rt.start(false, {only: "c"}, true);
- soonCount("c", "c", 2);
-};
-
-// Disabled because of SERVER-10344
-if (false) {
- doTest();
-}
diff --git a/jstests/master_slave/repl5.js b/jstests/master_slave/repl5.js
deleted file mode 100644
index 45d07b35da5..00000000000
--- a/jstests/master_slave/repl5.js
+++ /dev/null
@@ -1,38 +0,0 @@
-// Test auto reclone after failed initial clone
-
-soonCountAtLeast = function(db, coll, count) {
- assert.soon(function() {
- try {
- // print( "count: " + s.getDB( db )[ coll ].find().count() );
- return s.getDB(db)[coll].find().itcount() >= count;
- } catch (e) {
- return false;
- }
- });
-};
-
-doTest = function(signal, extraOpts) {
-
- rt = new ReplTest("repl5tests");
-
- m = rt.start(true);
-
- // Use a database that lexicographically follows "admin" to avoid failing to clone admin, since
- // as of SERVER-29452 mongod fails to start up without a featureCompatibilityVersion document.
- ma = m.getDB("b").a;
- var bulk = ma.initializeUnorderedBulkOp();
- for (i = 0; i < 100000; ++i)
- bulk.insert({i: i});
- assert.writeOK(bulk.execute());
-
- s = rt.start(false, extraOpts);
- soonCountAtLeast("b", "a", 1);
- rt.stop(false, signal);
-
- s = rt.start(false, extraOpts, true);
- soonCountAtLeast("b", "a", 10000);
-
- rt.stop();
-};
-
-doTest(15); // SIGTERM
diff --git a/jstests/master_slave/repl6.js b/jstests/master_slave/repl6.js
deleted file mode 100644
index c9ccdbdd559..00000000000
--- a/jstests/master_slave/repl6.js
+++ /dev/null
@@ -1,82 +0,0 @@
-// Test one master replicating to two slaves
-//
-// There is no automatic fail-over in a master/slave deployment, so if the master goes down, no new
-// master will be elected. Therefore if the master is using an ephemeral storage engine, it cannot
-// be restarted without losing all data. This test expects that restarting the master will maintain
-// the node's data, so cannot be run with ephemeral storage engines.
-// @tags: [requires_persistence]
-
-var baseName = "jstests_repl6test";
-
-soonCount = function(m, count) {
- assert.soon(function() {
- return m.getDB(baseName).a.find().count() == count;
- }, "expected count: " + count + " from : " + m);
-};
-
-doTest = function(signal) {
-
- ports = allocatePorts(3);
-
- ms1 = new ReplTest("repl6tests-1", [ports[0], ports[1]]);
- ms2 = new ReplTest("repl6tests-2", [ports[0], ports[2]]);
-
- m = ms1.start(true);
- s1 = ms1.start(false);
- s2 = ms2.start(false);
-
- am = m.getDB(baseName).a;
-
- for (i = 0; i < 1000; ++i)
- am.save({_id: new ObjectId(), i: i});
-
- soonCount(s1, 1000);
- soonCount(s2, 1000);
-
- check = function(as) {
- assert.eq(1, as.find({i: 0}).count());
- assert.eq(1, as.find({i: 999}).count());
- };
-
- as = s1.getDB(baseName).a;
- check(as);
- as = s2.getDB(baseName).a;
- check(as);
-
- ms1.stop(false, signal);
- ms2.stop(false, signal);
-
- for (i = 1000; i < 1010; ++i)
- am.save({_id: new ObjectId(), i: i});
-
- s1 = ms1.start(false, null, true);
- soonCount(s1, 1010);
- as = s1.getDB(baseName).a;
- assert.eq(1, as.find({i: 1009}).count());
-
- ms1.stop(true, signal);
-
- // Need to pause here on Windows, since killing processes does not synchronously close their
- // open file handles.
- sleep(5000);
-
- m = ms1.start(true, null, true);
- am = m.getDB(baseName).a;
-
- for (i = 1010; i < 1020; ++i)
- am.save({_id: new ObjectId(), i: i});
-
- soonCount(s1, 1020);
- assert.eq(1, as.find({i: 1019}).count());
-
- s2 = ms2.start(false, null, true);
- soonCount(s2, 1020);
- as = s2.getDB(baseName).a;
- assert.eq(1, as.find({i: 1009}).count());
- assert.eq(1, as.find({i: 1019}).count());
-
- ms1.stop();
- ms2.stop(false);
-};
-
-doTest(15); // SIGTERM
diff --git a/jstests/master_slave/repl7.js b/jstests/master_slave/repl7.js
deleted file mode 100644
index 3ed91b6aed3..00000000000
--- a/jstests/master_slave/repl7.js
+++ /dev/null
@@ -1,56 +0,0 @@
-// Test persistence of list of dbs to add.
-
-var getDBNamesNoThrow = function(conn) {
- try {
- return conn.getDBNames();
- } catch (e) {
- printjson(e);
- return [""];
- }
-};
-
-doTest = function(signal, extraOpts) {
-
- rt = new ReplTest("repl7tests");
-
- m = rt.start(true);
-
- // Use databases that lexicographically follow "admin" to avoid failing to clone admin, since
- // as of SERVER-29452 mongod fails to start up without a featureCompatibilityVersion document.
- for (n = "b"; n != "bbbbb"; n += "b") {
- m.getDB(n).b.save({x: 1});
- }
-
- s = rt.start(false, extraOpts);
-
- assert.soon(function() {
- return -1 != getDBNamesNoThrow(s).indexOf("bb");
- }, "bb timeout", 60000, 1000);
-
- rt.stop(false, signal);
-
- s = rt.start(false, extraOpts, signal);
-
- assert.soon(function() {
- for (n = "b"; n != "bbbbb"; n += "b") {
- if (-1 == getDBNamesNoThrow(s).indexOf(n))
- return false;
- }
- return true;
- }, "b-bbbb timeout", 60000, 1000);
-
- assert.soon(function() {
- for (n = "b"; n != "bbbbb"; n += "b") {
- if (1 != m.getDB(n).b.find().count()) {
- return false;
- }
- }
- return true;
- }, "b-bbbb count timeout");
-
- sleep(300);
-
- rt.stop();
-};
-
-doTest(15); // SIGTERM
diff --git a/jstests/master_slave/repl8.js b/jstests/master_slave/repl8.js
deleted file mode 100644
index ec0ea50317f..00000000000
--- a/jstests/master_slave/repl8.js
+++ /dev/null
@@ -1,44 +0,0 @@
-// Test cloning of capped collections
-
-baseName = "jstests_repl_repl8";
-
-rt = new ReplTest("repl8tests");
-
-m = rt.start(true);
-
-m.getDB(baseName).createCollection("first", {capped: true, size: 1000});
-assert(m.getDB(baseName).getCollection("first").isCapped());
-
-s = rt.start(false);
-
-assert.soon(function() {
- return s.getDB(baseName).getCollection("first").isCapped();
-});
-
-m.getDB(baseName).createCollection("second", {capped: true, size: 1000});
-assert.soon(function() {
- return s.getDB(baseName).getCollection("second").isCapped();
-});
-
-m.getDB(baseName).getCollection("third").save({a: 1});
-assert.soon(function() {
- return s.getDB(baseName).getCollection("third").exists();
-});
-assert.commandWorked(m.getDB("admin").runCommand(
- {renameCollection: "jstests_repl_repl8.third", to: "jstests_repl_repl8.third_rename"}));
-assert(m.getDB(baseName).getCollection("third_rename").exists());
-assert(!m.getDB(baseName).getCollection("third").exists());
-assert.soon(function() {
- return s.getDB(baseName).getCollection("third_rename").exists();
-});
-assert.soon(function() {
- return !s.getDB(baseName).getCollection("third").exists();
-});
-
-m.getDB(baseName).getCollection("fourth").save({a: 1});
-assert.commandWorked(m.getDB(baseName).getCollection("fourth").convertToCapped(1000));
-assert(m.getDB(baseName).getCollection("fourth").isCapped());
-assert.soon(function() {
- return s.getDB(baseName).getCollection("fourth").isCapped();
-});
-rt.stop();
\ No newline at end of file
diff --git a/jstests/master_slave/repl9.js b/jstests/master_slave/repl9.js
deleted file mode 100644
index 8bdabc93d0f..00000000000
--- a/jstests/master_slave/repl9.js
+++ /dev/null
@@ -1,61 +0,0 @@
-// Test replication of collection renaming
-
-baseName = "jstests_repl_repl9";
-
-rt = new ReplTest("repl9tests");
-
-m = rt.start(true);
-s = rt.start(false);
-
-admin = m.getDB("admin");
-
-debug = function(foo) {}; // print( foo ); }
-
-// rename within db
-
-m.getDB(baseName).one.save({a: 1});
-assert.soon(function() {
- v = s.getDB(baseName).one.findOne();
- return v && 1 == v.a;
-});
-
-assert.commandWorked(
- admin.runCommand({renameCollection: "jstests_repl_repl9.one", to: "jstests_repl_repl9.two"}));
-assert.soon(function() {
- if (-1 == s.getDB(baseName).getCollectionNames().indexOf("two")) {
- debug("no two coll");
- debug(tojson(s.getDB(baseName).getCollectionNames()));
- return false;
- }
- if (!s.getDB(baseName).two.findOne()) {
- debug("no two object");
- return false;
- }
- return 1 == s.getDB(baseName).two.findOne().a;
-});
-assert.eq(-1, s.getDB(baseName).getCollectionNames().indexOf("one"));
-
-// rename to new db
-
-first = baseName + "_first";
-second = baseName + "_second";
-
-m.getDB(first).one.save({a: 1});
-assert.soon(function() {
- return s.getDB(first).one.findOne() && 1 == s.getDB(first).one.findOne().a;
-});
-
-assert.commandWorked(admin.runCommand(
- {renameCollection: "jstests_repl_repl9_first.one", to: "jstests_repl_repl9_second.two"}));
-assert.soon(function() {
- return -1 != s.getDBNames().indexOf(second) &&
- -1 != s.getDB(second).getCollectionNames().indexOf("two") &&
- s.getDB(second).two.findOne() && 1 == s.getDB(second).two.findOne().a;
-});
-
-// Renaming a collection across databases is replicated as multiple operations so we have to wait
-// for source collection drop to be applied on the slave.
-assert.soon(function() {
- return -1 == s.getDB(first).getCollectionNames().indexOf("one");
-});
-rt.stop();
\ No newline at end of file
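Collection renames also replicate through the oplog in replica sets; a hedged sketch of the within-database case from this test (database name is illustrative):

// Illustrative only: a within-database renameCollection replicates to the secondary.
const rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();

const primary = rst.getPrimary();
assert.writeOK(primary.getDB("repl9").one.insert({a: 1}));
assert.commandWorked(
    primary.getDB("admin").runCommand({renameCollection: "repl9.one", to: "repl9.two"}));

rst.awaitReplication();
const secondary = rst.getSecondary();
secondary.setSlaveOk();
assert.eq(1, secondary.getDB("repl9").two.findOne().a);
assert.eq(-1, secondary.getDB("repl9").getCollectionNames().indexOf("one"));

rst.stopSet();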
diff --git a/jstests/master_slave/repl_sync_only_db_with_special_chars.js b/jstests/master_slave/repl_sync_only_db_with_special_chars.js
deleted file mode 100644
index 1daaf1abe85..00000000000
--- a/jstests/master_slave/repl_sync_only_db_with_special_chars.js
+++ /dev/null
@@ -1,25 +0,0 @@
-
-doTest = function() {
-
- var rt = new ReplTest("repl_sync_only_db_with_special_chars");
- var normalDB = "abc";
- var specialDB = "[a-z]+";
- var master = rt.start(true);
- var slave = rt.start(false, {only: specialDB});
-
- master.getDB(normalDB).data.save({a: 1});
- master.getDB(specialDB).data.save({z: 1});
-
- assert.soon(function() {
- var normalDocs = slave.getDB(normalDB).data.find().count();
- var specialDocs = slave.getDB(specialDB).data.find().count();
-
- return normalDocs == 0 && specialDocs == 1;
- }, "Failed to only sync to " + specialDB);
-
-};
-
-// Disabled because of SERVER-10344
-if (false) {
- doTest();
-}
diff --git a/jstests/master_slave/snapshot1.js b/jstests/master_slave/snapshot1.js
deleted file mode 100644
index 50e552f418c..00000000000
--- a/jstests/master_slave/snapshot1.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Test SERVER-623 - starting slave from a new snapshot
-//
-// This test requires persistence because it assumes copying the dbpath will copy the data between
-// nodes. There should not be any data in the dbpath for ephemeral storage engines, so this will not
-// work. It also requires the fsync command to induce replication lag.
-// @tags: [requires_persistence, requires_fsync]
-
-ports = allocatePorts(3);
-
-var baseName = "repl_snapshot1";
-
-rt1 = new ReplTest("repl_snapshot1-1", [ports[0], ports[1]]);
-rt2 = new ReplTest("repl_snapshot1-2", [ports[0], ports[2]]);
-m = rt1.start(true);
-
-big = new Array(2000).toString();
-for (i = 0; i < 1000; ++i)
- m.getDB(baseName)[baseName].save({_id: new ObjectId(), i: i, b: big});
-
-m.getDB("admin").runCommand({fsync: 1, lock: 1});
-copyDbpath(rt1.getPath(true), rt1.getPath(false));
-m.getDB("admin").fsyncUnlock();
-
-s1 = rt1.start(false, null, true);
-assert.eq(1000, s1.getDB(baseName)[baseName].count());
-m.getDB(baseName)[baseName].save({i: 1000});
-assert.soon(function() {
- return 1001 == s1.getDB(baseName)[baseName].count();
-});
-
-s1.getDB("admin").runCommand({fsync: 1, lock: 1});
-copyDbpath(rt1.getPath(false), rt2.getPath(false));
-s1.getDB("admin").fsyncUnlock();
-
-s2 = rt2.start(false, null, true);
-assert.eq(1001, s2.getDB(baseName)[baseName].count());
-m.getDB(baseName)[baseName].save({i: 1001});
-assert.soon(function() {
- return 1002 == s2.getDB(baseName)[baseName].count();
-});
-assert.soon(function() {
- return 1002 == s1.getDB(baseName)[baseName].count();
-});
-
-assert(!rawMongoProgramOutput().match(/resync/));
-rt1.stop();
-rt2.stop();
\ No newline at end of file