Diffstat (limited to 'jstests/replsets/replset8.js')
-rw-r--r--  jstests/replsets/replset8.js  44
1 files changed, 22 insertions, 22 deletions
diff --git a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js
index ead9c50f066..69a16daa3a6 100644
--- a/jstests/replsets/replset8.js
+++ b/jstests/replsets/replset8.js
@@ -1,64 +1,64 @@
// test for SERVER-6303 - if documents move backward during an initial sync.
-var rt = new ReplSetTest( { name : "replset8tests" , nodes: 1 } );
+var rt = new ReplSetTest({name: "replset8tests", nodes: 1});
var nodes = rt.startSet();
rt.initiate();
var master = rt.getPrimary();
var bigstring = "a";
-var md = master.getDB( 'd' );
-var mdc = md[ 'c' ];
+var md = master.getDB('d');
+var mdc = md['c'];
// prep the data
// idea: create x documents of increasing size, then create x documents of size n.
-// delete first x documents. start initial sync (cloner). update all remaining
+// delete first x documents. start initial sync (cloner). update all remaining
// documents to be increasing size.
// this should result in the updates moving the docs backwards.
var doccount = 5000;
// Avoid empty extent issues
-mdc.insert( { _id:-1, x:"dummy" } );
+mdc.insert({_id: -1, x: "dummy"});
-print ("inserting bigstrings");
+print("inserting bigstrings");
var bulk = mdc.initializeUnorderedBulkOp();
-for( i = 0; i < doccount; ++i ) {
- bulk.insert( { _id:i, x:bigstring } );
+for (i = 0; i < doccount; ++i) {
+ bulk.insert({_id: i, x: bigstring});
bigstring += "a";
}
assert.writeOK(bulk.execute());
-print ("inserting x");
+print("inserting x");
bulk = mdc.initializeUnorderedBulkOp();
-for( i = doccount; i < doccount*2; ++i ) {
- bulk.insert( { _id:i, x:i } );
+for (i = doccount; i < doccount * 2; ++i) {
+ bulk.insert({_id: i, x: i});
}
assert.writeOK(bulk.execute());
-print ("deleting bigstrings");
+print("deleting bigstrings");
bulk = mdc.initializeUnorderedBulkOp();
-for( i = 0; i < doccount; ++i ) {
- bulk.find({ _id: i }).remove();
+for (i = 0; i < doccount; ++i) {
+ bulk.find({_id: i}).remove();
}
assert.writeOK(bulk.execute());
// add a secondary
var slave = rt.add();
rt.reInitiate();
-print ("initiation complete!");
-var sc = slave.getDB( 'd' )[ 'c' ];
+print("initiation complete!");
+var sc = slave.getDB('d')['c'];
slave.setSlaveOk();
sleep(25000);
-print ("updating documents backwards");
+print("updating documents backwards");
// Move all documents to the beginning by growing them to sizes that should
// fit the holes we made in phase 1
bulk = mdc.initializeUnorderedBulkOp();
-for (i = doccount*2; i > doccount; --i) {
- mdc.update( { _id:i, x:i }, { _id:i, x:bigstring } );
- bigstring = bigstring.slice(0, -1); // remove last char
+for (i = doccount * 2; i > doccount; --i) {
+ mdc.update({_id: i, x: i}, {_id: i, x: bigstring});
+ bigstring = bigstring.slice(0, -1); // remove last char
}
-print ("finished");
+print("finished");
// Wait for replication to catch up.
rt.awaitSecondaryNodes();
-assert.eq(doccount+1, slave.getDB( 'd' )['c'].count());
+assert.eq(doccount + 1, slave.getDB('d')['c'].count());
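Below is an illustrative mongo shell sketch, not part of the commit above, condensing the pattern the test's comments describe: a large document is deleted to leave a hole early in the collection, and a later, smaller document is then grown by an update so the server can move it backward into that hole during initial sync. The database and collection names here are placeholders.

// Hypothetical sketch of the "documents move backward" pattern.
var coll = db.getSiblingDB('d')['c'];
var pad = new Array(1024).join('a');      // roughly 1 KB of filler
coll.insert({_id: 0, x: pad});            // large doc allocated first
coll.insert({_id: 1, x: 1});              // small doc allocated after it
coll.remove({_id: 0});                    // deleting the large doc leaves a hole
coll.update({_id: 1}, {_id: 1, x: pad});  // growing the small doc lets it move back into the hole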