author     Benety Goh <benety@mongodb.com>    2016-07-11 16:05:08 -0400
committer  Benety Goh <benety@mongodb.com>    2016-07-12 13:50:31 -0400
commit     b675e2e962b89b0e0ec341b334d4c95b55c0772e (patch)
tree       46d6ed2c882508373601c563f1135a3f5df19c3b /jstests
parent     d256c702a9a1e411263aa4e054d917fa393246b0 (diff)
download   mongo-b675e2e962b89b0e0ec341b334d4c95b55c0772e.tar.gz
SERVER-24926 replset8.js - additional assertions to check bulk operations
Diffstat (limited to 'jstests')
-rw-r--r--    jstests/replsets/replset8.js    140
1 file changed, 82 insertions, 58 deletions
diff --git a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js
index 4f0a54cadb2..37a74a86dee 100644
--- a/jstests/replsets/replset8.js
+++ b/jstests/replsets/replset8.js
@@ -1,61 +1,85 @@
// test for SERVER-6303 - if documents move backward during an initial sync.
-var rt = new ReplSetTest({name: "replset8tests", nodes: 1});
-
-var nodes = rt.startSet();
-rt.initiate();
-var master = rt.getPrimary();
-var bigstring = "a";
-var md = master.getDB('d');
-var mdc = md['c'];
-
-// prep the data
-
-// idea: create x documents of increasing size, then create x documents of size n.
-// delete first x documents. start initial sync (cloner). update all remaining
-// documents to be increasing size.
-// this should result in the updates moving the docs backwards.
-
-var doccount = 5000;
-// Avoid empty extent issues
-mdc.insert({_id: -1, x: "dummy"});
-
-print("inserting bigstrings");
-var bulk = mdc.initializeUnorderedBulkOp();
-for (i = 0; i < doccount; ++i) {
- bulk.insert({_id: i, x: bigstring});
- bigstring += "a";
-}
-assert.writeOK(bulk.execute());
-
-print("inserting x");
-bulk = mdc.initializeUnorderedBulkOp();
-for (i = doccount; i < doccount * 2; ++i) {
- bulk.insert({_id: i, x: i});
-}
-assert.writeOK(bulk.execute());
-
-print("deleting bigstrings");
-bulk = mdc.initializeUnorderedBulkOp();
-for (i = 0; i < doccount; ++i) {
- bulk.find({_id: i}).remove();
-}
-assert.writeOK(bulk.execute());
-
-// add a secondary
-var slave = rt.add();
-rt.reInitiate();
-print("initiation complete!");
-rt.awaitSecondaryNodes();
-print("updating documents backwards");
-// Move all documents to the beginning by growing them to sizes that should
-// fit the holes we made in phase 1
-bulk = mdc.initializeUnorderedBulkOp();
-for (i = doccount * 2; i > doccount; --i) {
- bulk.find({_id: i, x: i}).update({$set: {x: bigstring}});
- bigstring = bigstring.slice(0, -1); // remove last char
-}
-assert.writeOK(bulk.execute({writeConcern: {w: rt.nodes.length}}));
-print("finished");
-assert.eq(doccount + 1, slave.getDB('d')['c'].find().itcount());
+(function() {
+ "use strict";
+ var rt = new ReplSetTest({name: "replset8", nodes: 1});
+
+ var nodes = rt.startSet();
+ rt.initiate();
+ var master = rt.getPrimary();
+ var bigstring = "a";
+ var md = master.getDB('d');
+ var mdc = md['c'];
+
+ // prep the data
+
+ // idea: create x documents of increasing size, then create x documents of size n.
+ // delete first x documents. start initial sync (cloner). update all remaining
+ // documents to be increasing size.
+ // this should result in the updates moving the docs backwards.
+
+ var doccount = 5000;
+ // Avoid empty extent issues
+ mdc.insert({_id: -1, x: "dummy"});
+
+ jsTestLog('inserting ' + doccount + ' bigstrings');
+ var bulk = mdc.initializeUnorderedBulkOp();
+ for (var i = 0; i < doccount; ++i) {
+ bulk.insert({_id: i, x: bigstring});
+ bigstring += "a";
+ }
+ var result = assert.writeOK(bulk.execute());
+ jsTestLog('insert 0-' + (doccount - 1) + ' result: ' + tojson(result));
+ assert.eq(doccount, result.nInserted);
+ assert.eq(doccount + 1, mdc.find().itcount());
+
+    jsTestLog('inserting ' + doccount + ' documents - {_id: ' + doccount + ', x: ' + doccount +
+              '} ... {_id: ' + (doccount * 2 - 1) + ', x: ' + (doccount * 2 - 1) + '}');
+ bulk = mdc.initializeUnorderedBulkOp();
+ for (i = doccount; i < doccount * 2; ++i) {
+ bulk.insert({_id: i, x: i});
+ }
+ result = assert.writeOK(bulk.execute());
+ jsTestLog('insert ' + doccount + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
+ assert.eq(doccount, result.nInserted);
+ assert.eq(doccount * 2 + 1, mdc.find().itcount());
+
+ jsTestLog('deleting ' + doccount + ' bigstrings');
+ bulk = mdc.initializeUnorderedBulkOp();
+ for (i = 0; i < doccount; ++i) {
+ bulk.find({_id: i}).remove();
+ }
+ result = assert.writeOK(bulk.execute());
+ jsTestLog('delete 0-' + (doccount - 1) + ' result: ' + tojson(result));
+ assert.eq(doccount, result.nRemoved);
+ assert.eq(doccount + 1, mdc.find().itcount());
+
+ // add a secondary
+ var slave = rt.add();
+ rt.reInitiate();
+    jsTestLog('reinitiation complete after adding new node to replica set');
+ rt.awaitSecondaryNodes();
+ jsTestLog("updating documents backwards");
+ // Move all documents to the beginning by growing them to sizes that should
+ // fit the holes we made in phase 1
+ bulk = mdc.initializeUnorderedBulkOp();
+ for (i = doccount * 2; i > doccount; --i) {
+ bulk.find({_id: i}).update({$set: {x: bigstring}});
+ bigstring = bigstring.slice(0, -1); // remove last char
+ }
+ result = assert.writeOK(bulk.execute({writeConcern: {w: rt.nodes.length}}));
+ jsTestLog('update ' + (doccount + 1) + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
+ assert.eq(doccount - 1, result.nMatched);
+ assert.eq(doccount - 1, result.nModified);
+
+ assert.eq(doccount + 1,
+ mdc.find().itcount(),
+ 'incorrect collection size on primary (fast count: ' + mdc.count() + ')');
+ assert.eq(doccount + 1,
+ slave.getDB('d')['c'].find().itcount(),
+ 'incorrect collection size on secondary (fast count: ' +
+ slave.getDB('d')['c'].count() + ')');
+
+ jsTestLog("finished");
+})();
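
For reference, the assertions this commit adds are built on the mongo shell's BulkWriteResult: assert.writeOK() returns the result of bulk.execute(), and its counters (nInserted, nMatched, nModified, nRemoved) can be compared against the expected document counts. Below is a minimal standalone sketch of that pattern, not part of the commit; it assumes a mongo shell connected to a running mongod, and the collection name 'bulkResultDemo' and the document counts are illustrative only.

(function() {
    "use strict";
    var coll = db.bulkResultDemo;  // hypothetical scratch collection, not part of the commit
    coll.drop();

    // Insert a handful of documents and verify the reported insert count.
    var bulk = coll.initializeUnorderedBulkOp();
    for (var i = 0; i < 10; ++i) {
        bulk.insert({_id: i, x: i});
    }
    var result = assert.writeOK(bulk.execute());
    assert.eq(10, result.nInserted);

    // Update half of them and verify the matched/modified counters.
    bulk = coll.initializeUnorderedBulkOp();
    for (i = 0; i < 5; ++i) {
        bulk.find({_id: i}).update({$set: {x: 'updated'}});
    }
    result = assert.writeOK(bulk.execute());
    assert.eq(5, result.nMatched);
    assert.eq(5, result.nModified);

    // Remove them and verify the remove counter.
    bulk = coll.initializeUnorderedBulkOp();
    for (i = 0; i < 5; ++i) {
        bulk.find({_id: i}).remove();
    }
    result = assert.writeOK(bulk.execute());
    assert.eq(5, result.nRemoved);
})();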