commit    134a4083953270e8a11430395357fb70a29047ad (patch)
author    clang-format-7.0.1 <adam.martin@10gen.com>      2019-07-26 18:20:35 -0400
committer ADAM David Alan Martin <adam.martin@10gen.com>  2019-07-27 11:02:23 -0400
tree      dd428e1230e31d92b20b393dfdc17ffe7fa79cb6 /jstests/replsets/initial_sync_cloner_dups.js
parent    1e46b5049003f427047e723ea5fab15b5a9253ca (diff)
download  mongo-134a4083953270e8a11430395357fb70a29047ad.tar.gz

SERVER-41772 Apply clang-format 7.0.1 to the codebase
Diffstat (limited to 'jstests/replsets/initial_sync_cloner_dups.js')
-rw-r--r--  jstests/replsets/initial_sync_cloner_dups.js | 228
1 file changed, 114 insertions(+), 114 deletions(-)
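The change itself is whitespace-only: clang-format 7.0.1 no longer indents the body of this file's immediately invoked function expression, so every line inside the (function(doNotRun) { ... })(...) wrapper moves left by one level. A minimal illustrative sketch of that shift, not taken from the commit:

// Illustrative sketch only: the kind of reindentation this diff applies.
// Before, with the older clang-format, the IIFE body was indented one level:
(function(doNotRun) {
    "use strict";
    if (doNotRun) {
        return;
    }
})(true);
// After clang-format 7.0.1, the IIFE body starts at column 0:
(function(doNotRun) {
"use strict";
if (doNotRun) {
    return;
}
})(true);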
diff --git a/jstests/replsets/initial_sync_cloner_dups.js b/jstests/replsets/initial_sync_cloner_dups.js
index 23b1b989400..7132d9f2087 100644
--- a/jstests/replsets/initial_sync_cloner_dups.js
+++ b/jstests/replsets/initial_sync_cloner_dups.js
@@ -7,126 +7,126 @@
* verify collection and both indexes on the secondary have the right number of docs
*/
(function(doNotRun) {
- "use strict";
+"use strict";
- if (doNotRun) {
- return;
- }
+if (doNotRun) {
+ return;
+}
- load('jstests/libs/parallelTester.js');
+load('jstests/libs/parallelTester.js');
- Random.setRandomSeed();
+Random.setRandomSeed();
- // used to parse RAM log file
- var contains = function(logLines, func) {
- var i = logLines.length;
- while (i--) {
- printjson(logLines[i]);
- if (func(logLines[i])) {
- return true;
- }
+// used to parse RAM log file
+var contains = function(logLines, func) {
+ var i = logLines.length;
+ while (i--) {
+ printjson(logLines[i]);
+ if (func(logLines[i])) {
+ return true;
}
- return false;
- };
-
- var replTest = new ReplSetTest({name: 'cloner', nodes: 3, oplogSize: 150 /*~1.5x data size*/});
- replTest.startSet();
- var conf = replTest.getReplSetConfig();
- conf.settings = {};
- conf.settings.chainingAllowed = false;
- replTest.initiate(conf);
- replTest.awaitSecondaryNodes();
- var primary = replTest.getPrimary();
- var coll = primary.getDB('test').cloner;
- coll.drop();
- coll.createIndex({k: 1});
-
- // These need to be big enough to force initial-sync to use many batches
- var numDocs = 100 * 1000;
- var bigStr = Array(1001).toString();
- var batch = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- batch.insert({_id: i, bigStr: bigStr});
}
- batch.execute();
-
- replTest.awaitReplication();
-
- jsTestLog("Start remove/insert on primary");
- var insertAndRemove = function(host) {
- jsTestLog("starting bg writes on " + host);
- var m = new Mongo(host);
- var db = m.getDB('test');
- var coll = db.cloner;
- var numDocs = coll.count();
- for (var i = 0; !db.stop.findOne(); i++) {
- var id = Random.randInt(numDocs);
- coll.remove({_id: id});
- coll.insert({_id: id});
-
- var id = i % numDocs;
- // print(id);
- coll.remove({_id: id});
- coll.insert({_id: id});
-
- // Try to throttle this thread to prevent overloading slow machines.
- sleep(1);
- }
-
- jsTestLog("finished bg writes on " + host);
- };
- var worker = new ScopedThread(insertAndRemove, primary.host);
- worker.start();
-
- jsTestLog("add a new secondary");
- var secondary = replTest.add({});
- replTest.reInitiate();
- secondary.setSlaveOk();
- // Wait for the secondary to get ReplSetInitiate command.
- replTest.waitForState(
- secondary,
- [ReplSetTest.State.STARTUP_2, ReplSetTest.State.RECOVERING, ReplSetTest.State.SECONDARY]);
-
- // This fail point will cause the first intial sync to fail, and leave an op in the buffer to
- // verify the fix from SERVER-17807
- print("=================== failpoint enabled ==============");
- printjson(assert.commandWorked(secondary.getDB("admin").adminCommand(
- {configureFailPoint: 'failInitSyncWithBufferedEntriesLeft', mode: {times: 1}})));
- printjson(assert.commandWorked(secondary.getDB("admin").adminCommand({resync: true})));
-
- // NOTE: This is here to prevent false negatives, but it is racy and dependent on magic numbers.
- // Removed the assertion because it was too flaky. Printing a warning instead (dan)
- jsTestLog("making sure we dropped some dups");
- var res = secondary.adminCommand({getLog: "global"});
- var droppedDups = (contains(res.log, function(v) {
- return v.indexOf("index build dropped" /* NNN dups*/) != -1;
- }));
- if (!droppedDups) {
- jsTestLog(
- "Warning: Test did not trigger duplicate documents, this run will be a false negative");
+ return false;
+};
+
+var replTest = new ReplSetTest({name: 'cloner', nodes: 3, oplogSize: 150 /*~1.5x data size*/});
+replTest.startSet();
+var conf = replTest.getReplSetConfig();
+conf.settings = {};
+conf.settings.chainingAllowed = false;
+replTest.initiate(conf);
+replTest.awaitSecondaryNodes();
+var primary = replTest.getPrimary();
+var coll = primary.getDB('test').cloner;
+coll.drop();
+coll.createIndex({k: 1});
+
+// These need to be big enough to force initial-sync to use many batches
+var numDocs = 100 * 1000;
+var bigStr = Array(1001).toString();
+var batch = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ batch.insert({_id: i, bigStr: bigStr});
+}
+batch.execute();
+
+replTest.awaitReplication();
+
+jsTestLog("Start remove/insert on primary");
+var insertAndRemove = function(host) {
+ jsTestLog("starting bg writes on " + host);
+ var m = new Mongo(host);
+ var db = m.getDB('test');
+ var coll = db.cloner;
+ var numDocs = coll.count();
+ for (var i = 0; !db.stop.findOne(); i++) {
+ var id = Random.randInt(numDocs);
+ coll.remove({_id: id});
+ coll.insert({_id: id});
+
+ var id = i % numDocs;
+ // print(id);
+ coll.remove({_id: id});
+ coll.insert({_id: id});
+
+ // Try to throttle this thread to prevent overloading slow machines.
+ sleep(1);
}
- jsTestLog("stopping writes and waiting for replica set to coalesce");
- primary.getDB('test').stop.insert({});
- worker.join();
- // make sure all secondaries are caught up, after init sync
- reconnect(secondary.getDB("test"));
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
-
- jsTestLog("check that secondary has correct counts");
- var secondaryColl = secondary.getDB('test').getCollection('cloner');
- var index = secondaryColl.find({}, {_id: 1}).hint({_id: 1}).itcount();
- var secondary_index = secondaryColl.find({}, {_id: 1}).hint({k: 1}).itcount();
- var table = secondaryColl.find({}, {_id: 1}).hint({$natural: 1}).itcount();
- if (index != table || index != secondary_index) {
- printjson({
- name: coll,
- _id_index_count: index,
- secondary_index_count: secondary_index,
- table_count: table
- });
- }
- assert.eq(index, table);
- assert.eq(table, secondary_index);
+ jsTestLog("finished bg writes on " + host);
+};
+var worker = new ScopedThread(insertAndRemove, primary.host);
+worker.start();
+
+jsTestLog("add a new secondary");
+var secondary = replTest.add({});
+replTest.reInitiate();
+secondary.setSlaveOk();
+// Wait for the secondary to get ReplSetInitiate command.
+replTest.waitForState(
+ secondary,
+ [ReplSetTest.State.STARTUP_2, ReplSetTest.State.RECOVERING, ReplSetTest.State.SECONDARY]);
+
+// This fail point will cause the first initial sync to fail, and leave an op in the buffer to
+// verify the fix from SERVER-17807
+print("=================== failpoint enabled ==============");
+printjson(assert.commandWorked(secondary.getDB("admin").adminCommand(
+ {configureFailPoint: 'failInitSyncWithBufferedEntriesLeft', mode: {times: 1}})));
+printjson(assert.commandWorked(secondary.getDB("admin").adminCommand({resync: true})));
+
+// NOTE: This is here to prevent false negatives, but it is racy and dependent on magic numbers.
+// Removed the assertion because it was too flaky. Printing a warning instead (dan)
+jsTestLog("making sure we dropped some dups");
+var res = secondary.adminCommand({getLog: "global"});
+var droppedDups = (contains(res.log, function(v) {
+ return v.indexOf("index build dropped" /* NNN dups*/) != -1;
+}));
+if (!droppedDups) {
+ jsTestLog(
+ "Warning: Test did not trigger duplicate documents, this run will be a false negative");
+}
+
+jsTestLog("stopping writes and waiting for replica set to coalesce");
+primary.getDB('test').stop.insert({});
+worker.join();
+// make sure all secondaries are caught up, after init sync
+reconnect(secondary.getDB("test"));
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+
+jsTestLog("check that secondary has correct counts");
+var secondaryColl = secondary.getDB('test').getCollection('cloner');
+var index = secondaryColl.find({}, {_id: 1}).hint({_id: 1}).itcount();
+var secondary_index = secondaryColl.find({}, {_id: 1}).hint({k: 1}).itcount();
+var table = secondaryColl.find({}, {_id: 1}).hint({$natural: 1}).itcount();
+if (index != table || index != secondary_index) {
+ printjson({
+ name: coll,
+ _id_index_count: index,
+ secondary_index_count: secondary_index,
+ table_count: table
+ });
+}
+assert.eq(index, table);
+assert.eq(table, secondary_index);
})(true /* Disabled until SERVER-23476 re-enables the resync command */);
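For context, the background writer in this test is coordinated through a sentinel document rather than an in-process flag: the ScopedThread keeps writing until a document appears in test.stop, and the main thread ends it by inserting one. A minimal standalone sketch of that pattern, assuming a mongo shell with parallelTester.js available and an existing connection named primary, both as in the test above:

load('jstests/libs/parallelTester.js');

// Background writer: loop until a sentinel document shows up in test.stop.
var bgWriter = function(host) {
    var db = new Mongo(host).getDB('test');
    for (var i = 0; !db.stop.findOne(); i++) {
        db.cloner.insert({_id: i});
        sleep(1);  // throttle, as the test above does
    }
};

var writerThread = new ScopedThread(bgWriter, primary.host);
writerThread.start();
// ... perform the work that needs concurrent writes (here: initial sync of a new secondary) ...
primary.getDB('test').stop.insert({});  // signal the writer to exit
writerThread.join();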