summaryrefslogtreecommitdiff
path: root/jstests/slow1
diff options
context:
space:
mode:
authorEric Milkie <milkie@10gen.com>2016-03-14 13:56:45 -0400
committerEric Milkie <milkie@10gen.com>2016-03-15 13:28:47 -0400
commit61fd8eefb0fd8e563014b82b2d928d9b0bafda7b (patch)
treed6facaff9a0f0382bff93fd9ac5a52e9ebe80a65 /jstests/slow1
parenta40991b77d885ceb3048b9eaf3b5100e259234b9 (diff)
downloadmongo-61fd8eefb0fd8e563014b82b2d928d9b0bafda7b.tar.gz
SERVER-23111 isolate election_timing.js and memory.js by creating new serial_run suite
Diffstat (limited to 'jstests/slow1')
-rw-r--r--jstests/slow1/conc_update.js57
-rw-r--r--jstests/slow1/election_timing.js151
-rw-r--r--jstests/slow1/memory.js57
-rw-r--r--jstests/slow1/mr_during_migrate.js113
-rw-r--r--jstests/slow1/remove_during_mr.js58
5 files changed, 228 insertions, 208 deletions
diff --git a/jstests/slow1/conc_update.js b/jstests/slow1/conc_update.js
new file mode 100644
index 00000000000..b7b8b836831
--- /dev/null
+++ b/jstests/slow1/conc_update.js
@@ -0,0 +1,57 @@
+load("jstests/libs/slow_weekly_util.js");
+test = new SlowWeeklyMongod("conc_update");
+db = test.getDB("concurrency");
+db.dropDatabase();
+
+NRECORDS = 3 * 1024 * 1024;
+
+print("loading " + NRECORDS + " documents (progress msg every 1024*1024 documents)");
+var bulk = db.conc.initializeUnorderedBulkOp();
+for (var i = 0; i < NRECORDS; i++) {
+ bulk.insert({x: i});
+}
+assert.writeOK(bulk.execute());
+
+print("making an index (this will take a while)");
+db.conc.ensureIndex({x: 1});
+
+var c1 = db.conc.count({x: {$lt: NRECORDS}});
+
+updater = startParallelShell(
+ "db = db.getSisterDB('concurrency');\
+ db.concflag.insert({ inprog: true });\
+ sleep(20);\
+ assert.writeOK(db.conc.update({}, \
+ { $inc: { x: " +
+ NRECORDS +
+ "}}, false, true)); \
+ assert.writeOK(db.concflag.update({}, { inprog: false }));");
+
+assert.soon(function() {
+ var x = db.concflag.findOne();
+ return x && x.inprog;
+}, "wait for fork", 30000, 1);
+
+querycount = 0;
+decrements = 0;
+misses = 0;
+
+assert.soon(function() {
+ c2 = db.conc.count({x: {$lt: NRECORDS}});
+ print(c2);
+ querycount++;
+ if (c2 < c1)
+ decrements++;
+ else
+ misses++;
+ c1 = c2;
+ return !db.concflag.findOne().inprog;
+}, "update never finished", 2 * 60 * 60 * 1000, 10);
+
+print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+
+assert.eq(NRECORDS, db.conc.count(), "AT END 1");
+
+updater(); // wait()
+
+test.stop();
diff --git a/jstests/slow1/election_timing.js b/jstests/slow1/election_timing.js
deleted file mode 100644
index 2fe83be02ed..00000000000
--- a/jstests/slow1/election_timing.js
+++ /dev/null
@@ -1,151 +0,0 @@
-// Run a small set of tests using the ElectionTimingTest framework. While this
-// reports the timing of the election, we are using it to check if any errors happen
-// during different election cycles.
-(function() {
- "use strict";
- load("jstests/libs/election_timing_test.js");
- var testStart = Date.now();
-
- var testCases = [
- {
- name: "testV1Stop",
- description: "protocolVersion 1, primary is stopped",
- protocolVersion: 1,
- // testRuns is the number of times a new ReplSetTest will be used.
- testRuns: 1,
- // testCycles is the number of election cycles that will be run per ReplSetTest lifespan.
- testCycles: 5,
- // testSetup is run after the replSet is initiated.
- // Function.prototype is the default.
- testSetup: Function.prototype,
- // Trigger an election by stepping down, stopping, or partitioning the primary.
- // stopPrimary is the default.
- electionTrigger: ElectionTimingTest.prototype.stopPrimary,
- // After the election has completed, make the old primary available again.
- // stopPrimaryReset is the default.
- testReset: ElectionTimingTest.prototype.stopPrimaryReset
- },
-
- /*
-    This test case is flaky since waiting for the old primary to shutdown can take longer than
- the
- allowed timeout, even if a new primary was elected during the shutdown time.
-
- {
- name: "testV1StopTimeout1500",
- description: "protocolVersion 1, primary is stopped, electionTimeoutMillis set to 1500",
- protocolVersion: 1,
- testRuns: 1,
- testCycles: 5,
- // The settings object is merged into the replset config settings object.
- settings: {electionTimeoutMillis: 1500}
- },
- */
-
- {
- name: "testV1StepDown",
- description: "protocolVersion 1, primary is stepped down",
- protocolVersion: 1,
- testRuns: 1,
- testCycles: 5,
- electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
- testReset: ElectionTimingTest.prototype.stepDownPrimaryReset,
- },
-
- {
- name: "testV1StepDown1500",
- description: "protocolVersion 1, primary is stepped down",
- protocolVersion: 1,
- testRuns: 1,
- testCycles: 5,
- electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
- testReset: ElectionTimingTest.prototype.stepDownPrimaryReset,
- // The settings object is merged into the replset config settings object.
- settings: {electionTimeoutMillis: 1500}
- },
-
- {
- name: "testV1StepDownLargeCluster",
- description: "protocolVersion 1, primary is stepped down, 7 electable nodes",
- protocolVersion: 1,
- nodes: 7,
- testRuns: 1,
- testCycles: 5,
- electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
- testReset: function() {},
- waitForNewPrimary: function(rst, secondary) {
- rst.getPrimary();
- }
- },
-
- {
- name: "testV0Stop",
- description: "protocolVersion 0, primary is stopped",
- protocolVersion: 0,
- testRuns: 1,
- testCycles: 1
- },
-
- {
- name: "testV0StepDown",
- description: "protocolVersion 0, primary is stepped down",
- protocolVersion: 0,
- testRuns: 1,
- testCycles: 2,
- stepDownGuardTime: 30,
- // There is a guard time in pv0 that prevents an election right
- // after initiating.
- testSetup: function() {
- sleep(30 * 1000);
- },
- electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
- testReset: ElectionTimingTest.prototype.stepDownPrimaryReset
- },
-
- ];
-
- testCases.forEach(function(tc) {
- var testRun = new ElectionTimingTest(tc);
- tc.testResults = testRun.testResults;
- tc.electionTimeoutLimitMillis = testRun.electionTimeoutLimitMillis;
-
- if (testRun.testErrors.length) {
- // Stop tests if we encounter an error.
- // Dump available information for debugging.
- jsTestLog("Errors from: " + tc.name);
- printjson(tc);
- printjson(testRun.testErrors);
- throw new Error(testRun.testErrors[0].status);
- }
- // Print results of current test in case
- // we need to analyze a failed test later.
- jsTestLog("Raw Results: " + tc.name);
- printjson(tc.testResults);
- });
-
- testCases.forEach(function(tc) {
- var allResults = [];
- tc.testResults.forEach(function(tr) {
- allResults = allResults.concat(tr.results);
- });
-
- var resAvg = Array.avg(allResults);
- var resMin = Math.min(... allResults);
- var resMax = Math.max(... allResults);
- var resStdDev = Array.stdDev(allResults);
-
- jsTestLog("Results: " + tc.name + " Average over " + allResults.length + " runs: " +
- resAvg + " Min: " + resMin + " Max: " + resMax + " Limit: " +
- tc.electionTimeoutLimitMillis / 1000 + " StdDev: " + resStdDev.toFixed(4));
-
- allResults.forEach(function(failoverElapsedMillis) {
- assert.lte(failoverElapsedMillis,
- tc.electionTimeoutLimitMillis / 1000,
- tc.name + ': failover (' + failoverElapsedMillis +
- ' sec) took too long. limit: ' + tc.electionTimeoutLimitMillis / 1000 +
- ' sec');
- });
- });
-
- jsTestLog("Tests completed in: " + (Date.now() - testStart) / 1000 + " seconds");
-}());
diff --git a/jstests/slow1/memory.js b/jstests/slow1/memory.js
deleted file mode 100644
index 9d67aa7aab6..00000000000
--- a/jstests/slow1/memory.js
+++ /dev/null
@@ -1,57 +0,0 @@
-var col = db.memoryTest;
-
-// test creating many collections to make sure no internal cache goes OOM
-for (var i = 0; i < 10000; ++i) {
- name = "memoryTest" + i;
- if ((i % 1000) == 0)
- print("Processing " + name);
- db.eval(function(col) {
- for (var i = 0; i < 100; ++i) {
- db[col + "_" + i].find();
- }
- }, name);
-}
-
-// test recovery of JS engine after out of memory
-db.system.js.save({
- "_id": "f1",
- "value": function(n) {
- a = [];
- b = [];
- c = [];
- for (i = 0; i < n; i++) {
- a.push(Math.random());
- b.push(Math.random());
- c.push(Math.random());
- }
- }
-});
-
-// do mix of calls to make sure OOM is handled with no permanent damage
-db.eval("f1(10)");
-assert.throws(function() {
- db.eval("f1(100000000)");
-});
-db.eval("f1(10)");
-assert.throws(function() {
- db.eval("f1(1000000000)");
-});
-db.eval("f1(1000000)");
-db.eval("f1(1000000)");
-db.eval("f1(1000000)");
-assert.throws(function() {
- db.eval("f1(100000000)");
-});
-db.eval("f1(10)");
-db.eval("f1(1000000)");
-db.eval("f1(1000000)");
-db.eval("f1(1000000)");
-
-// also test $where
-col.drop();
-col.insert({a: 1});
-col.findOne({$where: "var arr = []; for (var i = 0; i < 1000000; ++i) {arr.push(0);}"});
-assert.throws(function() {
- col.findOne({$where: "var arr = []; for (var i = 0; i < 1000000000; ++i) {arr.push(0);}"});
-});
-col.findOne({$where: "var arr = []; for (var i = 0; i < 1000000; ++i) {arr.push(0);}"});
diff --git a/jstests/slow1/mr_during_migrate.js b/jstests/slow1/mr_during_migrate.js
new file mode 100644
index 00000000000..cb439aeb241
--- /dev/null
+++ b/jstests/slow1/mr_during_migrate.js
@@ -0,0 +1,113 @@
+// Do parallel ops with migrates occurring
+
+var st = new ShardingTest({shards: 10, mongos: 2, verbose: 2});
+
+jsTest.log("Doing parallel operations...");
+
+// Stop balancer, since it'll just get in the way of these
+st.stopBalancer();
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = st.s.getCollection(jsTest.name() + ".coll");
+
+var numDocs = 1024 * 1024;
+var dataSize = 1024; // bytes, must be power of 2
+
+var data = "x";
+while (data.length < dataSize)
+ data += data;
+
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, data: data});
+}
+assert.writeOK(bulk.execute());
+
+// Make sure everything got inserted
+assert.eq(numDocs, coll.find().itcount());
+
+jsTest.log("Inserted " + sh._dataFormat(dataSize * numDocs) + " of data.");
+
+// Shard collection
+st.shardColl(coll, {_id: 1}, false);
+
+st.printShardingStatus();
+
+jsTest.log("Sharded collection now initialized, starting migrations...");
+
+var checkMigrate = function() {
+ print("Result of migrate : ");
+ printjson(this);
+};
+
+// Creates a number of migrations of random chunks to diff shard servers
+var ops = [];
+for (var i = 0; i < st._connections.length; i++) {
+ ops.push({
+ op: "command",
+ ns: "admin",
+ command: {
+ moveChunk: "" + coll,
+ find: {_id: {"#RAND_INT": [0, numDocs]}},
+ to: st._connections[i].shardName,
+ _waitForDelete: true
+ },
+ showResult: true
+ });
+}
+
+// TODO: Also migrate output collection
+
+jsTest.log("Starting migrations now...");
+
+var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
+
+//#######################
+// Tests during migration
+
+var numTests = 5;
+
+for (var t = 0; t < numTests; t++) {
+ jsTest.log("Test #" + t);
+
+ var mongos = st.s1; // use other mongos so we get stale shard versions
+ var coll = mongos.getCollection(coll + "");
+ var outputColl = mongos.getCollection(coll + "_output");
+
+ var numTypes = 32;
+ var map = function() {
+ emit(this._id % 32 /* must be hardcoded */, {c: 1});
+ };
+ var reduce = function(k, vals) {
+ var total = 0;
+ for (var i = 0; i < vals.length; i++)
+ total += vals[i].c;
+ return {
+ c: total
+ };
+ };
+
+ printjson(coll.find({_id: 0}).itcount());
+
+ jsTest.log("Starting new mapReduce run #" + t);
+
+ // assert.eq( coll.find().itcount(), numDocs )
+
+ coll.getMongo().getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
+
+ printjson(coll.mapReduce(
+ map, reduce, {out: {replace: outputColl.getName(), db: outputColl.getDB() + ""}}));
+
+ jsTest.log("MapReduce run #" + t + " finished.");
+
+ assert.eq(outputColl.find().itcount(), numTypes);
+
+ outputColl.find().forEach(function(x) {
+ assert.eq(x.value.c, numDocs / numTypes);
+ });
+}
+
+printjson(benchFinish(bid));
+
+st.stop();
diff --git a/jstests/slow1/remove_during_mr.js b/jstests/slow1/remove_during_mr.js
new file mode 100644
index 00000000000..9b632a11a56
--- /dev/null
+++ b/jstests/slow1/remove_during_mr.js
@@ -0,0 +1,58 @@
+// SERVER-15539
+'use strict';
+
+load('jstests/libs/parallelTester.js');
+
+function client1() {
+ Random.setRandomSeed();
+ for (var i = 0; i < 1000; i++) {
+ db.remove_during_mr.remove({rand: {$gte: Random.rand()}}, {justOne: true});
+ }
+}
+
+function client2() {
+ function mapper() {
+ emit(this.key, 1);
+ }
+
+ function reducer() {
+ return {};
+ }
+
+ for (var i = 0; i < 1000; i++) {
+ var options = {
+ out: {replace: 'bar'},
+ sort: {_id: -1}
+ };
+
+ db.remove_during_mr.mapReduce(mapper, reducer, options);
+ }
+}
+
+// prepare some basic data for the collection
+db.remove_during_mr.drop();
+
+Random.setRandomSeed();
+var bulk = db.remove_during_mr.initializeUnorderedBulkOp();
+for (var i = 0; i < 3000; i++) {
+ bulk.insert({i: i, key: Random.randInt(), rand: Random.rand()});
+}
+bulk.execute();
+
+var threads = [];
+for (var i = 0; i < 20; i++) {
+ var t;
+
+ if (i % 2 === 0) {
+ t = new ScopedThread(client1);
+ } else {
+ t = new ScopedThread(client2);
+ }
+
+ threads.push(t);
+ t.start();
+}
+
+threads.forEach(function(t) {
+ t.join();
+});