summaryrefslogtreecommitdiff
path: root/jstests/slow1
diff options
context:
space:
mode:
author   may <may.hoque@mongodb.com>  2017-06-07 10:03:57 -0400
committer   may <may.hoque@mongodb.com>  2017-06-07 10:03:57 -0400
commit   67b2885420cc8d7ac63c2df4391fe92cd5c5c038 (patch)
tree   c01b42c0f240872375d2476b95437645620038f4 /jstests/slow1
parent   1345c0476cf47d691e8db532967238800d0a70c2 (diff)
download   mongo-67b2885420cc8d7ac63c2df4391fe92cd5c5c038.tar.gz
SERVER-23035 Remove SlowWeeklyMongod code from the shell
Diffstat (limited to 'jstests/slow1')
-rw-r--r--   jstests/slow1/conc_update.js   118
1 file changed, 61 insertions, 57 deletions
diff --git a/jstests/slow1/conc_update.js b/jstests/slow1/conc_update.js
index b7b8b836831..e46e132bd61 100644
--- a/jstests/slow1/conc_update.js
+++ b/jstests/slow1/conc_update.js
@@ -1,57 +1,61 @@
-load("jstests/libs/slow_weekly_util.js");
-test = new SlowWeeklyMongod("conc_update");
-db = test.getDB("concurrency");
-db.dropDatabase();
-
-NRECORDS = 3 * 1024 * 1024;
-
-print("loading " + NRECORDS + " documents (progress msg every 1024*1024 documents)");
-var bulk = db.conc.initializeUnorderedBulkOp();
-for (var i = 0; i < NRECORDS; i++) {
- bulk.insert({x: i});
-}
-assert.writeOK(bulk.execute());
-
-print("making an index (this will take a while)");
-db.conc.ensureIndex({x: 1});
-
-var c1 = db.conc.count({x: {$lt: NRECORDS}});
-
-updater = startParallelShell(
- "db = db.getSisterDB('concurrency');\
- db.concflag.insert({ inprog: true });\
- sleep(20);\
- assert.writeOK(db.conc.update({}, \
- { $inc: { x: " +
- NRECORDS +
- "}}, false, true)); \
- assert.writeOK(db.concflag.update({}, { inprog: false }));");
-
-assert.soon(function() {
- var x = db.concflag.findOne();
- return x && x.inprog;
-}, "wait for fork", 30000, 1);
-
-querycount = 0;
-decrements = 0;
-misses = 0;
-
-assert.soon(function() {
- c2 = db.conc.count({x: {$lt: NRECORDS}});
- print(c2);
- querycount++;
- if (c2 < c1)
- decrements++;
- else
- misses++;
- c1 = c2;
- return !db.concflag.findOne().inprog;
-}, "update never finished", 2 * 60 * 60 * 1000, 10);
-
-print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
-
-assert.eq(NRECORDS, db.conc.count(), "AT END 1");
-
-updater(); // wait()
-
-test.stop();
+(function() {
+ "use strict";
+
+ // Concurrency test: run count() queries on the indexed field x while a
+ // parallel shell multi-updates every document, then verify the total
+ // document count is unchanged at the end.
+ // SERVER-23035: the test now starts its own mongod via MongoRunner instead
+ // of the removed SlowWeeklyMongod shell helper.
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod was unable to start up");
+ db = conn.getDB("concurrency");
+ db.dropDatabase();
+
+ const NRECORDS = 3 * 1024 * 1024;
+
+ // Bulk-load NRECORDS documents {x: 0 .. NRECORDS-1}.
+ print("loading " + NRECORDS + " documents (progress msg every 1024*1024 documents)");
+ var bulk = db.conc.initializeUnorderedBulkOp();
+ for (var i = 0; i < NRECORDS; i++) {
+ bulk.insert({x: i});
+ }
+ assert.writeOK(bulk.execute());
+
+ print("making an index (this will take a while)");
+ db.conc.ensureIndex({x: 1});
+
+ // Baseline: initially every document matches {x: {$lt: NRECORDS}}.
+ var c1 = db.conc.count({x: {$lt: NRECORDS}});
+
+ // Parallel updater: sets concflag.inprog = true, then multi-updates every
+ // document with {$inc: {x: NRECORDS}} — moving each x out of the
+ // {$lt: NRECORDS} range being counted — then clears inprog when done.
+ const updater = startParallelShell(
+ "db = db.getSisterDB('concurrency');\
+ db.concflag.insert({ inprog: true });\
+ sleep(20);\
+ assert.writeOK(db.conc.update({}, \
+ { $inc: { x: " +
+ NRECORDS +
+ "}}, false, true)); \
+ assert.writeOK(db.concflag.update({}, { inprog: false }));");
+
+ // Block (up to 30s) until the parallel shell signals it has started.
+ assert.soon(function() {
+ var x = db.concflag.findOne();
+ return x && x.inprog;
+ }, "wait for fork", 30000, 1);
+
+ let querycount = 0;
+ let decrements = 0;
+ let misses = 0;
+
+ // Poll count() every 10ms while the update is in flight; a "decrement" is
+ // an observation where the count dropped since the previous poll. Loop
+ // ends when the updater clears inprog (2-hour safety timeout).
+ assert.soon(function() {
+ const c2 = db.conc.count({x: {$lt: NRECORDS}});
+ print(c2);
+ querycount++;
+ if (c2 < c1)
+ decrements++;
+ else
+ misses++;
+ c1 = c2;
+ return !db.concflag.findOne().inprog;
+ }, "update never finished", 2 * 60 * 60 * 1000, 10);
+
+ print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+
+ // Each document was $inc'd exactly once, so the total count is unchanged.
+ assert.eq(NRECORDS, db.conc.count(), "AT END 1");
+
+ updater(); // wait()
+
+ MongoRunner.stopMongod(conn);
+})();