summaryrefslogtreecommitdiff
path: root/jstests
diff options
context:
space:
mode:
authormay <may.hoque@mongodb.com>2017-06-07 10:03:57 -0400
committermay <may.hoque@mongodb.com>2017-06-07 10:03:57 -0400
commit67b2885420cc8d7ac63c2df4391fe92cd5c5c038 (patch)
treec01b42c0f240872375d2476b95437645620038f4 /jstests
parent1345c0476cf47d691e8db532967238800d0a70c2 (diff)
downloadmongo-67b2885420cc8d7ac63c2df4391fe92cd5c5c038.tar.gz
SERVER-23035 Remove SlowWeeklyMongod code from the shell
Diffstat (limited to 'jstests')
-rw-r--r--jstests/libs/slow_weekly_util.js19
-rw-r--r--jstests/mmap_v1/disk_reuse1.js95
-rw-r--r--jstests/noPassthrough/geo_full.js1015
-rw-r--r--jstests/noPassthrough/geo_mnypts_plus_fields.js157
-rw-r--r--jstests/noPassthrough/geo_near_random1.js28
-rw-r--r--jstests/noPassthrough/geo_near_random2.js45
-rw-r--r--jstests/noPassthrough/indexbg1.js236
-rw-r--r--jstests/noPassthrough/indexbg2.js154
-rw-r--r--jstests/noPassthrough/ns1.js83
-rw-r--r--jstests/noPassthrough/query_yield1.js150
-rw-r--r--jstests/noPassthrough/query_yield2.js261
-rw-r--r--jstests/noPassthrough/repair2.js47
-rw-r--r--jstests/noPassthrough/update_server-5552.js56
-rw-r--r--jstests/slow1/conc_update.js118
14 files changed, 1244 insertions, 1220 deletions
diff --git a/jstests/libs/slow_weekly_util.js b/jstests/libs/slow_weekly_util.js
deleted file mode 100644
index 7bc60bfcd61..00000000000
--- a/jstests/libs/slow_weekly_util.js
+++ /dev/null
@@ -1,19 +0,0 @@
-
-SlowWeeklyMongod = function(name) {
- this.name = name;
- this.start = new Date();
-
- this.conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
- this.port = this.conn.port;
-};
-
-SlowWeeklyMongod.prototype.getDB = function(name) {
- return this.conn.getDB(name);
-};
-
-SlowWeeklyMongod.prototype.stop = function() {
- MongoRunner.stopMongod(this.conn);
- var end = new Date();
- print("slowWeekly test: " + this.name + " completed successfully in " +
- ((end.getTime() - this.start.getTime()) / 1000) + " seconds");
-};
diff --git a/jstests/mmap_v1/disk_reuse1.js b/jstests/mmap_v1/disk_reuse1.js
index 01090667820..6dc1a1debe3 100644
--- a/jstests/mmap_v1/disk_reuse1.js
+++ b/jstests/mmap_v1/disk_reuse1.js
@@ -1,51 +1,52 @@
-load("jstests/libs/slow_weekly_util.js");
-test = new SlowWeeklyMongod("conc_update");
-db = test.getDB("test");
-t = db.disk_reuse1;
-t.drop();
-
-N = 10000;
-
-function k() {
- return Math.floor(Math.random() * N);
-}
-
-s = "";
-while (s.length < 1024)
- s += "abc";
-
-state = {};
-
-var bulk = t.initializeUnorderedBulkOp();
-for (var i = 0; i < N; i++) {
- bulk.insert({_id: i, s: s});
-}
-assert.writeOK(bulk.execute());
-
-orig = t.stats();
-
-t.remove({});
-
-bulk = t.initializeUnorderedBulkOp();
-for (i = 0; i < N; i++) {
- bulk.insert({_id: i, s: s});
-}
-assert.writeOK(bulk.execute());
-
-assert.eq(orig.storageSize, t.stats().storageSize, "A");
-
-for (j = 0; j < 100; j++) {
- for (i = 0; i < N; i++) {
- bulk = t.initializeUnorderedBulkOp();
- var r = Math.random();
- if (r > .5)
- bulk.find({_id: i}).remove();
- else
- bulk.find({_id: i}).upsert().updateOne({_id: i, s: s});
+(function() {
+ "use strict";
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ db = conn.getDB("test");
+ const t = db.disk_reuse1;
+ t.drop();
+
+ const N = 10000;
+
+ function k() {
+ return Math.floor(Math.random() * N);
}
+ let s = "";
+ while (s.length < 1024)
+ s += "abc";
+
+ var bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < N; i++) {
+ bulk.insert({_id: i, s: s});
+ }
assert.writeOK(bulk.execute());
- assert.eq(orig.storageSize, t.stats().storageSize, "B" + j);
-}
-test.stop();
+ const orig = t.stats();
+
+ t.remove({});
+
+ bulk = t.initializeUnorderedBulkOp();
+ for (let i = 0; i < N; i++) {
+ bulk.insert({_id: i, s: s});
+ }
+ assert.writeOK(bulk.execute());
+
+ assert.eq(orig.storageSize, t.stats().storageSize, "A");
+
+ for (let j = 0; j < 100; j++) {
+ for (let i = 0; i < N; i++) {
+ bulk = t.initializeUnorderedBulkOp();
+ var r = Math.random();
+ if (r > .5)
+ bulk.find({_id: i}).remove();
+ else
+ bulk.find({_id: i}).upsert().updateOne({_id: i, s: s});
+ }
+
+ assert.writeOK(bulk.execute());
+ assert.eq(orig.storageSize, t.stats().storageSize, "B" + j);
+ }
+
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js
index 8b5048e0b29..377a3bac741 100644
--- a/jstests/noPassthrough/geo_full.js
+++ b/jstests/noPassthrough/geo_full.js
@@ -18,569 +18,578 @@
// test fails, and hard-wiring that as the test number.
//
-load("jstests/libs/slow_weekly_util.js");
-testServer = new SlowWeeklyMongod("geo_full");
-db = testServer.getDB("test");
+(function() {
+ "use strict";
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ const db = conn.getDB("test");
+
+ var randEnvironment = function() {
+
+ // Normal earth environment
+ if (Random.rand() < 0.5) {
+ return {
+ max: 180,
+ min: -180,
+ bits: Math.floor(Random.rand() * 32) + 1,
+ earth: true,
+ bucketSize: 360 / (4 * 1024 * 1024 * 1024)
+ };
+ }
-var randEnvironment = function() {
+ var scales = [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000];
+ var scale = scales[Math.floor(Random.rand() * scales.length)];
+ var offset = Random.rand() * scale;
- // Normal earth environment
- if (Random.rand() < 0.5) {
- return {
- max: 180,
- min: -180,
- bits: Math.floor(Random.rand() * 32) + 1,
- earth: true,
- bucketSize: 360 / (4 * 1024 * 1024 * 1024)
- };
- }
+ var max = Random.rand() * scale + offset;
+ var min = -Random.rand() * scale + offset;
+ var bits = Math.floor(Random.rand() * 32) + 1;
+ var bits = Math.floor(Random.rand() * 32) + 1;
+ var range = max - min;
+ var bucketSize = range / (4 * 1024 * 1024 * 1024);
- var scales = [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000];
- var scale = scales[Math.floor(Random.rand() * scales.length)];
- var offset = Random.rand() * scale;
-
- var max = Random.rand() * scale + offset;
- var min = -Random.rand() * scale + offset;
- var bits = Math.floor(Random.rand() * 32) + 1;
- var bits = Math.floor(Random.rand() * 32) + 1;
- var range = max - min;
- var bucketSize = range / (4 * 1024 * 1024 * 1024);
+ return {max: max, min: min, bits: bits, earth: false, bucketSize: bucketSize};
+ };
- return {max: max, min: min, bits: bits, earth: false, bucketSize: bucketSize};
-};
+ var randPoint = function(env, query) {
-var randPoint = function(env, query) {
+ if (query && Random.rand() > 0.5)
+ return query.exact;
- if (query && Random.rand() > 0.5)
- return query.exact;
+ if (env.earth)
+ return [Random.rand() * 360 - 180, Random.rand() * 180 - 90];
- if (env.earth)
- return [Random.rand() * 360 - 180, Random.rand() * 180 - 90];
+ var range = env.max - env.min;
+ return [Random.rand() * range + env.min, Random.rand() * range + env.min];
+ };
- var range = env.max - env.min;
- return [Random.rand() * range + env.min, Random.rand() * range + env.min];
-};
+ var randLocType = function(loc, wrapIn) {
+ return randLocTypes([loc], wrapIn)[0];
+ };
-var randLocType = function(loc, wrapIn) {
- return randLocTypes([loc], wrapIn)[0];
-};
+ var randLocTypes = function(locs, wrapIn) {
-var randLocTypes = function(locs, wrapIn) {
+ var rLocs = [];
- var rLocs = [];
+ for (var i = 0; i < locs.length; i++) {
+ rLocs.push(locs[i]);
+ }
- for (var i = 0; i < locs.length; i++) {
- rLocs.push(locs[i]);
- }
+ if (wrapIn) {
+ var wrappedLocs = [];
+ for (var i = 0; i < rLocs.length; i++) {
+ var wrapper = {};
+ wrapper[wrapIn] = rLocs[i];
+ wrappedLocs.push(wrapper);
+ }
- if (wrapIn) {
- var wrappedLocs = [];
- for (var i = 0; i < rLocs.length; i++) {
- var wrapper = {};
- wrapper[wrapIn] = rLocs[i];
- wrappedLocs.push(wrapper);
+ return wrappedLocs;
}
- return wrappedLocs;
- }
+ return rLocs;
+ };
+
+ var randDataType = function() {
- return rLocs;
-};
+ var scales = [1, 10, 100, 1000, 10000];
+ var docScale = scales[Math.floor(Random.rand() * scales.length)];
+ var locScale = scales[Math.floor(Random.rand() * scales.length)];
-var randDataType = function() {
+ var numDocs = 40000;
+ var maxLocs = 40000;
+ // Make sure we don't blow past our test resources
+ while (numDocs * maxLocs > 40000) {
+ numDocs = Math.floor(Random.rand() * docScale) + 1;
+ maxLocs = Math.floor(Random.rand() * locScale) + 1;
+ }
- var scales = [1, 10, 100, 1000, 10000];
- var docScale = scales[Math.floor(Random.rand() * scales.length)];
- var locScale = scales[Math.floor(Random.rand() * scales.length)];
+ return {numDocs: numDocs, maxLocs: maxLocs};
+ };
- var numDocs = 40000;
- var maxLocs = 40000;
- // Make sure we don't blow past our test resources
- while (numDocs * maxLocs > 40000) {
- numDocs = Math.floor(Random.rand() * docScale) + 1;
- maxLocs = Math.floor(Random.rand() * locScale) + 1;
+ function deg2rad(arg) {
+ return arg * Math.PI / 180.0;
+ }
+ function rad2deg(arg) {
+ return arg * 180.0 / Math.PI;
}
- return {numDocs: numDocs, maxLocs: maxLocs};
-};
-
-function deg2rad(arg) {
- return arg * Math.PI / 180.0;
-}
-function rad2deg(arg) {
- return arg * 180.0 / Math.PI;
-}
-
-function computexscandist(latDegrees, maxDistDegrees) {
- // See s2cap.cc
- //
- // Compute the range of longitudes covered by the cap. We use the law
- // of sines for spherical triangles. Consider the triangle ABC where
- // A is the north pole, B is the center of the cap, and C is the point
- // of tangency between the cap boundary and a line of longitude. Then
- // C is a right angle, and letting a,b,c denote the sides opposite A,B,C,
- // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c).
- // Here "a" is the cap angle, and "c" is the colatitude (90 degrees
- // minus the latitude). This formula also works for negative latitudes.
- //
- // Angle A is the difference of longitudes of B and C.
- var sin_c = Math.cos(deg2rad(latDegrees));
- var sin_a = Math.sin(deg2rad(maxDistDegrees));
- if (sin_a > sin_c) {
- // Double floating number error, return invalid distance
- return 180;
+ function computexscandist(latDegrees, maxDistDegrees) {
+ // See s2cap.cc
+ //
+ // Compute the range of longitudes covered by the cap. We use the law
+ // of sines for spherical triangles. Consider the triangle ABC where
+ // A is the north pole, B is the center of the cap, and C is the point
+ // of tangency between the cap boundary and a line of longitude. Then
+ // C is a right angle, and letting a,b,c denote the sides opposite A,B,C,
+ // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c).
+ // Here "a" is the cap angle, and "c" is the colatitude (90 degrees
+ // minus the latitude). This formula also works for negative latitudes.
+ //
+ // Angle A is the difference of longitudes of B and C.
+ var sin_c = Math.cos(deg2rad(latDegrees));
+ var sin_a = Math.sin(deg2rad(maxDistDegrees));
+ if (sin_a > sin_c) {
+ // Double floating number error, return invalid distance
+ return 180;
+ }
+ var angleA = Math.asin(sin_a / sin_c);
+ return rad2deg(angleA);
}
- var angleA = Math.asin(sin_a / sin_c);
- return rad2deg(angleA);
-}
-function errorMarginForPoint(env) {
- if (!env.bits) {
- return 0.01;
+ function errorMarginForPoint(env) {
+ if (!env.bits) {
+ return 0.01;
+ }
+ var scalingFactor = Math.pow(2, env.bits);
+ return ((env.max - env.min) / scalingFactor) * Math.sqrt(2);
}
- var scalingFactor = Math.pow(2, env.bits);
- return ((env.max - env.min) / scalingFactor) * Math.sqrt(2);
-}
-
-function pointIsOK(startPoint, radius, env) {
- var error = errorMarginForPoint(env);
- var distDegrees = rad2deg(radius) + error;
- // TODO SERVER-24440: Points close to the north and south poles may fail to be returned by
- // $nearSphere queries answered using a "2d" index. We have empirically found that points with
- // latitudes between 89 and 90 degrees are potentially affected by this issue, so we
- // additionally reject any coordinates with a latitude that falls within that range.
- if ((startPoint[1] + distDegrees > 89) || (startPoint[1] - distDegrees < -89)) {
- return false;
+
+ function pointIsOK(startPoint, radius, env) {
+ var error = errorMarginForPoint(env);
+ var distDegrees = rad2deg(radius) + error;
+ // TODO SERVER-24440: Points close to the north and south poles may fail to be returned by
+ // $nearSphere queries answered using a "2d" index. We have empirically found that points
+ // with latitudes between 89 and 90 degrees are potentially affected by this issue, so we
+ // additionally reject any coordinates with a latitude that falls within that range.
+ if ((startPoint[1] + distDegrees > 89) || (startPoint[1] - distDegrees < -89)) {
+ return false;
+ }
+ var xscandist = computexscandist(startPoint[1], distDegrees);
+ return (startPoint[0] + xscandist < 180) && (startPoint[0] - xscandist > -180);
}
- var xscandist = computexscandist(startPoint[1], distDegrees);
- return (startPoint[0] + xscandist < 180) && (startPoint[0] - xscandist > -180);
-}
-
-var randQuery = function(env) {
- var center = randPoint(env);
-
- var sphereRadius = -1;
- var sphereCenter = null;
- if (env.earth) {
- // Get a start point that doesn't require wrapping
- // TODO: Are we a bit too aggressive with wrapping issues?
- var i;
- for (i = 0; i < 5; i++) {
- sphereRadius = Random.rand() * 45 * Math.PI / 180;
- sphereCenter = randPoint(env);
- if (pointIsOK(sphereCenter, sphereRadius, env)) {
- break;
+
+ var randQuery = function(env) {
+ var center = randPoint(env);
+
+ var sphereRadius = -1;
+ var sphereCenter = null;
+ if (env.earth) {
+ // Get a start point that doesn't require wrapping
+ // TODO: Are we a bit too aggressive with wrapping issues?
+ var i;
+ for (i = 0; i < 5; i++) {
+ sphereRadius = Random.rand() * 45 * Math.PI / 180;
+ sphereCenter = randPoint(env);
+ if (pointIsOK(sphereCenter, sphereRadius, env)) {
+ break;
+ }
}
+ if (i == 5)
+ sphereRadius = -1;
}
- if (i == 5)
- sphereRadius = -1;
- }
- var box = [randPoint(env), randPoint(env)];
+ var box = [randPoint(env), randPoint(env)];
- var boxPoly = [
- [box[0][0], box[0][1]],
- [box[0][0], box[1][1]],
- [box[1][0], box[1][1]],
- [box[1][0], box[0][1]]
- ];
+ var boxPoly = [
+ [box[0][0], box[0][1]],
+ [box[0][0], box[1][1]],
+ [box[1][0], box[1][1]],
+ [box[1][0], box[0][1]]
+ ];
- if (box[0][0] > box[1][0]) {
- var swap = box[0][0];
- box[0][0] = box[1][0];
- box[1][0] = swap;
- }
+ if (box[0][0] > box[1][0]) {
+ var swap = box[0][0];
+ box[0][0] = box[1][0];
+ box[1][0] = swap;
+ }
- if (box[0][1] > box[1][1]) {
- var swap = box[0][1];
- box[0][1] = box[1][1];
- box[1][1] = swap;
- }
+ if (box[0][1] > box[1][1]) {
+ var swap = box[0][1];
+ box[0][1] = box[1][1];
+ box[1][1] = swap;
+ }
- return {
- center: center,
- radius: box[1][0] - box[0][0],
- exact: randPoint(env),
- sphereCenter: sphereCenter,
- sphereRadius: sphereRadius,
- box: box,
- boxPoly: boxPoly
+ return {
+ center: center,
+ radius: box[1][0] - box[0][0],
+ exact: randPoint(env),
+ sphereCenter: sphereCenter,
+ sphereRadius: sphereRadius,
+ box: box,
+ boxPoly: boxPoly
+ };
};
-};
-
-var resultTypes = {
- "exact": function(loc) {
- return query.exact[0] == loc[0] && query.exact[1] == loc[1];
- },
- "center": function(loc) {
- return Geo.distance(query.center, loc) <= query.radius;
- },
- "box": function(loc) {
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
-
- },
- "sphere": function(loc) {
- return (query.sphereRadius >= 0
- ? (Geo.sphereDistance(query.sphereCenter, loc) <= query.sphereRadius)
- : false);
- },
- "poly": function(loc) {
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
- }
-};
-var queryResults = function(locs, query, results) {
+ var resultTypes = {
+ "exact": function(loc) {
+ return query.exact[0] == loc[0] && query.exact[1] == loc[1];
+ },
+ "center": function(loc) {
+ return Geo.distance(query.center, loc) <= query.radius;
+ },
+ "box": function(loc) {
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
- if (!results["center"]) {
- for (var type in resultTypes) {
- results[type] = {docsIn: 0, docsOut: 0, locsIn: 0, locsOut: 0};
+ },
+ "sphere": function(loc) {
+ return (query.sphereRadius >= 0
+ ? (Geo.sphereDistance(query.sphereCenter, loc) <= query.sphereRadius)
+ : false);
+ },
+ "poly": function(loc) {
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
}
- }
+ };
- var indResults = {};
- for (var type in resultTypes) {
- indResults[type] = {docIn: false, locsIn: 0, locsOut: 0};
- }
+ var queryResults = function(locs, query, results) {
- for (var type in resultTypes) {
- var docIn = false;
- for (var i = 0; i < locs.length; i++) {
- if (resultTypes[type](locs[i])) {
- results[type].locsIn++;
- indResults[type].locsIn++;
- indResults[type].docIn = true;
- } else {
- results[type].locsOut++;
- indResults[type].locsOut++;
+ if (!results["center"]) {
+ for (var type in resultTypes) {
+ results[type] = {docsIn: 0, docsOut: 0, locsIn: 0, locsOut: 0};
}
}
- if (indResults[type].docIn)
- results[type].docsIn++;
- else
- results[type].docsOut++;
- }
- return indResults;
-};
-
-var randQueryAdditions = function(doc, indResults) {
-
- for (var type in resultTypes) {
- var choice = Random.rand();
- if (Random.rand() < 0.25)
- doc[type] = (indResults[type].docIn ? {docIn: "yes"} : {docIn: "no"});
- else if (Random.rand() < 0.5)
- doc[type] = (indResults[type].docIn ? {docIn: ["yes"]} : {docIn: ["no"]});
- else if (Random.rand() < 0.75)
- doc[type] = (indResults[type].docIn ? [{docIn: "yes"}] : [{docIn: "no"}]);
- else
- doc[type] = (indResults[type].docIn ? [{docIn: ["yes"]}] : [{docIn: ["no"]}]);
- }
-};
+ var indResults = {};
+ for (var type in resultTypes) {
+ indResults[type] = {docIn: false, locsIn: 0, locsOut: 0};
+ }
-var randIndexAdditions = function(indexDoc) {
+ for (var type in resultTypes) {
+ var docIn = false;
+ for (var i = 0; i < locs.length; i++) {
+ if (resultTypes[type](locs[i])) {
+ results[type].locsIn++;
+ indResults[type].locsIn++;
+ indResults[type].docIn = true;
+ } else {
+ results[type].locsOut++;
+ indResults[type].locsOut++;
+ }
+ }
+ if (indResults[type].docIn)
+ results[type].docsIn++;
+ else
+ results[type].docsOut++;
+ }
- for (var type in resultTypes) {
- if (Random.rand() < 0.5)
- continue;
+ return indResults;
+ };
- var choice = Random.rand();
- if (Random.rand() < 0.5)
- indexDoc[type] = 1;
- else
- indexDoc[type + ".docIn"] = 1;
- }
-};
-
-var randYesQuery = function() {
-
- var choice = Math.floor(Random.rand() * 7);
- if (choice == 0)
- return {$ne: "no"};
- else if (choice == 1)
- return "yes";
- else if (choice == 2)
- return /^yes/;
- else if (choice == 3)
- return {$in: ["good", "yes", "ok"]};
- else if (choice == 4)
- return {$exists: true};
- else if (choice == 5)
- return {$nin: ["bad", "no", "not ok"]};
- else if (choice == 6)
- return {$not: /^no/};
-};
-
-var locArray = function(loc) {
- if (loc.x)
- return [loc.x, loc.y];
- if (!loc.length)
- return [loc[0], loc[1]];
- return loc;
-};
-
-var locsArray = function(locs) {
- if (locs.loc) {
- arr = [];
- for (var i = 0; i < locs.loc.length; i++)
- arr.push(locArray(locs.loc[i]));
- return arr;
- } else {
- arr = [];
- for (var i = 0; i < locs.length; i++)
- arr.push(locArray(locs[i].loc));
- return arr;
- }
-};
-
-var minBoxSize = function(env, box) {
- return env.bucketSize * Math.pow(2, minBucketScale(env, box));
-};
-
-var minBucketScale = function(env, box) {
-
- if (box.length && box[0].length)
- box = [box[0][0] - box[1][0], box[0][1] - box[1][1]];
-
- if (box.length)
- box = Math.max(box[0], box[1]);
-
- print(box);
- print(env.bucketSize);
-
- return Math.ceil(Math.log(box / env.bucketSize) / Math.log(2));
-
-};
-
-// TODO: Add spherical $uniqueDocs tests
-var numTests = 100;
-
-// Our seed will change every time this is run, but
-// each individual test will be reproducible given
-// that seed and test number
-var seed = new Date().getTime();
-// seed = 175 + 288 + 12
-
-for (var test = 0; test < numTests; test++) {
- Random.srand(seed + test);
- // Random.srand( 42240 )
- // Random.srand( 7344 )
- var t = db.testAllGeo;
- t.drop();
-
- print("Generating test environment #" + test);
- var env = randEnvironment();
- // env.bits = 11
- var query = randQuery(env);
- var data = randDataType();
- // data.numDocs = 5; data.maxLocs = 1;
- var paddingSize = Math.floor(Random.rand() * 10 + 1);
- var results = {};
- var totalPoints = 0;
- print("Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs +
- " locs ");
-
- var bulk = t.initializeUnorderedBulkOp();
- for (var i = 0; i < data.numDocs; i++) {
- var numLocs = Math.floor(Random.rand() * data.maxLocs + 1);
- totalPoints += numLocs;
-
- var multiPoint = [];
- for (var p = 0; p < numLocs; p++) {
- var point = randPoint(env, query);
- multiPoint.push(point);
- }
+ var randQueryAdditions = function(doc, indResults) {
- var indResults = queryResults(multiPoint, query, results);
+ for (var type in resultTypes) {
+ var choice = Random.rand();
+ if (Random.rand() < 0.25)
+ doc[type] = (indResults[type].docIn ? {docIn: "yes"} : {docIn: "no"});
+ else if (Random.rand() < 0.5)
+ doc[type] = (indResults[type].docIn ? {docIn: ["yes"]} : {docIn: ["no"]});
+ else if (Random.rand() < 0.75)
+ doc[type] = (indResults[type].docIn ? [{docIn: "yes"}] : [{docIn: "no"}]);
+ else
+ doc[type] = (indResults[type].docIn ? [{docIn: ["yes"]}] : [{docIn: ["no"]}]);
+ }
+ };
- var doc;
- // Nest the keys differently
- if (Random.rand() < 0.5)
- doc = {locs: {loc: randLocTypes(multiPoint)}};
- else
- doc = {locs: randLocTypes(multiPoint, "loc")};
+ var randIndexAdditions = function(indexDoc) {
- randQueryAdditions(doc, indResults);
+ for (var type in resultTypes) {
+ if (Random.rand() < 0.5)
+ continue;
+
+ var choice = Random.rand();
+ if (Random.rand() < 0.5)
+ indexDoc[type] = 1;
+ else
+ indexDoc[type + ".docIn"] = 1;
+ }
+ };
- doc._id = i;
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
-
- var indexDoc = {"locs.loc": "2d"};
- randIndexAdditions(indexDoc);
-
- // "earth" is used to drive test setup and not a valid createIndexes option or required at this
- // point. It must be removed before calling ensureIndexes().
- delete env.earth;
-
- assert.commandWorked(t.ensureIndex(indexDoc, env));
- assert.isnull(db.getLastError());
-
- var padding = "x";
- for (var i = 0; i < paddingSize; i++)
- padding = padding + padding;
-
- print(padding);
-
- printjson({
- seed: seed,
- test: test,
- env: env,
- query: query,
- data: data,
- results: results,
- paddingSize: paddingSize
- });
-
- // exact
- print("Exact query...");
- assert.eq(
- results.exact.docsIn,
- t.find({"locs.loc": randLocType(query.exact), "exact.docIn": randYesQuery()}).count());
-
- // $center
- print("Center query...");
- print("Min box : " + minBoxSize(env, query.radius));
- assert.eq(results.center.docsIn,
- t.find({
- "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: 1}},
- "center.docIn": randYesQuery()
- }).count());
-
- print("Center query update...");
- var res = t.update({
- "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: true}},
- "center.docIn": randYesQuery()
- },
- {$set: {centerPaddingA: padding}},
- false,
- true);
- assert.eq(results.center.docsIn, res.nModified);
-
- if (query.sphereRadius >= 0) {
- print("Center sphere query...");
- // $centerSphere
- assert.eq(
- results.sphere.docsIn,
- t.find({
- "locs.loc": {$within: {$centerSphere: [query.sphereCenter, query.sphereRadius]}},
- "sphere.docIn": randYesQuery()
- }).count());
+ var randYesQuery = function() {
+
+ var choice = Math.floor(Random.rand() * 7);
+ if (choice == 0)
+ return {$ne: "no"};
+ else if (choice == 1)
+ return "yes";
+ else if (choice == 2)
+ return /^yes/;
+ else if (choice == 3)
+ return {$in: ["good", "yes", "ok"]};
+ else if (choice == 4)
+ return {$exists: true};
+ else if (choice == 5)
+ return {$nin: ["bad", "no", "not ok"]};
+ else if (choice == 6)
+ return {$not: /^no/};
+ };
- print("Center sphere query update...");
- res = t.update({
- "locs.loc": {
- $within:
- {$centerSphere: [query.sphereCenter, query.sphereRadius], $uniqueDocs: true}
- },
- "sphere.docIn": randYesQuery()
- },
- {$set: {spherePaddingA: padding}},
- false,
- true);
- assert.eq(results.sphere.docsIn, res.nModified);
- }
+ var locArray = function(loc) {
+ if (loc.x)
+ return [loc.x, loc.y];
+ if (!loc.length)
+ return [loc[0], loc[1]];
+ return loc;
+ };
- // $box
- print("Box query...");
- assert.eq(results.box.docsIn, t.find({
- "locs.loc": {$within: {$box: query.box, $uniqueDocs: true}},
- "box.docIn": randYesQuery()
- }).count());
-
- // $polygon
- print("Polygon query...");
- assert.eq(results.poly.docsIn, t.find({
- "locs.loc": {$within: {$polygon: query.boxPoly}},
- "poly.docIn": randYesQuery()
- }).count());
-
- var defaultDocLimit = 100;
-
- // $near
- print("Near query...");
- assert.eq(results.center.docsIn,
- t.find({"locs.loc": {$near: query.center, $maxDistance: query.radius}}).count(true),
- "Near query: center: " + query.center + "; radius: " + query.radius + "; docs: " +
- results.center.docsIn + "; locs: " + results.center.locsIn);
-
- if (query.sphereRadius >= 0) {
- print("Near sphere query...");
- // $centerSphere
- assert.eq(
- results.sphere.docsIn,
- t.find({
- "locs.loc": {$nearSphere: query.sphereCenter, $maxDistance: query.sphereRadius}
- }).count(true),
- "Near sphere query: sphere center: " + query.sphereCenter + "; radius: " +
- query.sphereRadius + "; docs: " + results.sphere.docsIn + "; locs: " +
- results.sphere.locsIn);
- }
+ var locsArray = function(locs) {
+ if (locs.loc) {
+ const arr = [];
+ for (var i = 0; i < locs.loc.length; i++)
+ arr.push(locArray(locs.loc[i]));
+ return arr;
+ } else {
+ const arr = [];
+ for (var i = 0; i < locs.length; i++)
+ arr.push(locArray(locs[i].loc));
+ return arr;
+ }
+ };
- // geoNear
- // results limited by size of objects
- if (data.maxLocs < defaultDocLimit) {
- // GeoNear query
- print("GeoNear query...");
- // GeoNear command has a default doc limit 100.
- assert.eq(
- Math.min(defaultDocLimit, results.center.docsIn),
- t.getDB()
- .runCommand({geoNear: "testAllGeo", near: query.center, maxDistance: query.radius})
- .results.length,
- "GeoNear query: center: " + query.center + "; radius: " + query.radius + "; docs: " +
- results.center.docsIn + "; locs: " + results.center.locsIn);
+ var minBoxSize = function(env, box) {
+ return env.bucketSize * Math.pow(2, minBucketScale(env, box));
+ };
- var num = Math.min(2 * defaultDocLimit, 2 * results.center.docsIn);
+ var minBucketScale = function(env, box) {
- var output = db.runCommand({
- geoNear: "testAllGeo",
- near: query.center,
- maxDistance: query.radius,
- includeLocs: true,
- num: num
- }).results;
+ if (box.length && box[0].length)
+ box = [box[0][0] - box[1][0], box[0][1] - box[1][1]];
- assert.eq(Math.min(num, results.center.docsIn),
- output.length,
- "GeoNear query with limit of " + num + ": center: " + query.center +
- "; radius: " + query.radius + "; docs: " + results.center.docsIn +
- "; locs: " + results.center.locsIn);
+ if (box.length)
+ box = Math.max(box[0], box[1]);
- var distance = 0;
- for (var i = 0; i < output.length; i++) {
- var retDistance = output[i].dis;
- var retLoc = locArray(output[i].loc);
+ print(box);
+ print(env.bucketSize);
- var arrLocs = locsArray(output[i].obj.locs);
+ return Math.ceil(Math.log(box / env.bucketSize) / Math.log(2));
- assert.contains(retLoc, arrLocs);
+ };
- var distInObj = false;
- for (var j = 0; j < arrLocs.length && distInObj == false; j++) {
- var newDistance = Geo.distance(locArray(query.center), arrLocs[j]);
- distInObj =
- (newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001);
+ // TODO: Add spherical $uniqueDocs tests
+ var numTests = 100;
+
+ // Our seed will change every time this is run, but
+ // each individual test will be reproducible given
+ // that seed and test number
+ var seed = new Date().getTime();
+ // seed = 175 + 288 + 12
+
+ for (var test = 0; test < numTests; test++) {
+ Random.srand(seed + test);
+ // Random.srand( 42240 )
+ // Random.srand( 7344 )
+ var t = db.testAllGeo;
+ t.drop();
+
+ print("Generating test environment #" + test);
+ var env = randEnvironment();
+ // env.bits = 11
+ var query = randQuery(env);
+ var data = randDataType();
+ // data.numDocs = 5; data.maxLocs = 1;
+ var paddingSize = Math.floor(Random.rand() * 10 + 1);
+ var results = {};
+ var totalPoints = 0;
+ print("Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs +
+ " locs ");
+
+ var bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < data.numDocs; i++) {
+ var numLocs = Math.floor(Random.rand() * data.maxLocs + 1);
+ totalPoints += numLocs;
+
+ var multiPoint = [];
+ for (var p = 0; p < numLocs; p++) {
+ var point = randPoint(env, query);
+ multiPoint.push(point);
}
- assert(distInObj);
- assert.between(retDistance - 0.0001,
- Geo.distance(locArray(query.center), retLoc),
- retDistance + 0.0001);
- assert.lte(retDistance, query.radius);
- assert.gte(retDistance, distance);
- distance = retDistance;
+ var indResults = queryResults(multiPoint, query, results);
+
+ var doc;
+ // Nest the keys differently
+ if (Random.rand() < 0.5)
+ doc = {locs: {loc: randLocTypes(multiPoint)}};
+ else
+ doc = {locs: randLocTypes(multiPoint, "loc")};
+
+ randQueryAdditions(doc, indResults);
+
+ doc._id = i;
+ bulk.insert(doc);
}
- }
+ assert.writeOK(bulk.execute());
+
+ var indexDoc = {"locs.loc": "2d"};
+ randIndexAdditions(indexDoc);
+
+ // "earth" is used to drive test setup and not a valid createIndexes option or required at
+ // this point. It must be removed before calling ensureIndexes().
+ delete env.earth;
+
+ assert.commandWorked(t.ensureIndex(indexDoc, env));
+ assert.isnull(db.getLastError());
+
+ var padding = "x";
+ for (var i = 0; i < paddingSize; i++)
+ padding = padding + padding;
+
+ print(padding);
+
+ printjson({
+ seed: seed,
+ test: test,
+ env: env,
+ query: query,
+ data: data,
+ results: results,
+ paddingSize: paddingSize
+ });
+
+ // exact
+ print("Exact query...");
+ assert.eq(
+ results.exact.docsIn,
+ t.find({"locs.loc": randLocType(query.exact), "exact.docIn": randYesQuery()}).count());
+
+ // $center
+ print("Center query...");
+ print("Min box : " + minBoxSize(env, query.radius));
+ assert.eq(
+ results.center.docsIn,
+ t.find({
+ "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: 1}},
+ "center.docIn": randYesQuery()
+ }).count());
- // $polygon
- print("Polygon remove...");
- res =
- t.remove({"locs.loc": {$within: {$polygon: query.boxPoly}}, "poly.docIn": randYesQuery()});
- assert.eq(results.poly.docsIn, res.nRemoved);
-}
+ print("Center query update...");
+ var res = t.update({
+ "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: true}},
+ "center.docIn": randYesQuery()
+ },
+ {$set: {centerPaddingA: padding}},
+ false,
+ true);
+ assert.eq(results.center.docsIn, res.nModified);
+
+ if (query.sphereRadius >= 0) {
+ print("Center sphere query...");
+ // $centerSphere
+ assert.eq(results.sphere.docsIn,
+ t.find({
+ "locs.loc":
+ {$within: {$centerSphere: [query.sphereCenter, query.sphereRadius]}},
+ "sphere.docIn": randYesQuery()
+ }).count());
+
+ print("Center sphere query update...");
+ res = t.update({
+ "locs.loc": {
+ $within: {
+ $centerSphere: [query.sphereCenter, query.sphereRadius],
+ $uniqueDocs: true
+ }
+ },
+ "sphere.docIn": randYesQuery()
+ },
+ {$set: {spherePaddingA: padding}},
+ false,
+ true);
+ assert.eq(results.sphere.docsIn, res.nModified);
+ }
+
+ // $box
+ print("Box query...");
+ assert.eq(results.box.docsIn,
+ t.find({
+ "locs.loc": {$within: {$box: query.box, $uniqueDocs: true}},
+ "box.docIn": randYesQuery()
+ }).count());
+
+ // $polygon
+ print("Polygon query...");
+ assert.eq(results.poly.docsIn, t.find({
+ "locs.loc": {$within: {$polygon: query.boxPoly}},
+ "poly.docIn": randYesQuery()
+ }).count());
+
+ var defaultDocLimit = 100;
+
+ // $near
+ print("Near query...");
+ assert.eq(
+ results.center.docsIn,
+ t.find({"locs.loc": {$near: query.center, $maxDistance: query.radius}}).count(true),
+ "Near query: center: " + query.center + "; radius: " + query.radius + "; docs: " +
+ results.center.docsIn + "; locs: " + results.center.locsIn);
+
+ if (query.sphereRadius >= 0) {
+ print("Near sphere query...");
+ // $centerSphere
+ assert.eq(results.sphere.docsIn,
+ t.find({
+ "locs.loc":
+ {$nearSphere: query.sphereCenter, $maxDistance: query.sphereRadius}
+ }).count(true),
+ "Near sphere query: sphere center: " + query.sphereCenter + "; radius: " +
+ query.sphereRadius + "; docs: " + results.sphere.docsIn + "; locs: " +
+ results.sphere.locsIn);
+ }
+
+ // geoNear
+ // results limited by size of objects
+ if (data.maxLocs < defaultDocLimit) {
+ // GeoNear query
+ print("GeoNear query...");
+ // GeoNear command has a default doc limit 100.
+ assert.eq(
+ Math.min(defaultDocLimit, results.center.docsIn),
+ t.getDB()
+ .runCommand(
+ {geoNear: "testAllGeo", near: query.center, maxDistance: query.radius})
+ .results.length,
+ "GeoNear query: center: " + query.center + "; radius: " + query.radius +
+ "; docs: " + results.center.docsIn + "; locs: " + results.center.locsIn);
+
+ var num = Math.min(2 * defaultDocLimit, 2 * results.center.docsIn);
+
+ var output = db.runCommand({
+ geoNear: "testAllGeo",
+ near: query.center,
+ maxDistance: query.radius,
+ includeLocs: true,
+ num: num
+ }).results;
+
+ assert.eq(Math.min(num, results.center.docsIn),
+ output.length,
+ "GeoNear query with limit of " + num + ": center: " + query.center +
+ "; radius: " + query.radius + "; docs: " + results.center.docsIn +
+ "; locs: " + results.center.locsIn);
+
+ var distance = 0;
+ for (var i = 0; i < output.length; i++) {
+ var retDistance = output[i].dis;
+ var retLoc = locArray(output[i].loc);
+
+ var arrLocs = locsArray(output[i].obj.locs);
+
+ assert.contains(retLoc, arrLocs);
+
+ var distInObj = false;
+ for (var j = 0; j < arrLocs.length && distInObj == false; j++) {
+ var newDistance = Geo.distance(locArray(query.center), arrLocs[j]);
+ distInObj = (newDistance >= retDistance - 0.0001 &&
+ newDistance <= retDistance + 0.0001);
+ }
+
+ assert(distInObj);
+ assert.between(retDistance - 0.0001,
+ Geo.distance(locArray(query.center), retLoc),
+ retDistance + 0.0001);
+ assert.lte(retDistance, query.radius);
+ assert.gte(retDistance, distance);
+ distance = retDistance;
+ }
+ }
+
+ // $polygon
+ print("Polygon remove...");
+ res = t.remove(
+ {"locs.loc": {$within: {$polygon: query.boxPoly}}, "poly.docIn": randYesQuery()});
+ assert.eq(results.poly.docsIn, res.nRemoved);
+ }
-testServer.stop();
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js
index eb8a03ce739..52164b94860 100644
--- a/jstests/noPassthrough/geo_mnypts_plus_fields.js
+++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js
@@ -1,100 +1,107 @@
// Test sanity of geo queries with a lot of points
-load("jstests/libs/slow_weekly_util.js");
-testServer = new SlowWeeklyMongod("geo_mnypts_plus_fields");
-db = testServer.getDB("test");
+(function() {
+ "use strict";
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ const db = conn.getDB("test");
-var maxFields = 3;
+ var maxFields = 3;
-for (var fields = 1; fields < maxFields; fields++) {
- var coll = db.testMnyPts;
- coll.drop();
+ for (var fields = 1; fields < maxFields; fields++) {
+ var coll = db.testMnyPts;
+ coll.drop();
- var totalPts = 500 * 1000;
+ var totalPts = 500 * 1000;
- var bulk = coll.initializeUnorderedBulkOp();
- // Add points in a 100x100 grid
- for (var i = 0; i < totalPts; i++) {
- var ii = i % 10000;
+ var bulk = coll.initializeUnorderedBulkOp();
+ // Add points in a 100x100 grid
+ for (var i = 0; i < totalPts; i++) {
+ var ii = i % 10000;
- var doc = {loc: [ii % 100, Math.floor(ii / 100)]};
+ var doc = {loc: [ii % 100, Math.floor(ii / 100)]};
- // Add fields with different kinds of data
+ // Add fields with different kinds of data
+ for (var j = 0; j < fields; j++) {
+ var field = null;
+
+ if (j % 3 == 0) {
+ // Make half the points not searchable
+ field = "abcdefg" + (i % 2 == 0 ? "h" : "");
+ } else if (j % 3 == 1) {
+ field = new Date();
+ } else {
+ field = true;
+ }
+
+ doc["field" + j] = field;
+ }
+
+ bulk.insert(doc);
+ }
+ assert.writeOK(bulk.execute());
+
+ // Create the query for the additional fields
+ const queryFields = {};
for (var j = 0; j < fields; j++) {
var field = null;
if (j % 3 == 0) {
- // Make half the points not searchable
- field = "abcdefg" + (i % 2 == 0 ? "h" : "");
+ field = "abcdefg";
} else if (j % 3 == 1) {
- field = new Date();
+ field = {$lte: new Date()};
} else {
field = true;
}
- doc["field" + j] = field;
+ queryFields["field" + j] = field;
}
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
-
- // Create the query for the additional fields
- queryFields = {};
- for (var j = 0; j < fields; j++) {
- var field = null;
-
- if (j % 3 == 0) {
- field = "abcdefg";
- } else if (j % 3 == 1) {
- field = {$lte: new Date()};
- } else {
- field = true;
+ coll.ensureIndex({loc: "2d"});
+
+ // Check that quarter of points in each quadrant
+ for (var i = 0; i < 4; i++) {
+ var x = i % 2;
+ var y = Math.floor(i / 2);
+
+ var box = [[0, 0], [49, 49]];
+ box[0][0] += (x == 1 ? 50 : 0);
+ box[1][0] += (x == 1 ? 50 : 0);
+ box[0][1] += (y == 1 ? 50 : 0);
+ box[1][1] += (y == 1 ? 50 : 0);
+
+ // Now only half of each result comes back
+ assert.eq(totalPts / (4 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
+ assert.eq(
+ totalPts / (4 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
}
- queryFields["field" + j] = field;
- }
-
- coll.ensureIndex({loc: "2d"});
-
- // Check that quarter of points in each quadrant
- for (var i = 0; i < 4; i++) {
- var x = i % 2;
- var y = Math.floor(i / 2);
-
- var box = [[0, 0], [49, 49]];
- box[0][0] += (x == 1 ? 50 : 0);
- box[1][0] += (x == 1 ? 50 : 0);
- box[0][1] += (y == 1 ? 50 : 0);
- box[1][1] += (y == 1 ? 50 : 0);
-
- // Now only half of each result comes back
- assert.eq(totalPts / (4 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
- assert.eq(totalPts / (4 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
- }
+ // Check that half of points in each half
+ for (var i = 0; i < 2; i++) {
+ var box = [[0, 0], [49, 99]];
+ box[0][0] += (i == 1 ? 50 : 0);
+ box[1][0] += (i == 1 ? 50 : 0);
+
+ assert.eq(totalPts / (2 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
+ assert.eq(
+ totalPts / (2 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
+ }
- // Check that half of points in each half
- for (var i = 0; i < 2; i++) {
- var box = [[0, 0], [49, 99]];
- box[0][0] += (i == 1 ? 50 : 0);
- box[1][0] += (i == 1 ? 50 : 0);
+ // Check that all but corner set of points in radius
+ var circle = [[0, 0], (100 - 1) * Math.sqrt(2) - 0.25];
- assert.eq(totalPts / (2 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
- assert.eq(totalPts / (2 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
+ // All [99,x] pts are field0 : "abcdefg"
+ assert.eq(
+ totalPts / 2 - totalPts / (100 * 100),
+ coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).count());
+ assert.eq(
+ totalPts / 2 - totalPts / (100 * 100),
+ coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).itcount());
}
- // Check that all but corner set of points in radius
- var circle = [[0, 0], (100 - 1) * Math.sqrt(2) - 0.25];
-
- // All [99,x] pts are field0 : "abcdefg"
- assert.eq(totalPts / 2 - totalPts / (100 * 100),
- coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).count());
- assert.eq(totalPts / 2 - totalPts / (100 * 100),
- coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).itcount());
-}
-
-testServer.stop();
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/geo_near_random1.js b/jstests/noPassthrough/geo_near_random1.js
index 1e70ae39c8a..98f51636ea6 100644
--- a/jstests/noPassthrough/geo_near_random1.js
+++ b/jstests/noPassthrough/geo_near_random1.js
@@ -1,18 +1,22 @@
// this tests all points using $near
-load("jstests/libs/geo_near_random.js");
-load("jstests/libs/slow_weekly_util.js");
+var db;
+(function() {
+ "use strict";
+ load("jstests/libs/geo_near_random.js");
-testServer = new SlowWeeklyMongod("geo_near_random1");
-db = testServer.getDB("test");
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ db = conn.getDB("test");
-var test = new GeoNearRandomTest("weekly.geo_near_random1");
+ var test = new GeoNearRandomTest("weekly.geo_near_random1");
-test.insertPts(1000);
+ test.insertPts(1000);
-test.testPt([0, 0]);
-test.testPt(test.mkPt());
-test.testPt(test.mkPt());
-test.testPt(test.mkPt());
-test.testPt(test.mkPt());
+ test.testPt([0, 0]);
+ test.testPt(test.mkPt());
+ test.testPt(test.mkPt());
+ test.testPt(test.mkPt());
+ test.testPt(test.mkPt());
-testServer.stop();
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/geo_near_random2.js b/jstests/noPassthrough/geo_near_random2.js
index 1c04cf2d223..4ba37b5da08 100644
--- a/jstests/noPassthrough/geo_near_random2.js
+++ b/jstests/noPassthrough/geo_near_random2.js
@@ -1,29 +1,30 @@
// this tests 1% of all points using $near and $nearSphere
-load("jstests/libs/geo_near_random.js");
-load("jstests/libs/slow_weekly_util.js");
+var db;
+(function() {
+ "use strict";
+ load("jstests/libs/geo_near_random.js");
-testServer = new SlowWeeklyMongod("geo_near_random2");
-db = testServer.getDB("test");
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ db = conn.getDB("test");
-var test = new GeoNearRandomTest("weekly.geo_near_random2");
+ var test = new GeoNearRandomTest("weekly.geo_near_random2");
-test.insertPts(50000);
+ test.insertPts(50000);
-opts = {
- sphere: 0,
- nToTest: test.nPts * 0.01
-};
-test.testPt([0, 0], opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
+ const opts = {sphere: 0, nToTest: test.nPts * 0.01};
+ test.testPt([0, 0], opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
-opts.sphere = 1;
-test.testPt([0, 0], opts);
-test.testPt(test.mkPt(0.8), opts);
-test.testPt(test.mkPt(0.8), opts);
-test.testPt(test.mkPt(0.8), opts);
-test.testPt(test.mkPt(0.8), opts);
+ opts.sphere = 1;
+ test.testPt([0, 0], opts);
+ test.testPt(test.mkPt(0.8), opts);
+ test.testPt(test.mkPt(0.8), opts);
+ test.testPt(test.mkPt(0.8), opts);
+ test.testPt(test.mkPt(0.8), opts);
-testServer.stop();
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index 00670f3f2db..d8a9c8e56a0 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -1,128 +1,130 @@
// Test background index creation
-load("jstests/libs/slow_weekly_util.js");
-
-var testServer = new SlowWeeklyMongod("indexbg1");
-var db = testServer.getDB("test");
-var baseName = "jstests_indexbg1";
-
-var parallel = function() {
- return db[baseName + "_parallelStatus"];
-};
-
-var resetParallel = function() {
- parallel().drop();
-};
-
-// Return the PID to call `waitpid` on for clean shutdown.
-var doParallel = function(work) {
- resetParallel();
- print("doParallel: " + work);
- return startMongoProgramNoConnect(
- "mongo",
- "--eval",
- work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
- db.getMongo().host);
-};
-
-var doneParallel = function() {
- return !!parallel().findOne();
-};
-
-var waitParallel = function() {
- assert.soon(function() {
- return doneParallel();
- }, "parallel did not finish in time", 300000, 1000);
-};
-
-var size = 400 * 1000;
-var bgIndexBuildPid;
-while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
- print("size: " + size);
-
- var fullName = "db." + baseName;
- var t = db[baseName];
- t.drop();
-
- var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute());
- assert.eq(size, t.count());
-
- bgIndexBuildPid = doParallel(fullName + ".ensureIndex( {i:1}, {background:true} )");
- try {
- // wait for indexing to start
- print("wait for indexing to start");
+(function() {
+ "use strict";
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ var db = conn.getDB("test");
+ var baseName = "jstests_indexbg1";
+
+ var parallel = function() {
+ return db[baseName + "_parallelStatus"];
+ };
+
+ var resetParallel = function() {
+ parallel().drop();
+ };
+
+ // Return the PID to call `waitpid` on for clean shutdown.
+ var doParallel = function(work) {
+ resetParallel();
+ print("doParallel: " + work);
+ return startMongoProgramNoConnect(
+ "mongo",
+ "--eval",
+ work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
+ db.getMongo().host);
+ };
+
+ var doneParallel = function() {
+ return !!parallel().findOne();
+ };
+
+ var waitParallel = function() {
assert.soon(function() {
- return 2 === t.getIndexes().length;
- }, "no index created", 30000, 50);
- print("started.");
- sleep(1000); // there is a race between when the index build shows up in curop and
- // when it first attempts to grab a write lock.
- assert.eq(size, t.count());
- assert.eq(100, t.findOne({i: 100}).i);
- var q = t.find();
- for (i = 0; i < 120; ++i) { // getmore
- q.next();
- assert(q.hasNext(), "no next");
+ return doneParallel();
+ }, "parallel did not finish in time", 300000, 1000);
+ };
+
+ var size = 400 * 1000;
+ var bgIndexBuildPid;
+ while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
+ print("size: " + size);
+
+ var fullName = "db." + baseName;
+ var t = db[baseName];
+ t.drop();
+
+ var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp();
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
- var ex = t.find({i: 100}).limit(-1).explain("executionStats");
- printjson(ex);
- assert(ex.executionStats.totalKeysExamined < 1000,
- "took too long to find 100: " + tojson(ex));
-
- assert.writeOK(t.remove({i: 40}, true)); // table scan
- assert.writeOK(t.update({i: 10}, {i: -10})); // should scan 10
-
- var id = t.find().hint({$natural: -1}).next()._id;
-
- assert.writeOK(t.update({_id: id}, {i: -2}));
- assert.writeOK(t.save({i: -50}));
- assert.writeOK(t.save({i: size + 2}));
+ assert.writeOK(bulk.execute());
+ assert.eq(size, t.count());
- assert.eq(size + 1, t.count());
+ bgIndexBuildPid = doParallel(fullName + ".ensureIndex( {i:1}, {background:true} )");
+ try {
+ // wait for indexing to start
+ print("wait for indexing to start");
+ assert.soon(function() {
+ return 2 === t.getIndexes().length;
+ }, "no index created", 30000, 50);
+ print("started.");
+ sleep(1000); // there is a race between when the index build shows up in curop and
+ // when it first attempts to grab a write lock.
+ assert.eq(size, t.count());
+ assert.eq(100, t.findOne({i: 100}).i);
+ var q = t.find();
+ for (i = 0; i < 120; ++i) { // getmore
+ q.next();
+ assert(q.hasNext(), "no next");
+ }
+ var ex = t.find({i: 100}).limit(-1).explain("executionStats");
+ printjson(ex);
+ assert(ex.executionStats.totalKeysExamined < 1000,
+ "took too long to find 100: " + tojson(ex));
+
+ assert.writeOK(t.remove({i: 40}, true)); // table scan
+ assert.writeOK(t.update({i: 10}, {i: -10})); // should scan 10
+
+ var id = t.find().hint({$natural: -1}).next()._id;
+
+ assert.writeOK(t.update({_id: id}, {i: -2}));
+ assert.writeOK(t.save({i: -50}));
+ assert.writeOK(t.save({i: size + 2}));
+
+ assert.eq(size + 1, t.count());
+
+ print("finished with checks");
+ } catch (e) {
+ // only a failure if we're still indexing
+ // wait for parallel status to update to reflect indexing status
+ print("caught exception: " + e);
+ sleep(1000);
+ if (!doneParallel()) {
+ throw e;
+ }
+ print("but that's OK");
+ }
- print("finished with checks");
- } catch (e) {
- // only a failure if we're still indexing
- // wait for parallel status to update to reflect indexing status
- print("caught exception: " + e);
- sleep(1000);
+ print("going to check if index is done");
if (!doneParallel()) {
- throw e;
+ break;
}
- print("but that's OK");
+ print("indexing finished too soon, retrying...");
+ // Although the index build finished, ensure the shell has exited.
+ waitProgram(bgIndexBuildPid);
+ size *= 2;
+ assert(size < 200000000, "unable to run checks in parallel with index creation");
}
- print("going to check if index is done");
- if (!doneParallel()) {
- break;
- }
- print("indexing finished too soon, retrying...");
- // Although the index build finished, ensure the shell has exited.
+ print("our tests done, waiting for parallel to finish");
+ waitParallel();
+ // Ensure the shell has exited cleanly. Otherwise the test harness may send a SIGTERM which can
+ // lead to a false test failure.
waitProgram(bgIndexBuildPid);
- size *= 2;
- assert(size < 200000000, "unable to run checks in parallel with index creation");
-}
-
-print("our tests done, waiting for parallel to finish");
-waitParallel();
-// Ensure the shell has exited cleanly. Otherwise the test harness may send a SIGTERM which can lead
-// to a false test failure.
-waitProgram(bgIndexBuildPid);
-print("finished");
-
-assert.eq(1, t.count({i: -10}));
-assert.eq(1, t.count({i: -2}));
-assert.eq(1, t.count({i: -50}));
-assert.eq(1, t.count({i: size + 2}));
-assert.eq(0, t.count({i: 40}));
-print("about to drop index");
-t.dropIndex({i: 1});
-var gle = db.getLastError();
-printjson(gle);
-assert(!gle);
-
-testServer.stop();
+ print("finished");
+
+ assert.eq(1, t.count({i: -10}));
+ assert.eq(1, t.count({i: -2}));
+ assert.eq(1, t.count({i: -50}));
+ assert.eq(1, t.count({i: size + 2}));
+ assert.eq(0, t.count({i: 40}));
+ print("about to drop index");
+ t.dropIndex({i: 1});
+ var gle = db.getLastError();
+ printjson(gle);
+ assert(!gle);
+
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index e9ac45c8b78..cc89115cdd3 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -1,96 +1,98 @@
// Test background index creation w/ constraints
-load("jstests/libs/slow_weekly_util.js");
-
-var testServer = new SlowWeeklyMongod("indexbg2");
-var db = testServer.getDB("test");
-var baseName = "jstests_index12";
+(function() {
+ "use strict";
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ var db = conn.getDB("test");
+ var baseName = "jstests_index12";
-var parallel = function() {
- return db[baseName + "_parallelStatus"];
-};
+ var parallel = function() {
+ return db[baseName + "_parallelStatus"];
+ };
-var resetParallel = function() {
- parallel().drop();
-};
+ var resetParallel = function() {
+ parallel().drop();
+ };
-// Return the PID to call `waitpid` on for clean shutdown.
-var doParallel = function(work) {
- resetParallel();
- return startMongoProgramNoConnect(
- "mongo",
- "--eval",
- work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
- db.getMongo().host);
-};
+ // Return the PID to call `waitpid` on for clean shutdown.
+ var doParallel = function(work) {
+ resetParallel();
+ return startMongoProgramNoConnect(
+ "mongo",
+ "--eval",
+ work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
+ db.getMongo().host);
+ };
-var doneParallel = function() {
- return !!parallel().findOne();
-};
+ var doneParallel = function() {
+ return !!parallel().findOne();
+ };
-var waitParallel = function() {
- assert.soon(function() {
- return doneParallel();
- }, "parallel did not finish in time", 300000, 1000);
-};
+ var waitParallel = function() {
+ assert.soon(function() {
+ return doneParallel();
+ }, "parallel did not finish in time", 300000, 1000);
+ };
-var doTest = function() {
- "use strict";
- var size = 10000;
- var bgIndexBuildPid;
- while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
- print("size: " + size);
- var fullName = "db." + baseName;
- var t = db[baseName];
- t.drop();
+ var doTest = function() {
+ "use strict";
+ var size = 10000;
+ var bgIndexBuildPid;
+ while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
+ print("size: " + size);
+ var fullName = "db." + baseName;
+ var t = db[baseName];
+ t.drop();
- for (var i = 0; i < size; ++i) {
- db.jstests_index12.save({i: i});
- }
- assert.eq(size, t.count());
+ for (var i = 0; i < size; ++i) {
+ db.jstests_index12.save({i: i});
+ }
+ assert.eq(size, t.count());
- bgIndexBuildPid =
- doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true} )");
- try {
- // wait for indexing to start
- assert.soon(function() {
- return 2 === t.getIndexes().length;
- }, "no index created", 30000, 50);
- assert.writeError(t.save({i: 0, n: true})); // duplicate key violation
- assert.writeOK(t.save({i: size - 1, n: true}));
- } catch (e) {
- // only a failure if we're still indexing
- // wait for parallel status to update to reflect indexing status
- sleep(1000);
+ bgIndexBuildPid =
+ doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true} )");
+ try {
+ // wait for indexing to start
+ assert.soon(function() {
+ return 2 === t.getIndexes().length;
+ }, "no index created", 30000, 50);
+ assert.writeError(t.save({i: 0, n: true})); // duplicate key violation
+ assert.writeOK(t.save({i: size - 1, n: true}));
+ } catch (e) {
+ // only a failure if we're still indexing
+ // wait for parallel status to update to reflect indexing status
+ sleep(1000);
+ if (!doneParallel()) {
+ waitProgram(bgIndexBuildPid);
+ throw e;
+ }
+ }
if (!doneParallel()) {
+ // Ensure the shell has exited cleanly. Otherwise the test harness may send a
+ // SIGTERM which can lead to a false test failure.
waitProgram(bgIndexBuildPid);
- throw e;
+ break;
}
- }
- if (!doneParallel()) {
- // Ensure the shell has exited cleanly. Otherwise the test harness may send a SIGTERM
- // which can lead to a false test failure.
+ print("indexing finished too soon, retrying...");
+ // Although the index build finished, ensure the shell has exited.
waitProgram(bgIndexBuildPid);
- break;
+ size *= 2;
+ assert(size < 5000000, "unable to run checks in parallel with index creation");
}
- print("indexing finished too soon, retrying...");
- // Although the index build finished, ensure the shell has exited.
- waitProgram(bgIndexBuildPid);
- size *= 2;
- assert(size < 5000000, "unable to run checks in parallel with index creation");
- }
- waitParallel();
+ waitParallel();
- /* it could be that there is more than size now but the index failed
- to build - which is valid. we check index isn't there.
- */
- if (t.count() != size) {
- assert.eq(1, t.getIndexes().length, "change in # of elems yet index is there");
- }
+ /* it could be that there is more than size now but the index failed
+ to build - which is valid. we check index isn't there.
+ */
+ if (t.count() != size) {
+ assert.eq(1, t.getIndexes().length, "change in # of elems yet index is there");
+ }
-};
+ };
-doTest();
+ doTest();
-testServer.stop();
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/ns1.js b/jstests/noPassthrough/ns1.js
index c048a709f63..343e64feab0 100644
--- a/jstests/noPassthrough/ns1.js
+++ b/jstests/noPassthrough/ns1.js
@@ -1,50 +1,51 @@
+(function() {
+ "use strict";
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ let mydb = conn.getDB("test_ns1");
+
+ const check = function(n, isNew) {
+ var coll = mydb["x" + n];
+ if (isNew) {
+ assert.eq(0, coll.count(), "pop a: " + n);
+ assert.writeOK(coll.insert({_id: n}));
+ }
+ assert.eq(1, coll.count(), "pop b: " + n);
+ assert.eq(n, coll.findOne()._id, "pop c: " + n);
+ return coll;
+ };
-load("jstests/libs/slow_weekly_util.js");
-
-testServer = new SlowWeeklyMongod("ns1");
-mydb = testServer.getDB("test_ns1");
+ let max = 0;
-check = function(n, isNew) {
- var coll = mydb["x" + n];
- if (isNew) {
- assert.eq(0, coll.count(), "pop a: " + n);
- assert.writeOK(coll.insert({_id: n}));
+ for (; max < 1000; max++) {
+ check(max, true);
}
- assert.eq(1, coll.count(), "pop b: " + n);
- assert.eq(n, coll.findOne()._id, "pop c: " + n);
- return coll;
-};
-
-max = 0;
-for (; max < 1000; max++) {
- check(max, true);
-}
-
-function checkall(removed) {
- for (var i = 0; i < max; i++) {
- if (removed == i) {
- assert.eq(0, mydb["x" + i].count(), "should be 0 : " + removed);
- } else {
- check(i, false);
+ function checkall(removed) {
+ for (var i = 0; i < max; i++) {
+ if (removed == i) {
+ assert.eq(0, mydb["x" + i].count(), "should be 0 : " + removed);
+ } else {
+ check(i, false);
+ }
}
}
-}
-
-checkall();
-Random.srand(123124);
-its = max / 2;
-print("its: " + its);
-for (i = 0; i < its; i++) {
- x = Random.randInt(max);
- check(x, false).drop();
- checkall(x);
- check(x, true);
- if ((i + 1) % 20 == 0) {
- print(i + "/" + its);
+ checkall();
+
+ Random.srand(123124);
+ const its = max / 2;
+ print("its: " + its);
+ for (let i = 0; i < its; i++) {
+ const x = Random.randInt(max);
+ check(x, false).drop();
+ checkall(x);
+ check(x, true);
+ if ((i + 1) % 20 == 0) {
+ print(i + "/" + its);
+ }
}
-}
-print("yay");
+ print("yay");
-mydb.dropDatabase();
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/query_yield1.js b/jstests/noPassthrough/query_yield1.js
index 22e95a1fb32..fd57f53b077 100644
--- a/jstests/noPassthrough/query_yield1.js
+++ b/jstests/noPassthrough/query_yield1.js
@@ -1,91 +1,93 @@
-if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
-
- load("jstests/libs/slow_weekly_util.js");
- testServer = new SlowWeeklyMongod("query_yield1");
- db = testServer.getDB("test");
-
- t = db.query_yield1;
- t.drop();
-
- N = 20000;
- i = 0;
-
- q = function() {
- var x = this.n;
- for (var i = 0; i < 250; i++) {
- x = x * 2;
- }
- return false;
- };
-
- while (true) {
- function fill() {
- var bulk = t.initializeUnorderedBulkOp();
- for (; i < N; i++) {
- bulk.insert({_id: i, n: 1});
+(function() {
+ "use strict";
+ if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ db = conn.getDB("test");
+
+ t = db.query_yield1;
+ t.drop();
+
+ N = 20000;
+ i = 0;
+
+ q = function() {
+ var x = this.n;
+ for (var i = 0; i < 250; i++) {
+ x = x * 2;
}
- assert.writeOK(bulk.execute());
+ return false;
+ };
+
+ while (true) {
+ fill = function() {
+ var bulk = t.initializeUnorderedBulkOp();
+ for (; i < N; i++) {
+ bulk.insert({_id: i, n: 1});
+ }
+ assert.writeOK(bulk.execute());
+ };
+
+ timeQuery = function() {
+ return Date.timeFunc(function() {
+ assert.eq(0, t.find(q).itcount());
+ });
+ };
+
+ fill();
+ timeQuery();
+ timeQuery();
+ time = timeQuery();
+ print(N + "\t" + time);
+ if (time > 2000)
+ break;
+
+ N *= 2;
}
- function timeQuery() {
- return Date.timeFunc(function() {
- assert.eq(0, t.find(q).itcount());
- });
- }
+ // --- test 1
- fill();
- timeQuery();
- timeQuery();
- time = timeQuery();
- print(N + "\t" + time);
- if (time > 2000)
- break;
+ assert.eq(0, db.currentOp().inprog.length, "setup broken");
- N *= 2;
- }
+ join = startParallelShell(
+ "print( 0 == db.query_yield1.find( function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
- // --- test 1
+ assert.soon(function() {
+ var x = db.currentOp().inprog;
+ return x.length > 0;
+ }, "never doing query", 2000, 1);
- assert.eq(0, db.currentOp().inprog.length, "setup broken");
+ print("start query");
- join = startParallelShell(
- "print( 0 == db.query_yield1.find( function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
+ num = 0;
+ start = new Date();
+ biggestMe = 0;
+ while (((new Date()).getTime() - start) < (time * 2)) {
+ var me = Date.timeFunc(function() {
+ t.insert({x: 1});
+ });
+ var x = db.currentOp();
- assert.soon(function() {
- var x = db.currentOp().inprog;
- return x.length > 0;
- }, "never doing query", 2000, 1);
+ if (num++ == 0) {
+ assert.eq(1, x.inprog.length, "nothing in prog");
+ }
- print("start query");
+ if (me > biggestMe) {
+ biggestMe = me;
+ print("biggestMe: " + biggestMe);
+ }
- num = 0;
- start = new Date();
- biggestMe = 0;
- while (((new Date()).getTime() - start) < (time * 2)) {
- var me = Date.timeFunc(function() {
- t.insert({x: 1});
- });
- var x = db.currentOp();
+ assert.gt(200, me, "took too long for me to run");
- if (num++ == 0) {
- assert.eq(1, x.inprog.length, "nothing in prog");
+ if (x.inprog.length == 0)
+ break;
}
- if (me > biggestMe) {
- biggestMe = me;
- print("biggestMe: " + biggestMe);
- }
+ join();
- assert.gt(200, me, "took too long for me to run");
+ var x = db.currentOp();
+ assert.eq(0, x.inprog.length, "weird 2");
- if (x.inprog.length == 0)
- break;
+ MongoRunner.stopMongod(conn);
}
-
- join();
-
- var x = db.currentOp();
- assert.eq(0, x.inprog.length, "weird 2");
-
- testServer.stop();
-}
+})();
diff --git a/jstests/noPassthrough/query_yield2.js b/jstests/noPassthrough/query_yield2.js
index 8e5dc8dc4ec..e9c1ef92543 100644
--- a/jstests/noPassthrough/query_yield2.js
+++ b/jstests/noPassthrough/query_yield2.js
@@ -1,150 +1,153 @@
-if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
+(function() {
+ "use strict";
+ if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
- var currentOp;
- var N;
- var i;
- var t;
- var q;
- var len;
- var num;
- var start;
- var insertTime;
+ var currentOp;
+ var N;
+ var i;
+ var t;
+ var q;
+ var len;
+ var num;
+ var start;
+ var insertTime;
- load("jstests/libs/slow_weekly_util.js");
- testServer = new SlowWeeklyMongod("query_yield2");
- db = testServer.getDB("test");
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ db = conn.getDB("test");
- t = db.query_yield2;
- t.drop();
+ t = db.query_yield2;
+ t.drop();
- N = 200;
- i = 0;
+ N = 200;
+ i = 0;
- q = function() {
- var x = this.n;
- for (var i = 0; i < 25000; i++) {
- x = x * 2;
- }
- return false;
- };
+ q = function() {
+ var x = this.n;
+ for (var i = 0; i < 25000; i++) {
+ x = x * 2;
+ }
+ return false;
+ };
- print("Shell ==== Creating test.query_yield2 collection ...");
- print(
- "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete");
- while (true) {
- function fill() {
- var bulk = t.initializeUnorderedBulkOp();
- for (; i < N; ++i) {
- bulk.insert({_id: i, n: 1});
+ print("Shell ==== Creating test.query_yield2 collection ...");
+ print(
+ "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete");
+ while (true) {
+ fill = function() {
+ var bulk = t.initializeUnorderedBulkOp();
+ for (; i < N; ++i) {
+ bulk.insert({_id: i, n: 1});
+ }
+ assert.writeOK(bulk.execute());
+ };
+ timeQuery = function() {
+ return Date.timeFunc(function() {
+ assert.eq(0, t.find(q).itcount());
+ });
+ };
+ print("Shell ==== Adding document IDs from " + i + " to " + (N - 1));
+ fill();
+ print("Shell ==== Running warm-up query 1");
+ timeQuery();
+ print("Shell ==== Running warm-up query 2");
+ timeQuery();
+ print("Shell ==== Running timed query ...");
+ time = timeQuery();
+ print("Shell ==== Query across " + N + " documents took " + time + " ms");
+ if (time > 2000) {
+ print("Shell ==== Reached desired 2000 ms mark (at " + time +
+ " ms), proceeding to next step");
+ break;
}
- assert.writeOK(bulk.execute());
- }
- function timeQuery() {
- return Date.timeFunc(function() {
- assert.eq(0, t.find(q).itcount());
- });
+ N *= 2;
+ print("Shell ==== Did not reach 2000 ms, increasing fill point to " + N + " documents");
}
- print("Shell ==== Adding document IDs from " + i + " to " + (N - 1));
- fill();
- print("Shell ==== Running warm-up query 1");
- timeQuery();
- print("Shell ==== Running warm-up query 2");
- timeQuery();
- print("Shell ==== Running timed query ...");
- time = timeQuery();
- print("Shell ==== Query across " + N + " documents took " + time + " ms");
- if (time > 2000) {
- print("Shell ==== Reached desired 2000 ms mark (at " + time +
- " ms), proceding to next step");
- break;
- }
- N *= 2;
- print("Shell ==== Did not reach 2000 ms, increasing fill point to " + N + " documents");
- }
-
- print("Shell ==== Testing db.currentOp to make sure nothing is in progress");
- print("Shell ==== Dump of db.currentOp:");
- currentOp = db.currentOp();
- print(tojson(currentOp));
- len = currentOp.inprog.length;
- if (len) {
- print("Shell ==== This test is broken: db.currentOp().inprog.length is " + len);
- throw Error("query_yield2.js test is broken");
- }
- print("Shell ==== The test is working so far: db.currentOp().inprog.length is " + len);
-
- print("Shell ==== Starting parallel shell to test if slow query will yield to write");
- join = startParallelShell(
- "print( 0 == db.query_yield2.find( function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
- print("Shell ==== Waiting until db.currentOp().inprog becomes non-empty");
- assert.soon(function() {
+ print("Shell ==== Testing db.currentOp to make sure nothing is in progress");
+ print("Shell ==== Dump of db.currentOp:");
currentOp = db.currentOp();
+ print(tojson(currentOp));
len = currentOp.inprog.length;
if (len) {
- print("Shell ==== Wait satisfied: db.currentOp().inprog.length is " + len);
- print("Shell ==== Dump of db.currentOp:");
- print(tojson(currentOp));
- print("Shell ==== Checking if this currentOp is the query we are waiting for");
- if (currentOp.inprog[0].ns == "test.query_yield2" &&
- currentOp.inprog[0].query["$where"]) {
- print("Shell ==== Yes, we found the query we are waiting for");
- return true;
- }
- if (currentOp.inprog[0].ns == "" && currentOp.inprog[0].query["whatsmyuri"]) {
- print("Shell ==== No, we found a \"whatsmyuri\" query, waiting some more");
- return false;
- }
- print(
- "Shell ==== No, we found something other than our query or a \"whatsmyuri\", waiting some more");
- return false;
+ print("Shell ==== This test is broken: db.currentOp().inprog.length is " + len);
+ throw Error("query_yield2.js test is broken");
}
- return len > 0;
- }, "Wait failed, db.currentOp().inprog never became non-empty", 2000, 1);
+ print("Shell ==== The test is working so far: db.currentOp().inprog.length is " + len);
- print(
- "Shell ==== Now that we have seen db.currentOp().inprog show that our query is running, we start the real test");
- num = 0;
- start = new Date();
- while (((new Date()).getTime() - start) < (time * 2)) {
- if (num == 0) {
- print("Shell ==== Starting loop " + num + ", inserting 1 document");
- }
- insertTime = Date.timeFunc(function() {
- t.insert({x: 1});
- });
- currentOp = db.currentOp();
- len = currentOp.inprog.length;
- print("Shell ==== Time to insert document " + num + " was " + insertTime +
- " ms, db.currentOp().inprog.length is " + len);
- if (num++ == 0) {
- if (len != 1) {
- print("Shell ==== TEST FAILED! db.currentOp().inprog.length is " + len);
+ print("Shell ==== Starting parallel shell to test if slow query will yield to write");
+ join = startParallelShell(
+ "print( 0 == db.query_yield2.find( function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
+
+ print("Shell ==== Waiting until db.currentOp().inprog becomes non-empty");
+ assert.soon(function() {
+ currentOp = db.currentOp();
+ len = currentOp.inprog.length;
+ if (len) {
+ print("Shell ==== Wait satisfied: db.currentOp().inprog.length is " + len);
print("Shell ==== Dump of db.currentOp:");
print(tojson(currentOp));
- throw Error("TEST FAILED!");
+ print("Shell ==== Checking if this currentOp is the query we are waiting for");
+ if (currentOp.inprog[0].ns == "test.query_yield2" &&
+ currentOp.inprog[0].query["$where"]) {
+ print("Shell ==== Yes, we found the query we are waiting for");
+ return true;
+ }
+ if (currentOp.inprog[0].ns == "" && currentOp.inprog[0].query["whatsmyuri"]) {
+ print("Shell ==== No, we found a \"whatsmyuri\" query, waiting some more");
+ return false;
+ }
+ print(
+ "Shell ==== No, we found something other than our query or a \"whatsmyuri\", waiting some more");
+ return false;
+ }
+ return len > 0;
+ }, "Wait failed, db.currentOp().inprog never became non-empty", 2000, 1);
+
+ print(
+ "Shell ==== Now that we have seen db.currentOp().inprog show that our query is running, we start the real test");
+ num = 0;
+ start = new Date();
+ while (((new Date()).getTime() - start) < (time * 2)) {
+ if (num == 0) {
+ print("Shell ==== Starting loop " + num + ", inserting 1 document");
+ }
+ insertTime = Date.timeFunc(function() {
+ t.insert({x: 1});
+ });
+ currentOp = db.currentOp();
+ len = currentOp.inprog.length;
+ print("Shell ==== Time to insert document " + num + " was " + insertTime +
+ " ms, db.currentOp().inprog.length is " + len);
+ if (num++ == 0) {
+ if (len != 1) {
+ print("Shell ==== TEST FAILED! db.currentOp().inprog.length is " + len);
+ print("Shell ==== Dump of db.currentOp:");
+ print(tojson(currentOp));
+ throw Error("TEST FAILED!");
+ }
+ }
+ assert.gt(200,
+ insertTime,
+ "Insert took too long (" + insertTime + " ms), should be less than 200 ms");
+ if (currentOp.inprog.length == 0) {
+ break;
}
}
- assert.gt(200,
- insertTime,
- "Insert took too long (" + insertTime + " ms), should be less than 200 ms");
- if (currentOp.inprog.length == 0) {
- break;
- }
- }
- print("Shell ==== Finished inserting documents, reader also finished");
- print("Shell ==== Waiting for parallel shell to exit");
- join();
+ print("Shell ==== Finished inserting documents, reader also finished");
+ print("Shell ==== Waiting for parallel shell to exit");
+ join();
- currentOp = db.currentOp();
- len = currentOp.inprog.length;
- if (len != 0) {
- print("Shell ==== Final sanity check FAILED! db.currentOp().inprog.length is " + len);
- print("Shell ==== Dump of db.currentOp:");
- print(tojson(currentOp));
- throw Error("TEST FAILED!");
+ currentOp = db.currentOp();
+ len = currentOp.inprog.length;
+ if (len != 0) {
+ print("Shell ==== Final sanity check FAILED! db.currentOp().inprog.length is " + len);
+ print("Shell ==== Dump of db.currentOp:");
+ print(tojson(currentOp));
+ throw Error("TEST FAILED!");
+ }
+ print("Shell ==== Test completed successfully, shutting down server");
+ MongoRunner.stopMongod(conn);
}
- print("Shell ==== Test completed successfully, shutting down server");
- testServer.stop();
-}
+})();
diff --git a/jstests/noPassthrough/repair2.js b/jstests/noPassthrough/repair2.js
index 58032bd17d4..74d75562ae8 100644
--- a/jstests/noPassthrough/repair2.js
+++ b/jstests/noPassthrough/repair2.js
@@ -1,32 +1,35 @@
// SERVER-2843 The repair command should not yield.
-baseName = "jstests_repair2";
+(function() {
+ "use strict";
+ const baseName = "jstests_repair2";
-load("jstests/libs/slow_weekly_util.js");
-testServer = new SlowWeeklyMongod(baseName);
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
-t = testServer.getDB(baseName)[baseName];
-t.drop();
+ const t = conn.getDB(baseName)[baseName];
+ t.drop();
-var awaitShell = startParallelShell("db = db.getSiblingDB( '" + baseName + "');" +
- "for( i = 0; i < 10; ++i ) { " + "db.repairDatabase();" +
- "sleep( 5000 );" + " }",
- testServer.port);
+ var awaitShell = startParallelShell("db = db.getSiblingDB( '" + baseName + "');" +
+ "for( i = 0; i < 10; ++i ) { " +
+ "db.repairDatabase();" + "sleep( 5000 );" + " }",
+ conn.port);
-for (i = 0; i < 30; ++i) {
- var bulk = t.initializeOrderedBulkOp();
- for (j = 0; j < 5000; ++j) {
- bulk.insert({_id: j});
- }
+ for (let i = 0; i < 30; ++i) {
+ var bulk = t.initializeOrderedBulkOp();
+ for (let j = 0; j < 5000; ++j) {
+ bulk.insert({_id: j});
+ }
- for (j = 0; j < 5000; ++j) {
- bulk.find({_id: j, $isolated: 1}).remove();
- }
+ for (let j = 0; j < 5000; ++j) {
+ bulk.find({_id: j, $isolated: 1}).remove();
+ }
- assert.writeOK(bulk.execute());
- assert.eq(0, t.count());
-}
+ assert.writeOK(bulk.execute());
+ assert.eq(0, t.count());
+ }
-awaitShell();
+ awaitShell();
-testServer.stop();
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js
index 453914d6b3d..342f935d599 100644
--- a/jstests/noPassthrough/update_server-5552.js
+++ b/jstests/noPassthrough/update_server-5552.js
@@ -1,34 +1,38 @@
-load("jstests/libs/slow_weekly_util.js");
-testServer = new SlowWeeklyMongod("update_server-5552");
-db = testServer.getDB("test");
+var db;
+(function() {
+ "use strict";
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ db = conn.getDB("test");
-t = db.foo;
-t.drop();
+ const t = db.foo;
+ t.drop();
-N = 10000;
+ const N = 10000;
-var bulk = t.initializeUnorderedBulkOp();
-for (i = 0; i < N; i++) {
- bulk.insert({_id: i, x: 1});
-}
-assert.writeOK(bulk.execute());
+ var bulk = t.initializeUnorderedBulkOp();
+ for (let i = 0; i < N; i++) {
+ bulk.insert({_id: i, x: 1});
+ }
+ assert.writeOK(bulk.execute());
-join = startParallelShell(
- "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );");
+ const join = startParallelShell(
+ "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );");
-t.update({
- $where: function() {
- sleep(1);
- return true;
- }
-},
- {$set: {x: 5}},
- false,
- true);
-db.getLastError();
+ t.update({
+ $where: function() {
+ sleep(1);
+ return true;
+ }
+ },
+ {$set: {x: 5}},
+ false,
+ true);
+ db.getLastError();
-join();
+ join();
-assert.eq(N, t.find({x: 5}).count());
+ assert.eq(N, t.find({x: 5}).count());
-testServer.stop();
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/slow1/conc_update.js b/jstests/slow1/conc_update.js
index b7b8b836831..e46e132bd61 100644
--- a/jstests/slow1/conc_update.js
+++ b/jstests/slow1/conc_update.js
@@ -1,57 +1,61 @@
-load("jstests/libs/slow_weekly_util.js");
-test = new SlowWeeklyMongod("conc_update");
-db = test.getDB("concurrency");
-db.dropDatabase();
-
-NRECORDS = 3 * 1024 * 1024;
-
-print("loading " + NRECORDS + " documents (progress msg every 1024*1024 documents)");
-var bulk = db.conc.initializeUnorderedBulkOp();
-for (var i = 0; i < NRECORDS; i++) {
- bulk.insert({x: i});
-}
-assert.writeOK(bulk.execute());
-
-print("making an index (this will take a while)");
-db.conc.ensureIndex({x: 1});
-
-var c1 = db.conc.count({x: {$lt: NRECORDS}});
-
-updater = startParallelShell(
- "db = db.getSisterDB('concurrency');\
- db.concflag.insert({ inprog: true });\
- sleep(20);\
- assert.writeOK(db.conc.update({}, \
- { $inc: { x: " +
- NRECORDS +
- "}}, false, true)); \
- assert.writeOK(db.concflag.update({}, { inprog: false }));");
-
-assert.soon(function() {
- var x = db.concflag.findOne();
- return x && x.inprog;
-}, "wait for fork", 30000, 1);
-
-querycount = 0;
-decrements = 0;
-misses = 0;
-
-assert.soon(function() {
- c2 = db.conc.count({x: {$lt: NRECORDS}});
- print(c2);
- querycount++;
- if (c2 < c1)
- decrements++;
- else
- misses++;
- c1 = c2;
- return !db.concflag.findOne().inprog;
-}, "update never finished", 2 * 60 * 60 * 1000, 10);
-
-print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
-
-assert.eq(NRECORDS, db.conc.count(), "AT END 1");
-
-updater(); // wait()
-
-test.stop();
+(function() {
+ "use strict";
+
+ const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
+ assert.neq(null, conn, "mongod was unable to start up");
+ db = conn.getDB("concurrency");
+ db.dropDatabase();
+
+ const NRECORDS = 3 * 1024 * 1024;
+
+ print("loading " + NRECORDS + " documents (progress msg every 1024*1024 documents)");
+ var bulk = db.conc.initializeUnorderedBulkOp();
+ for (var i = 0; i < NRECORDS; i++) {
+ bulk.insert({x: i});
+ }
+ assert.writeOK(bulk.execute());
+
+ print("making an index (this will take a while)");
+ db.conc.ensureIndex({x: 1});
+
+ var c1 = db.conc.count({x: {$lt: NRECORDS}});
+
+ const updater = startParallelShell(
+ "db = db.getSisterDB('concurrency');\
+ db.concflag.insert({ inprog: true });\
+ sleep(20);\
+ assert.writeOK(db.conc.update({}, \
+ { $inc: { x: " +
+ NRECORDS +
+ "}}, false, true)); \
+ assert.writeOK(db.concflag.update({}, { inprog: false }));");
+
+ assert.soon(function() {
+ var x = db.concflag.findOne();
+ return x && x.inprog;
+ }, "wait for fork", 30000, 1);
+
+ let querycount = 0;
+ let decrements = 0;
+ let misses = 0;
+
+ assert.soon(function() {
+ const c2 = db.conc.count({x: {$lt: NRECORDS}});
+ print(c2);
+ querycount++;
+ if (c2 < c1)
+ decrements++;
+ else
+ misses++;
+ c1 = c2;
+ return !db.concflag.findOne().inprog;
+ }, "update never finished", 2 * 60 * 60 * 1000, 10);
+
+ print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+
+ assert.eq(NRECORDS, db.conc.count(), "AT END 1");
+
+ updater(); // wait()
+
+ MongoRunner.stopMongod(conn);
+})();