Diffstat (limited to 'jstests/noPassthroughWithMongod')
-rw-r--r--  jstests/noPassthroughWithMongod/apply_ops_errors.js  | 55
-rw-r--r--  jstests/noPassthroughWithMongod/background.js  | 28
-rw-r--r--  jstests/noPassthroughWithMongod/bench_test_crud_commands.js  | 51
-rw-r--r--  jstests/noPassthroughWithMongod/benchrun_substitution.js  | 74
-rw-r--r--  jstests/noPassthroughWithMongod/btreedel.js  | 8
-rw-r--r--  jstests/noPassthroughWithMongod/bulk_api_limits.js  | 48
-rw-r--r--  jstests/noPassthroughWithMongod/capped4.js  | 32
-rw-r--r--  jstests/noPassthroughWithMongod/capped_truncate.js  | 33
-rw-r--r--  jstests/noPassthroughWithMongod/clonecollection.js  | 56
-rw-r--r--  jstests/noPassthroughWithMongod/connections_opened.js  | 71
-rw-r--r--  jstests/noPassthroughWithMongod/create_indexes_shell_helper.js  | 37
-rw-r--r--  jstests/noPassthroughWithMongod/cursor8.js  | 20
-rw-r--r--  jstests/noPassthroughWithMongod/default_read_pref.js  | 17
-rw-r--r--  jstests/noPassthroughWithMongod/dup_bgindex.js  | 12
-rw-r--r--  jstests/noPassthroughWithMongod/explain1.js  | 9
-rw-r--r--  jstests/noPassthroughWithMongod/explain2.js  | 14
-rw-r--r--  jstests/noPassthroughWithMongod/explain3.js  | 9
-rw-r--r--  jstests/noPassthroughWithMongod/external_sort_text_agg.js  | 11
-rw-r--r--  jstests/noPassthroughWithMongod/find_and_modify_server16469.js  | 19
-rw-r--r--  jstests/noPassthroughWithMongod/fsync2.js  | 51
-rw-r--r--  jstests/noPassthroughWithMongod/ftdc_params.js  | 14
-rw-r--r--  jstests/noPassthroughWithMongod/geo_axis_aligned.js  | 124
-rw-r--r--  jstests/noPassthroughWithMongod/geo_mnypts.js  | 48
-rw-r--r--  jstests/noPassthroughWithMongod/geo_near_random1.js  | 3
-rw-r--r--  jstests/noPassthroughWithMongod/geo_near_random2.js  | 10
-rw-r--r--  jstests/noPassthroughWithMongod/geo_polygon.js  | 78
-rw-r--r--  jstests/noPassthroughWithMongod/getmore_error.js  | 12
-rw-r--r--  jstests/noPassthroughWithMongod/huge_multikey_index.js  | 8
-rw-r--r--  jstests/noPassthroughWithMongod/index_check10.js  | 104
-rw-r--r--  jstests/noPassthroughWithMongod/index_check9.js  | 218
-rw-r--r--  jstests/noPassthroughWithMongod/index_hammer1.js  | 45
-rw-r--r--  jstests/noPassthroughWithMongod/index_killop.js  | 58
-rw-r--r--  jstests/noPassthroughWithMongod/index_multi.js  | 100
-rw-r--r--  jstests/noPassthroughWithMongod/index_no_retry.js  | 63
-rw-r--r--  jstests/noPassthroughWithMongod/index_retry.js  | 61
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_drop.js  | 53
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_interrupts.js  | 48
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_secondary.js  | 49
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js  | 52
-rw-r--r--  jstests/noPassthroughWithMongod/insertMulti.js  | 30
-rw-r--r--  jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js  | 113
-rw-r--r--  jstests/noPassthroughWithMongod/log_component_helpers.js  | 11
-rw-r--r--  jstests/noPassthroughWithMongod/logop_rollback.js  | 7
-rw-r--r--  jstests/noPassthroughWithMongod/logpath.js  | 51
-rw-r--r--  jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js  | 19
-rw-r--r-- [-rwxr-xr-x]  jstests/noPassthroughWithMongod/moveprimary-replset.js  | 105
-rw-r--r--  jstests/noPassthroughWithMongod/mr_noscripting.js  | 21
-rw-r--r--  jstests/noPassthroughWithMongod/mr_writeconflict.js  | 64
-rw-r--r--  jstests/noPassthroughWithMongod/newcollection2.js  | 12
-rw-r--r--  jstests/noPassthroughWithMongod/no_balance_collection.js  | 80
-rw-r--r--  jstests/noPassthroughWithMongod/parallel_collection_scan.js  | 20
-rw-r--r--  jstests/noPassthroughWithMongod/query_oplogreplay.js  | 12
-rw-r--r--  jstests/noPassthroughWithMongod/reconfigwt.js  | 7
-rw-r--r--  jstests/noPassthroughWithMongod/recstore.js  | 16
-rw-r--r--  jstests/noPassthroughWithMongod/remove9.js  | 11
-rw-r--r--  jstests/noPassthroughWithMongod/replReads.js  | 120
-rw-r--r--  jstests/noPassthroughWithMongod/replica_set_shard_version.js  | 44
-rw-r--r--  jstests/noPassthroughWithMongod/rpc_protocols.js  | 62
-rw-r--r--  jstests/noPassthroughWithMongod/server7428.js  | 16
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js  | 68
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_rs_arb1.js  | 27
-rw-r--r--  jstests/noPassthroughWithMongod/shelllimit.js  | 8
-rw-r--r--  jstests/noPassthroughWithMongod/temp_namespace.js  | 45
-rw-r--r--  jstests/noPassthroughWithMongod/testing_only_commands.js  | 23
-rw-r--r--  jstests/noPassthroughWithMongod/ttl1.js  | 64
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl.js  | 59
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl_maintenance.js  | 12
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js  | 34
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_sharded.js  | 73
-rw-r--r--  jstests/noPassthroughWithMongod/unix_socket1.js  | 19
-rw-r--r--  jstests/noPassthroughWithMongod/validate_command.js  | 11
-rw-r--r--  jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js  | 24
72 files changed, 1574 insertions(+), 1517 deletions(-)
diff --git a/jstests/noPassthroughWithMongod/apply_ops_errors.js b/jstests/noPassthroughWithMongod/apply_ops_errors.js
index 8cc5a8ad4cb..31353523810 100644
--- a/jstests/noPassthroughWithMongod/apply_ops_errors.js
+++ b/jstests/noPassthroughWithMongod/apply_ops_errors.js
@@ -19,19 +19,11 @@
// Scenario 1: only one operation
assert.eq(0, coll.find().count(), "test collection not empty");
- coll.ensureIndex({x:1},{unique:true});
- coll.insert({ _id: 1, x: "init" });
+ coll.ensureIndex({x: 1}, {unique: true});
+ coll.insert({_id: 1, x: "init"});
- var res = db.runCommand({ applyOps: [
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 2,
- x: "init"
- }
- },
- ]});
+ var res =
+ db.runCommand({applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 2, x: "init"}}, ]});
assert.eq(1, res.applied);
assert(res.code);
@@ -43,35 +35,16 @@
// Scenario 2: Three operations, first two should run, second should fail.
assert.eq(0, coll.find().count(), "test collection not empty");
- coll.ensureIndex({x:1},{unique:true});
- coll.insert({ _id: 1, x: "init" });
-
- var res = db.runCommand({ applyOps: [
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 3,
- x: "not init"
- }
- },
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 4,
- x: "init"
- }
- },
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 5,
- x: "not init again"
- }
- },
- ]});
+ coll.ensureIndex({x: 1}, {unique: true});
+ coll.insert({_id: 1, x: "init"});
+
+ var res = db.runCommand({
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "not init"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "init"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 5, x: "not init again"}},
+ ]
+ });
assert.eq(2, res.applied);
assert(res.code);
diff --git a/jstests/noPassthroughWithMongod/background.js b/jstests/noPassthroughWithMongod/background.js
index 742390b54c7..7624d24471c 100644
--- a/jstests/noPassthroughWithMongod/background.js
+++ b/jstests/noPassthroughWithMongod/background.js
@@ -1,16 +1,16 @@
// background indexing test during inserts.
-assert( db.getName() == "test" );
+assert(db.getName() == "test");
t = db.bg1;
t.drop();
-var a = new Mongo( db.getMongo().host ).getDB( db.getName() );
+var a = new Mongo(db.getMongo().host).getDB(db.getName());
var bulk = t.initializeUnorderedBulkOp();
-for( var i = 0; i < 100000; i++ ) {
- bulk.insert({ y: 'aaaaaaaaaaaa', i: i });
- if( i % 10000 == 0 ) {
+for (var i = 0; i < 100000; i++) {
+ bulk.insert({y: 'aaaaaaaaaaaa', i: i});
+ if (i % 10000 == 0) {
assert.writeOK(bulk.execute());
bulk = t.initializeUnorderedBulkOp();
print(i);
@@ -18,14 +18,14 @@ for( var i = 0; i < 100000; i++ ) {
}
// start bg indexing
-a.bg1.ensureIndex({i:1}, {name:"i_1", background:true});
+a.bg1.ensureIndex({i: 1}, {name: "i_1", background: true});
// add more data
bulk = t.initializeUnorderedBulkOp();
-for( var i = 0; i < 100000; i++ ) {
- bulk.insert({ i: i });
- if( i % 10000 == 0 ) {
- printjson( db.currentOp() );
+for (var i = 0; i < 100000; i++) {
+ bulk.insert({i: i});
+ if (i % 10000 == 0) {
+ printjson(db.currentOp());
assert.writeOK(bulk.execute());
bulk = t.initializeUnorderedBulkOp();
print(i);
@@ -34,14 +34,14 @@ for( var i = 0; i < 100000; i++ ) {
assert.writeOK(bulk.execute());
-printjson( db.currentOp() );
+printjson(db.currentOp());
-for( var i = 0; i < 40; i++ ) {
- if( db.currentOp().inprog.length == 0 )
+for (var i = 0; i < 40; i++) {
+ if (db.currentOp().inprog.length == 0)
break;
print("waiting");
sleep(1000);
}
var idx = t.getIndexes();
-assert( idx[1].key.i == 1 );
+assert(idx[1].key.i == 1);
diff --git a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
index 25060999d59..14ee7d0fdb7 100644
--- a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
+++ b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
@@ -7,9 +7,11 @@
assert.commandWorked(coll.getDB().createCollection(coll.getName()));
function makeDocument(docSize) {
- var doc = { "fieldName":"" };
+ var doc = {
+ "fieldName": ""
+ };
var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
- while(Object.bsonsize(doc) < docSize) {
+ while (Object.bsonsize(doc) < docSize) {
if (Object.bsonsize(doc) < docSize - longString.length) {
doc.fieldName += longString;
} else {
@@ -20,7 +22,12 @@
}
function executeBenchRun(benchOps) {
- var benchArgs = {ops: benchOps, parallel: 2, seconds: 1, host: db.getMongo().host};
+ var benchArgs = {
+ ops: benchOps,
+ parallel: 2,
+ seconds: 1,
+ host: db.getMongo().host
+ };
if (jsTest.options().auth) {
benchArgs['db'] = 'admin';
benchArgs['username'] = jsTest.options().adminUser;
@@ -32,14 +39,16 @@
function testInsert(docs, writeCmd, wc) {
coll.drop();
- var res = executeBenchRun([{ns: coll.getFullName(),
- op: "insert",
- doc: docs,
- writeCmd: writeCmd,
- writeConcern : wc}]);
+ var res = executeBenchRun([{
+ ns: coll.getFullName(),
+ op: "insert",
+ doc: docs,
+ writeCmd: writeCmd,
+ writeConcern: wc
+ }]);
assert.gt(coll.count(), 0);
- assert.eq(coll.findOne({}, {_id:0}), docs[0]);
+ assert.eq(coll.findOne({}, {_id: 0}), docs[0]);
}
function testFind(readCmd) {
@@ -48,11 +57,13 @@
assert.writeOK(coll.insert({}));
}
- var res = executeBenchRun([{ns: coll.getFullName(),
- op: "find",
- query: {},
- batchSize: NumberInt(10),
- readCmd: readCmd}]);
+ var res = executeBenchRun([{
+ ns: coll.getFullName(),
+ op: "find",
+ query: {},
+ batchSize: NumberInt(10),
+ readCmd: readCmd
+ }]);
assert.gt(res.query, 0, tojson(res));
}
@@ -62,10 +73,8 @@
assert.writeOK(coll.insert({}));
}
- var res = executeBenchRun([{ns: coll.getFullName(),
- op: "findOne",
- query: {},
- readCmd: readCmd}]);
+ var res = executeBenchRun(
+ [{ns: coll.getFullName(), op: "findOne", query: {}, readCmd: readCmd}]);
assert.gt(res.findOne, 0, tojson(res));
}
@@ -78,9 +87,9 @@
testInsert([bigDoc], writeCmd, {});
testInsert(docs, writeCmd, {});
- testInsert(docs, writeCmd, {"writeConcern" : {"w" : "majority"}});
- testInsert(docs, writeCmd, {"writeConcern" : {"w" : 1, "j": false}});
- testInsert(docs, writeCmd, {"writeConcern" : {"j" : true}});
+ testInsert(docs, writeCmd, {"writeConcern": {"w": "majority"}});
+ testInsert(docs, writeCmd, {"writeConcern": {"w": 1, "j": false}});
+ testInsert(docs, writeCmd, {"writeConcern": {"j": true}});
}
testWriteConcern(false);
diff --git a/jstests/noPassthroughWithMongod/benchrun_substitution.js b/jstests/noPassthroughWithMongod/benchrun_substitution.js
index afc79b4cc49..ddcef69d73d 100644
--- a/jstests/noPassthroughWithMongod/benchrun_substitution.js
+++ b/jstests/noPassthroughWithMongod/benchrun_substitution.js
@@ -2,75 +2,73 @@ function benchrun_sub_insert(use_write_command) {
t = db.benchrun_sub;
t.drop();
var offset = 10000;
- ops = [{op: "insert", ns: "test.benchrun_sub",
- doc: {x: { "#RAND_INT" : [ 0, 100 ] },
- curDate: { "#CUR_DATE" : 0 } ,
- futureDate: { "#CUR_DATE" : offset} ,
- pastDate: { "#CUR_DATE" : (0 - offset) } },
- writeCmd: use_write_command,
- }];
+ ops = [{
+ op: "insert",
+ ns: "test.benchrun_sub",
+ doc: {
+ x: {"#RAND_INT": [0, 100]},
+ curDate: {"#CUR_DATE": 0},
+ futureDate: {"#CUR_DATE": offset},
+ pastDate: {"#CUR_DATE": (0 - offset)}
+ },
+ writeCmd: use_write_command,
+ }];
- res = benchRun({parallel: 1,
- seconds: 10,
- ops : ops,
- host: db.getMongo().host});
+ res = benchRun({parallel: 1, seconds: 10, ops: ops, host: db.getMongo().host});
assert.gt(res.insert, 0);
t.find().forEach(function(doc) {
- var field = doc.x;
- assert.gte(field, 0);
- assert.lt(field, 100);
- assert.lt(doc.pastDate, doc.curDate);
- assert.lt(doc.curDate, doc.futureDate);
- }
- );
+ var field = doc.x;
+ assert.gte(field, 0);
+ assert.lt(field, 100);
+ assert.lt(doc.pastDate, doc.curDate);
+ assert.lt(doc.curDate, doc.futureDate);
+ });
}
function benchrun_sub_update(use_write_command) {
t = db.benchrun_sub;
t.drop();
- ops = [{op: "update", ns: "test.benchrun_sub",
- query: {x: {"#RAND_INT": [0, 100]}},
- update: {$inc : {x : 1}},
- writeCmd: use_write_command}];
+ ops = [{
+ op: "update",
+ ns: "test.benchrun_sub",
+ query: {x: {"#RAND_INT": [0, 100]}},
+ update: {$inc: {x: 1}},
+ writeCmd: use_write_command
+ }];
for (var i = 0; i < 100; ++i) {
t.insert({x: i});
}
- res = benchRun({parallel: 1,
- seconds: 10,
- ops: ops,
- host: db.getMongo().host});
+ res = benchRun({parallel: 1, seconds: 10, ops: ops, host: db.getMongo().host});
var field_sum = 0;
t.find().forEach(function(doc) {
- field_sum += doc.x;
- }
- );
+ field_sum += doc.x;
+ });
- assert.gt(field_sum, 4950); // 1 + 2 + .. 99 = 4950
+ assert.gt(field_sum, 4950); // 1 + 2 + .. 99 = 4950
}
function benchrun_sub_remove(use_write_command) {
t = db.benchrun_sub;
t.drop();
- ops = [{op: "remove", ns: "test.benchrun_sub",
- query: {x: {"#RAND_INT": [0, 100]}},
- writeCmd: use_write_command,
- }];
+ ops = [{
+ op: "remove",
+ ns: "test.benchrun_sub",
+ query: {x: {"#RAND_INT": [0, 100]}},
+ writeCmd: use_write_command,
+ }];
for (var i = 0; i < 100; ++i) {
t.insert({x: i});
}
- res = benchRun({parallel: 1,
- seconds: 10,
- ops: ops,
- host: db.getMongo().host});
+ res = benchRun({parallel: 1, seconds: 10, ops: ops, host: db.getMongo().host});
assert.eq(t.count(), 0);
}
diff --git a/jstests/noPassthroughWithMongod/btreedel.js b/jstests/noPassthroughWithMongod/btreedel.js
index 89af6aa7d5d..13c3bb3b685 100644
--- a/jstests/noPassthroughWithMongod/btreedel.js
+++ b/jstests/noPassthroughWithMongod/btreedel.js
@@ -5,13 +5,13 @@ t.remove({});
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 1000000; i++) {
- bulk.insert({ _id: i, x: 'a b' });
+ bulk.insert({_id: i, x: 'a b'});
}
assert.writeOK(bulk.execute());
print("1 insert done count: " + t.count());
-var c = t.find({y:null}).sort({ _id: 1 });
+var c = t.find({y: null}).sort({_id: 1});
for (var j = 0; j < 400000; j++) {
c.next();
if (j % 200000 == 0)
@@ -19,12 +19,12 @@ for (var j = 0; j < 400000; j++) {
}
printjson(c.next());
-var d = t.find({ _id: { $gt: 300000} }).sort({ _id: -1 });
+var d = t.find({_id: {$gt: 300000}}).sort({_id: -1});
d.next();
print("2");
-t.remove({ _id: { $gt: 200000, $lt: 600000} });
+t.remove({_id: {$gt: 200000, $lt: 600000}});
print("3");
print(d.hasNext());
diff --git a/jstests/noPassthroughWithMongod/bulk_api_limits.js b/jstests/noPassthroughWithMongod/bulk_api_limits.js
index b7bab04f6d1..3dada22c519 100644
--- a/jstests/noPassthroughWithMongod/bulk_api_limits.js
+++ b/jstests/noPassthroughWithMongod/bulk_api_limits.js
@@ -17,7 +17,7 @@ var executeTestsUnordered = function() {
// Create unique index
coll.dropIndexes();
coll.remove({});
- coll.ensureIndex({a : 1}, {unique : true});
+ coll.ensureIndex({a: 1}, {unique: true});
/**
* Fail during batch construction due to single document > maxBSONSize
@@ -25,18 +25,19 @@ var executeTestsUnordered = function() {
// Set up a giant string to blow through the max message size
var hugeString = "";
// Create it bigger than 16MB
- for(var i = 0; i < (1024 * 1100); i++) {
+ for (var i = 0; i < (1024 * 1100); i++) {
hugeString = hugeString + "1234567890123456";
}
// Set up the batch
var batch = coll.initializeUnorderedBulkOp();
- batch.insert({b:1, a:1});
+ batch.insert({b: 1, a: 1});
// Should fail on insert due to string being to big
try {
batch.insert({string: hugeString});
assert(false);
- } catch(err) {}
+ } catch (err) {
+ }
// Create unique index
coll.dropIndexes();
@@ -48,18 +49,18 @@ var executeTestsUnordered = function() {
// Set up a giant string to blow through the max message size
var hugeString = "";
// Create 4 MB strings to test splitting
- for(var i = 0; i < (1024 * 256); i++) {
+ for (var i = 0; i < (1024 * 256); i++) {
hugeString = hugeString + "1234567890123456";
}
// Insert the string a couple of times, should force split into multiple batches
var batch = coll.initializeUnorderedBulkOp();
- batch.insert({a:1, b: hugeString});
- batch.insert({a:2, b: hugeString});
- batch.insert({a:3, b: hugeString});
- batch.insert({a:4, b: hugeString});
- batch.insert({a:5, b: hugeString});
- batch.insert({a:6, b: hugeString});
+ batch.insert({a: 1, b: hugeString});
+ batch.insert({a: 2, b: hugeString});
+ batch.insert({a: 3, b: hugeString});
+ batch.insert({a: 4, b: hugeString});
+ batch.insert({a: 5, b: hugeString});
+ batch.insert({a: 6, b: hugeString});
var result = batch.execute();
printjson(JSON.stringify(result));
@@ -81,18 +82,19 @@ var executeTestsOrdered = function() {
// Set up a giant string to blow through the max message size
var hugeString = "";
// Create it bigger than 16MB
- for(var i = 0; i < (1024 * 1100); i++) {
+ for (var i = 0; i < (1024 * 1100); i++) {
hugeString = hugeString + "1234567890123456";
}
// Set up the batch
var batch = coll.initializeOrderedBulkOp();
- batch.insert({b:1, a:1});
+ batch.insert({b: 1, a: 1});
// Should fail on insert due to string being to big
try {
batch.insert({string: hugeString});
assert(false);
- } catch(err) {}
+ } catch (err) {
+ }
// Create unique index
coll.dropIndexes();
@@ -104,18 +106,18 @@ var executeTestsOrdered = function() {
// Set up a giant string to blow through the max message size
var hugeString = "";
// Create 4 MB strings to test splitting
- for(var i = 0; i < (1024 * 256); i++) {
+ for (var i = 0; i < (1024 * 256); i++) {
hugeString = hugeString + "1234567890123456";
}
// Insert the string a couple of times, should force split into multiple batches
var batch = coll.initializeOrderedBulkOp();
- batch.insert({a:1, b: hugeString});
- batch.insert({a:2, b: hugeString});
- batch.insert({a:3, b: hugeString});
- batch.insert({a:4, b: hugeString});
- batch.insert({a:5, b: hugeString});
- batch.insert({a:6, b: hugeString});
+ batch.insert({a: 1, b: hugeString});
+ batch.insert({a: 2, b: hugeString});
+ batch.insert({a: 3, b: hugeString});
+ batch.insert({a: 4, b: hugeString});
+ batch.insert({a: 5, b: hugeString});
+ batch.insert({a: 6, b: hugeString});
var result = batch.execute();
// Basic properties check
@@ -127,14 +129,14 @@ var executeTestsOrdered = function() {
coll.remove({});
};
-var buildVersion = parseInt(db.runCommand({buildInfo:1}).versionArray.slice(0, 3).join(""), 10);
+var buildVersion = parseInt(db.runCommand({buildInfo: 1}).versionArray.slice(0, 3).join(""), 10);
// Save the existing useWriteCommands function
var _useWriteCommands = coll.getMongo().useWriteCommands;
//
// Only execute write command tests if we have > 2.5.5 otherwise
// execute the down converted version
-if(buildVersion >= 255) {
+if (buildVersion >= 255) {
// Force the use of useWriteCommands
coll._mongo.useWriteCommands = function() {
return true;
diff --git a/jstests/noPassthroughWithMongod/capped4.js b/jstests/noPassthroughWithMongod/capped4.js
index f1371e8fa00..039f2557866 100644
--- a/jstests/noPassthroughWithMongod/capped4.js
+++ b/jstests/noPassthroughWithMongod/capped4.js
@@ -1,28 +1,28 @@
t = db.jstests_capped4;
t.drop();
-db.createCollection( "jstests_capped4", {size:1000,capped:true} );
-t.ensureIndex( { i: 1 } );
-for( i = 0; i < 20; ++i ) {
- t.save( { i : i } );
+db.createCollection("jstests_capped4", {size: 1000, capped: true});
+t.ensureIndex({i: 1});
+for (i = 0; i < 20; ++i) {
+ t.save({i: i});
}
-c = t.find().sort( { $natural: -1 } ).limit( 2 );
+c = t.find().sort({$natural: -1}).limit(2);
c.next();
c.next();
-d = t.find().sort( { i: -1 } ).limit( 2 );
+d = t.find().sort({i: -1}).limit(2);
d.next();
d.next();
-for( i = 20; t.findOne( { i:19 } ); ++i ) {
- t.save( { i : i } );
+for (i = 20; t.findOne({i: 19}); ++i) {
+ t.save({i: i});
}
-//assert( !t.findOne( { i : 19 } ), "A" );
-assert( !c.hasNext(), "B" );
-assert( !d.hasNext(), "C" );
-assert( t.find().sort( { i : 1 } ).hint( { i : 1 } ).toArray().length > 10, "D" );
+// assert( !t.findOne( { i : 19 } ), "A" );
+assert(!c.hasNext(), "B");
+assert(!d.hasNext(), "C");
+assert(t.find().sort({i: 1}).hint({i: 1}).toArray().length > 10, "D");
-assert( t.findOne( { i : i - 1 } ), "E" );
-var res = assert.writeError(t.remove( { i : i - 1 } ));
-assert( res.getWriteError().errmsg.indexOf( "capped" ) >= 0, "F" );
+assert(t.findOne({i: i - 1}), "E");
+var res = assert.writeError(t.remove({i: i - 1}));
+assert(res.getWriteError().errmsg.indexOf("capped") >= 0, "F");
-assert( t.validate().valid, "G" );
+assert(t.validate().valid, "G");
diff --git a/jstests/noPassthroughWithMongod/capped_truncate.js b/jstests/noPassthroughWithMongod/capped_truncate.js
index 8408ea7294b..c35318a6649 100644
--- a/jstests/noPassthroughWithMongod/capped_truncate.js
+++ b/jstests/noPassthroughWithMongod/capped_truncate.js
@@ -11,48 +11,47 @@
'use strict';
db.capped_truncate.drop();
- assert.commandWorked(db.runCommand({ create: "capped_truncate",
- capped: true,
- size: 1000,
- autoIndexId: true }));
+ assert.commandWorked(
+ db.runCommand({create: "capped_truncate", capped: true, size: 1000, autoIndexId: true}));
var t = db.capped_truncate;
// It is an error to remove a non-positive number of documents.
- assert.commandFailed(db.runCommand({ captrunc: "capped_truncate", n: -1 }),
+ assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: -1}),
"captrunc didn't return an error when attempting to remove a negative " +
- "number of documents");
- assert.commandFailed(db.runCommand({ captrunc: "capped_truncate", n: 0 }),
+ "number of documents");
+ assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 0}),
"captrunc didn't return an error when attempting to remove 0 documents");
for (var j = 1; j <= 10; j++) {
- assert.writeOK(t.insert({x:j}));
+ assert.writeOK(t.insert({x: j}));
}
// It is an error to try and remove more documents than what exist in the capped collection.
- assert.commandFailed(db.runCommand({ captrunc: "capped_truncate", n: 20 }),
+ assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 20}),
"captrunc didn't return an error when attempting to remove more" +
- " documents than what the collection contains");
+ " documents than what the collection contains");
- assert.commandWorked(db.runCommand({ captrunc: "capped_truncate", n: 5, inc: false }));
+ assert.commandWorked(db.runCommand({captrunc: "capped_truncate", n: 5, inc: false}));
assert.eq(5, t.count(), "wrong number of documents in capped collection after truncate");
assert.eq(5, t.distinct("_id").length, "wrong number of entries in _id index after truncate");
- var last = t.find({},{_id:1}).sort({_id:-1}).next();
- assert.neq(null, t.findOne({_id: last._id}),
+ var last = t.find({}, {_id: 1}).sort({_id: -1}).next();
+ assert.neq(null,
+ t.findOne({_id: last._id}),
tojson(last) + " is in _id index, but not in capped collection after truncate");
// It is an error to run the captrunc command on a nonexistent collection.
- assert.commandFailed(db.runCommand({ captrunc: "nonexistent", n: 1 }),
+ assert.commandFailed(db.runCommand({captrunc: "nonexistent", n: 1}),
"captrunc didn't return an error for a nonexistent collection");
// It is an error to run the captrunc command on a non-capped collection.
var collName = "noncapped";
db[collName].drop();
- assert.commandWorked(db.runCommand({ create: collName, capped: false }));
+ assert.commandWorked(db.runCommand({create: collName, capped: false}));
for (var j = 1; j <= 10; j++) {
- assert.writeOK(db[collName].insert({x:j}));
+ assert.writeOK(db[collName].insert({x: j}));
}
- assert.commandFailed(db.runCommand({ captrunc: collName, n: 5 }),
+ assert.commandFailed(db.runCommand({captrunc: collName, n: 5}),
"captrunc didn't return an error for a non-capped collection");
})();
diff --git a/jstests/noPassthroughWithMongod/clonecollection.js b/jstests/noPassthroughWithMongod/clonecollection.js
index 022ef58f595..a3633a12e58 100644
--- a/jstests/noPassthroughWithMongod/clonecollection.js
+++ b/jstests/noPassthroughWithMongod/clonecollection.js
@@ -6,49 +6,53 @@ var toMongod = MongoRunner.runMongod({bind_ip: "127.0.0.1"});
var f = fromMongod.getDB(baseName);
var t = toMongod.getDB(baseName);
-for( i = 0; i < 1000; ++i ) {
- f.a.save( { i: i } );
+for (i = 0; i < 1000; ++i) {
+ f.a.save({i: i});
}
-assert.eq( 1000, f.a.find().count() , "A1" );
+assert.eq(1000, f.a.find().count(), "A1");
-assert.commandWorked( t.cloneCollection( "localhost:" + fromMongod.port, "a" ) );
-assert.eq( 1000, t.a.find().count() , "A2" );
+assert.commandWorked(t.cloneCollection("localhost:" + fromMongod.port, "a"));
+assert.eq(1000, t.a.find().count(), "A2");
t.a.drop();
-assert.commandWorked( t.cloneCollection( "localhost:" + fromMongod.port, "a", { i: { $gte: 10, $lt: 20 } } ) );
-assert.eq( 10, t.a.find().count() , "A3" );
+assert.commandWorked(t.cloneCollection("localhost:" + fromMongod.port,
+ "a",
+ {i: {$gte: 10, $lt: 20}}));
+assert.eq(10, t.a.find().count(), "A3");
t.a.drop();
-assert.eq( 0, t.a.getIndexes().length, "prep 2");
+assert.eq(0, t.a.getIndexes().length, "prep 2");
-f.a.ensureIndex( { i: 1 } );
-assert.eq( 2, f.a.getIndexes().length, "expected index missing" );
-assert.commandWorked( t.cloneCollection( "localhost:" + fromMongod.port, "a" ) );
-if ( t.a.getIndexes().length != 2 ) {
- printjson( t.a.getIndexes());
+f.a.ensureIndex({i: 1});
+assert.eq(2, f.a.getIndexes().length, "expected index missing");
+assert.commandWorked(t.cloneCollection("localhost:" + fromMongod.port, "a"));
+if (t.a.getIndexes().length != 2) {
+ printjson(t.a.getIndexes());
}
-assert.eq( 2, t.a.getIndexes().length, "expected index missing" );
+assert.eq(2, t.a.getIndexes().length, "expected index missing");
// Verify index works
-x = t.a.find( { i: 50 } ).hint( { i: 1 } ).explain("executionStats");
-printjson( x );
-assert.eq( 1, x.executionStats.nReturned , "verify 1" );
-assert.eq( 1, t.a.find( { i: 50 } ).hint( { i: 1 } ).toArray().length, "match length did not match expected" );
+x = t.a.find({i: 50}).hint({i: 1}).explain("executionStats");
+printjson(x);
+assert.eq(1, x.executionStats.nReturned, "verify 1");
+assert.eq(1,
+ t.a.find({i: 50}).hint({i: 1}).toArray().length,
+ "match length did not match expected");
// Check that capped-ness is preserved on clone
f.a.drop();
t.a.drop();
-f.createCollection( "a", {capped:true,size:1000} );
-assert( f.a.isCapped() );
-assert.commandWorked( t.cloneCollection( "localhost:" + fromMongod.port, "a" ) );
-assert( t.a.isCapped(), "cloned collection not capped" );
+f.createCollection("a", {capped: true, size: 1000});
+assert(f.a.isCapped());
+assert.commandWorked(t.cloneCollection("localhost:" + fromMongod.port, "a"));
+assert(t.a.isCapped(), "cloned collection not capped");
// Check that cloning to "system.profile" is disallowed.
f.a.drop();
f.system.profile.drop();
-assert.commandWorked( f.setProfilingLevel( 2 ) );
-assert.writeOK( f.a.insert( {} ) );
-assert.gt( f.system.profile.count(), 0 );
+assert.commandWorked(f.setProfilingLevel(2));
+assert.writeOK(f.a.insert({}));
+assert.gt(f.system.profile.count(), 0);
t.system.profile.drop();
-assert.commandFailed( t.cloneCollection( "localhost:" + fromMongod.port, "system.profile" ) );
+assert.commandFailed(t.cloneCollection("localhost:" + fromMongod.port, "system.profile"));
diff --git a/jstests/noPassthroughWithMongod/connections_opened.js b/jstests/noPassthroughWithMongod/connections_opened.js
index e3f25b11fce..2ec192ed1e2 100644
--- a/jstests/noPassthroughWithMongod/connections_opened.js
+++ b/jstests/noPassthroughWithMongod/connections_opened.js
@@ -8,54 +8,55 @@ var mongo = MongoRunner.runMongod({});
var db = mongo.getDB("test");
var availableConnections = db.serverStatus().connections.available;
-if ( availableConnections < ( numPerTypeToCreate * 10 ) ) {
- numPerTypeToCreate = Math.floor( availableConnections / 10 );
+if (availableConnections < (numPerTypeToCreate * 10)) {
+ numPerTypeToCreate = Math.floor(availableConnections / 10);
}
-print( "numPerTypeToCreate: " + numPerTypeToCreate );
+print("numPerTypeToCreate: " + numPerTypeToCreate);
var testDB = 'connectionsOpenedTest';
var signalCollection = 'keepRunning';
function createPersistentConnection() {
assert.soon(function() {
- try {
- return new Mongo(db.getMongo().host);
- } catch (x) {
- return false;
- }}, "Timed out waiting for persistent connection to connect", 30000, 5000);
+ try {
+ return new Mongo(db.getMongo().host);
+ } catch (x) {
+ return false;
+ }
+ }, "Timed out waiting for persistent connection to connect", 30000, 5000);
}
function createTemporaryConnection() {
// Retry connecting until you are successful
- var pollString = "var conn = null;" +
- "assert.soon(function() {" +
- "try { conn = new Mongo(\"" + db.getMongo().host + "\"); return conn" +
- "} catch (x) {return false;}}, " +
+ var pollString = "var conn = null;" + "assert.soon(function() {" + "try { conn = new Mongo(\"" +
+ db.getMongo().host + "\"); return conn" + "} catch (x) {return false;}}, " +
"\"Timed out waiting for temporary connection to connect\", 30000, 5000);";
// Poll the signal collection until it is told to terminate.
- pollString += "assert.soon(function() {"
- + "return conn.getDB('" + testDB + "').getCollection('" + signalCollection + "')"
- + ".findOne().stop;}, \"Parallel shell never told to terminate\", 10 * 60000);";
+ pollString += "assert.soon(function() {" + "return conn.getDB('" + testDB +
+ "').getCollection('" + signalCollection + "')" +
+ ".findOne().stop;}, \"Parallel shell never told to terminate\", 10 * 60000);";
return startParallelShell(pollString, null, true);
}
function waitForConnections(expectedCurrentConnections, expectedTotalConnections) {
- assert.soon(function() {
- var currentConnInfo = db.serverStatus().connections;
- return (expectedCurrentConnections == currentConnInfo.current) &&
- (expectedTotalConnections, currentConnInfo.totalCreated);
- },
- {toString: function() {
- return "Incorrect connection numbers. Expected " + expectedCurrentConnections +
- " current connections and " + expectedTotalConnections + " total" +
- " connections. Connection info from serverStatus: " +
- tojson(db.serverStatus().connections); } },
- 5 * 60000);
-
+ assert.soon(
+ function() {
+ var currentConnInfo = db.serverStatus().connections;
+ return (expectedCurrentConnections == currentConnInfo.current) &&
+ (expectedTotalConnections, currentConnInfo.totalCreated);
+ },
+ {
+ toString: function() {
+ return "Incorrect connection numbers. Expected " + expectedCurrentConnections +
+ " current connections and " + expectedTotalConnections + " total" +
+ " connections. Connection info from serverStatus: " +
+ tojson(db.serverStatus().connections);
+ }
+ },
+ 5 * 60000);
}
-
var originalConnInfo = db.serverStatus().connections;
assert.gt(originalConnInfo.current, 0);
assert.gt(originalConnInfo.totalCreated, 0);
@@ -72,7 +73,7 @@ waitForConnections(originalConnInfo.current + numPerTypeToCreate,
jsTestLog("Creating temporary connections");
db.getSiblingDB(testDB).dropDatabase();
-db.getSiblingDB(testDB).getCollection(signalCollection).insert({stop:false});
+db.getSiblingDB(testDB).getCollection(signalCollection).insert({stop: false});
var tempConns = [];
for (var i = 0; i < numPerTypeToCreate; i++) {
@@ -80,21 +81,21 @@ for (var i = 0; i < numPerTypeToCreate; i++) {
}
jsTestLog("Testing that temporary connections increased the current and totalCreated counters");
-waitForConnections(originalConnInfo.current + numPerTypeToCreate*2,
- originalConnInfo.totalCreated + numPerTypeToCreate*2);
+waitForConnections(originalConnInfo.current + numPerTypeToCreate * 2,
+ originalConnInfo.totalCreated + numPerTypeToCreate * 2);
jsTestLog("Waiting for all temporary connections to be closed");
// Notify waiting parallel shells to terminate, causing the connection count to go back down.
-db.getSiblingDB(testDB).getCollection(signalCollection).update({}, {$set : {stop:true}});
+db.getSiblingDB(testDB).getCollection(signalCollection).update({}, {$set: {stop: true}});
for (var i = 0; i < tempConns.length; i++) {
- tempConns[i](); // wait on parallel shell to terminate
+ tempConns[i](); // wait on parallel shell to terminate
}
jsTestLog("Testing that current connections counter went down after temporary connections closed");
waitForConnections(originalConnInfo.current + numPerTypeToCreate,
- originalConnInfo.totalCreated + numPerTypeToCreate*2);
+ originalConnInfo.totalCreated + numPerTypeToCreate * 2);
persistent = null;
gc();
-MongoRunner.stopMongod( mongo );
+MongoRunner.stopMongod(mongo);
diff --git a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
index 8aa0c79b5a6..f9f9f7b9f06 100644
--- a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
+++ b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
@@ -13,16 +13,22 @@
_writeMode = mode;
},
writeMode: function() {
- return _writeMode;
+ return _writeMode;
+ },
+ getSlaveOk: function() {
+ return true;
},
- getSlaveOk: function() { return true; },
runCommand: function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {ok: 1.0};
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {
+ ok: 1.0
+ };
},
insert: function(db, indexSpecs, opts) {
- insertsRan.push({db: db, indexSpecs: indexSpecs, opts: opts});
- return {ok: 1.0};
+ insertsRan.push({db: db, indexSpecs: indexSpecs, opts: opts});
+ return {
+ ok: 1.0
+ };
},
getWriteConcern: function() {
return null;
@@ -43,25 +49,25 @@
assert.eq(commandsRan.length, 1);
assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
assert.eq(commandsRan[0].cmd["indexes"][0],
- {ns: "test.create_indexes_shell_helper", key: {x:1}, name: "x_1"});
+ {ns: "test.create_indexes_shell_helper", key: {x: 1}, name: "x_1"});
commandsRan = [];
t.createIndexes([{y: 1}, {z: -1}]);
assert.eq(commandsRan.length, 1);
- assert( commandsRan[0].cmd.hasOwnProperty("createIndexes"));
+ assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
assert.eq(commandsRan[0].cmd["indexes"][0],
- {ns: "test.create_indexes_shell_helper", key: {y:1}, name: "y_1"});
+ {ns: "test.create_indexes_shell_helper", key: {y: 1}, name: "y_1"});
assert.eq(commandsRan[0].cmd["indexes"][1],
- {ns: "test.create_indexes_shell_helper", key: {z:-1}, name: "z_-1"});
+ {ns: "test.create_indexes_shell_helper", key: {z: -1}, name: "z_-1"});
commandsRan = [];
t.createIndex({a: 1});
assert.eq(commandsRan.length, 1);
- assert( commandsRan[0].cmd.hasOwnProperty("createIndexes"));
+ assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
assert.eq(commandsRan[0].cmd["indexes"][0],
- {ns: "test.create_indexes_shell_helper", key: {a:1}, name: "a_1"});
+ {ns: "test.create_indexes_shell_helper", key: {a: 1}, name: "a_1"});
db.getMongo().forceWriteMode("compatibility");
@@ -70,13 +76,12 @@
t.createIndex({b: 1});
assert.eq(insertsRan.length, 1);
assert.eq(insertsRan[0]["indexSpecs"]["ns"], "test.create_indexes_shell_helper");
- assert.eq(insertsRan[0]["indexSpecs"]["key"], {b:1});
+ assert.eq(insertsRan[0]["indexSpecs"]["key"], {b: 1});
assert.eq(insertsRan[0]["indexSpecs"]["name"], "b_1");
- //getLastError is called in the course of the bulk insert
+ // getLastError is called in the course of the bulk insert
assert.eq(commandsRan.length, 1);
assert(commandsRan[0].cmd.hasOwnProperty("getlasterror"));
- }
- finally {
+ } finally {
db._mongo = mongo;
}
}());
diff --git a/jstests/noPassthroughWithMongod/cursor8.js b/jstests/noPassthroughWithMongod/cursor8.js
index 34058b391e3..bcaf2387ae4 100644
--- a/jstests/noPassthroughWithMongod/cursor8.js
+++ b/jstests/noPassthroughWithMongod/cursor8.js
@@ -2,24 +2,24 @@
var t = db.cursor8;
t.drop();
-t.save( {} );
-t.save( {} );
-t.save( {} );
+t.save({});
+t.save({});
+t.save({});
-assert.eq( 3 , t.find().count() , "A0" );
+assert.eq(3, t.find().count(), "A0");
var initialTotalOpen = db.serverStatus().metrics.cursor.open.total;
-function test( want , msg ){
+function test(want, msg) {
var res = db.serverStatus().metrics.cursor;
assert.eq(want + initialTotalOpen, res.open.total, msg + " " + tojson(res));
}
-test( 0 , "A1" );
-assert.eq( 3 , t.find().count() , "A2" );
-assert.eq( 3 , t.find( {} ).count() , "A3" );
+test(0, "A1");
+assert.eq(3, t.find().count(), "A2");
+assert.eq(3, t.find({}).count(), "A3");
// This cursor should remain open on the server.
-var cursor = t.find( {} ).batchSize( 2 );
+var cursor = t.find({}).batchSize(2);
cursor.next();
-test( 1 , "B1" );
+test(1, "B1");
diff --git a/jstests/noPassthroughWithMongod/default_read_pref.js b/jstests/noPassthroughWithMongod/default_read_pref.js
index 05be7915626..46967012a31 100644
--- a/jstests/noPassthroughWithMongod/default_read_pref.js
+++ b/jstests/noPassthroughWithMongod/default_read_pref.js
@@ -8,18 +8,25 @@
try {
var commandsRan = [];
db._mongo = {
- getSlaveOk: function() { return false; },
- getReadPrefMode: function() { return mongo.getReadPrefMode(); },
- getReadPref: function() { return mongo.getReadPref(); },
+ getSlaveOk: function() {
+ return false;
+ },
+ getReadPrefMode: function() {
+ return mongo.getReadPrefMode();
+ },
+ getReadPref: function() {
+ return mongo.getReadPref();
+ },
runCommand: function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts:opts});
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
}
};
db.runReadCommand({ping: 1});
assert.eq(commandsRan.length, 1);
assert.docEq(commandsRan[0].cmd, {ping: 1}, "The command should not have been wrapped.");
- assert.eq(commandsRan[0].opts & DBQuery.Option.slaveOk, 0, "The slaveOk bit should not be set.");
+ assert.eq(
+ commandsRan[0].opts & DBQuery.Option.slaveOk, 0, "The slaveOk bit should not be set.");
} finally {
db._mongo = mongo;
diff --git a/jstests/noPassthroughWithMongod/dup_bgindex.js b/jstests/noPassthroughWithMongod/dup_bgindex.js
index 3d36e90a4ae..cd0078248bb 100644
--- a/jstests/noPassthroughWithMongod/dup_bgindex.js
+++ b/jstests/noPassthroughWithMongod/dup_bgindex.js
@@ -2,15 +2,15 @@
// This test runs fairly quickly but cannot be in /jstests/. So it lives in slowNightly for now.
var t = db.duplIndexTest;
t.drop();
-for (var i=0; i<10000; i++) {
- t.insert( { name : "foo" , z : { a : 17 , b : 4}, i: i } );
+for (var i = 0; i < 10000; i++) {
+ t.insert({name: "foo", z: {a: 17, b: 4}, i: i});
}
var cmd = "db.duplIndexTest.ensureIndex( { i : 1 }, {background:true} );";
var join1 = startParallelShell(cmd);
var join2 = startParallelShell(cmd);
-t.ensureIndex( { i : 1 }, {background:true} );
-assert.eq(1, t.find({i:1}).count(), "Should find only one doc");
-t.dropIndex({ i : 1 });
-assert.eq(1, t.find({i:1}).count(), "Should find only one doc");
+t.ensureIndex({i: 1}, {background: true});
+assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
+t.dropIndex({i: 1});
+assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
join1();
join2();
diff --git a/jstests/noPassthroughWithMongod/explain1.js b/jstests/noPassthroughWithMongod/explain1.js
index 81baeb6e918..021108028d4 100644
--- a/jstests/noPassthroughWithMongod/explain1.js
+++ b/jstests/noPassthroughWithMongod/explain1.js
@@ -4,13 +4,16 @@ t = db.jstests_slowNightly_explain1;
t.drop();
// Periodically drops the collection, invalidating client cursors for s2's operations.
-s1 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 80; ++i ) { t.drop(); t.ensureIndex({x:1}); for( var j = 0; j < 1000; ++j ) { t.save( {x:j,y:1} ) }; sleep( 100 ); }" );
+s1 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 80; ++i ) { t.drop(); t.ensureIndex({x:1}); for( var j = 0; j < 1000; ++j ) { t.save( {x:j,y:1} ) }; sleep( 100 ); }");
// Query repeatedly.
-s2 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 500; ++i ) { try { z = t.find( {x:{$gt:0},y:1} ).explain(); t.count( {x:{$gt:0},y:1} ); } catch( e ) {} }" );
+s2 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 500; ++i ) { try { z = t.find( {x:{$gt:0},y:1} ).explain(); t.count( {x:{$gt:0},y:1} ); } catch( e ) {} }");
// Put pressure on s2 to yield more often.
-s3 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 200; ++i ) { t.validate({scandata:true}); }" );
+s3 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 200; ++i ) { t.validate({scandata:true}); }");
s1();
s2();
diff --git a/jstests/noPassthroughWithMongod/explain2.js b/jstests/noPassthroughWithMongod/explain2.js
index 032f0fa8de8..81b8951488f 100644
--- a/jstests/noPassthroughWithMongod/explain2.js
+++ b/jstests/noPassthroughWithMongod/explain2.js
@@ -2,17 +2,17 @@
collName = 'jstests_slowNightly_explain2';
-t = db[ collName ];
+t = db[collName];
t.drop();
-db.createCollection( collName, {capped:true,size:100000} );
-t = db[ collName ];
-t.ensureIndex( {x:1} );
+db.createCollection(collName, {capped: true, size: 100000});
+t = db[collName];
+t.ensureIndex({x: 1});
-a = startParallelShell( 'for( i = 0; i < 50000; ++i ) { db.' + collName + '.insert( {x:i,y:1} ); }' );
+a = startParallelShell('for( i = 0; i < 50000; ++i ) { db.' + collName + '.insert( {x:i,y:1} ); }');
-for( i = 0; i < 800; ++i ) {
- t.find( {x:{$gt:-1},y:1} ).sort({x:-1}).explain();
+for (i = 0; i < 800; ++i) {
+ t.find({x: {$gt: -1}, y: 1}).sort({x: -1}).explain();
}
a();
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/explain3.js b/jstests/noPassthroughWithMongod/explain3.js
index af6fde7b81b..ed22604f02c 100644
--- a/jstests/noPassthroughWithMongod/explain3.js
+++ b/jstests/noPassthroughWithMongod/explain3.js
@@ -4,13 +4,16 @@ t = db.jstests_slowNightly_explain3;
t.drop();
// Periodically drops the collection, invalidating client cursors for s2's operations.
-s1 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 80; ++i ) { t.drop(); t.ensureIndex({x:1}); for( var j = 0; j < 1000; ++j ) { t.save( {x:j,y:1} ) }; sleep( 100 ); }" );
+s1 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 80; ++i ) { t.drop(); t.ensureIndex({x:1}); for( var j = 0; j < 1000; ++j ) { t.save( {x:j,y:1} ) }; sleep( 100 ); }");
// Query repeatedly.
-s2 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 500; ++i ) { try { z = t.find( {x:{$gt:0},y:1} ).sort({x:1}).explain(); } catch( e ) {} }" );
+s2 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 500; ++i ) { try { z = t.find( {x:{$gt:0},y:1} ).sort({x:1}).explain(); } catch( e ) {} }");
// Put pressure on s2 to yield more often.
-s3 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 200; ++i ) { t.validate({scandata:true}); }" );
+s3 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 200; ++i ) { t.validate({scandata:true}); }");
s1();
s2();
diff --git a/jstests/noPassthroughWithMongod/external_sort_text_agg.js b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
index 5479d1b30b7..ecb843ae9e5 100644
--- a/jstests/noPassthroughWithMongod/external_sort_text_agg.js
+++ b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
@@ -3,15 +3,16 @@ var t = db.external_sort_text_agg;
t.drop();
t.ensureIndex({text: "text"});
for (i = 0; i < 100; i++) {
- t.insert({_id:i, text: Array(210000).join("asdf ")});
+ t.insert({_id: i, text: Array(210000).join("asdf ")});
// string over 1MB to hit the 100MB threshold for external sort
}
var score = t.find({$text: {$search: "asdf"}}, {score: {$meta: 'textScore'}}).next().score;
-var res = t.aggregate([{$match: {$text: {$search: "asdf"}}},
- {$sort: {"_id": 1}},
- {$project: {string: "$text", score: {$meta: "textScore"}}}
- ],
+var res = t.aggregate([
+ {$match: {$text: {$search: "asdf"}}},
+ {$sort: {"_id": 1}},
+ {$project: {string: "$text", score: {$meta: "textScore"}}}
+],
{allowDiskUse: true});
// we must use .next() rather than a $limit because a $limit will optimize away the external sort
printjson(res.next());
diff --git a/jstests/noPassthroughWithMongod/find_and_modify_server16469.js b/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
index 5c54c05f6e2..43f6ed3910c 100644
--- a/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
+++ b/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
@@ -14,8 +14,8 @@ var result = db.adminCommand({getParameter: 1, internalQueryExecMaxBlockingSortB
assert.commandWorked(result);
var oldSortLimit = result.internalQueryExecMaxBlockingSortBytes;
var newSortLimit = 1024 * 1024;
-assert.commandWorked(db.adminCommand({setParameter: 1,
- internalQueryExecMaxBlockingSortBytes: newSortLimit}));
+assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryExecMaxBlockingSortBytes: newSortLimit}));
try {
// Insert ~3MB of data.
@@ -28,18 +28,19 @@ try {
}
// Verify that an unindexed sort of this data fails with a find() if no limit is specified.
- assert.throws(function() { coll.find({}).sort({b: 1}).itcount(); });
+ assert.throws(function() {
+ coll.find({}).sort({b: 1}).itcount();
+ });
// Verify that an unindexed sort of this data succeeds with findAndModify (which should be
// requesting a top-K sort).
- result = coll.runCommand({findAndModify: coll.getName(), query: {}, update: {$set: {c: 1}},
- sort: {b: 1}});
+ result = coll.runCommand(
+ {findAndModify: coll.getName(), query: {}, update: {$set: {c: 1}}, sort: {b: 1}});
assert.commandWorked(result);
assert.neq(result.value, null);
assert.eq(result.value.b, 0);
-}
-finally {
+} finally {
// Restore the orginal sort memory limit.
- assert.commandWorked(db.adminCommand({setParameter: 1,
- internalQueryExecMaxBlockingSortBytes: oldSortLimit}));
+ assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryExecMaxBlockingSortBytes: oldSortLimit}));
}
diff --git a/jstests/noPassthroughWithMongod/fsync2.js b/jstests/noPassthroughWithMongod/fsync2.js
index dad1c6088bf..5710f57e33c 100644
--- a/jstests/noPassthroughWithMongod/fsync2.js
+++ b/jstests/noPassthroughWithMongod/fsync2.js
@@ -1,59 +1,56 @@
-function debug( msg ) {
- print( "fsync2: " + msg );
+function debug(msg) {
+ print("fsync2: " + msg);
}
var loops = 200;
-if ( db.getSisterDB("local").slaves.count() > 0 ) {
+if (db.getSisterDB("local").slaves.count() > 0) {
// replication can cause some write locks on local
// therefore this test is flaky with replication on
loops = 1;
}
-
function doTest() {
db.fsync2.drop();
// Make write ops asynchronous so the test won't hang when in fsync lock mode.
db.getMongo().forceWriteMode('legacy');
- db.fsync2.save( {x:1} );
-
- d = db.getSisterDB( "admin" );
+ db.fsync2.save({x: 1});
+
+ d = db.getSisterDB("admin");
// Don't test if the engine doesn't support fsyncLock
- var ret = d.runCommand( {fsync:1, lock: 1 } );
+ var ret = d.runCommand({fsync: 1, lock: 1});
if (!ret.ok) {
assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
jsTestLog("Skipping test as engine does not support fsyncLock");
return;
}
-
- assert.commandWorked( ret );
- debug( "after lock" );
-
+ assert.commandWorked(ret);
- for ( var i=0; i<loops; i++) {
- debug( "loop: " + i );
+ debug("after lock");
+
+ for (var i = 0; i < loops; i++) {
+ debug("loop: " + i);
assert.eq(1, db.fsync2.count());
sleep(100);
}
-
- debug( "about to save" );
- db.fsync2.save( {x:1} );
- debug( "save done" );
-
- m = new Mongo( db.getMongo().host );
-
+
+ debug("about to save");
+ db.fsync2.save({x: 1});
+ debug("save done");
+
+ m = new Mongo(db.getMongo().host);
+
// Uncomment once SERVER-4243 is fixed
- //assert.eq(1, m.getDB(db.getName()).fsync2.count());
-
- assert( m.getDB("admin").fsyncUnlock().ok );
+ // assert.eq(1, m.getDB(db.getName()).fsync2.count());
+
+ assert(m.getDB("admin").fsyncUnlock().ok);
- assert.eq( 2, db.fsync2.count() );
-
+ assert.eq(2, db.fsync2.count());
}
-if (!jsTest.options().auth) { // SERVER-4243
+if (!jsTest.options().auth) { // SERVER-4243
doTest();
}
diff --git a/jstests/noPassthroughWithMongod/ftdc_params.js b/jstests/noPassthroughWithMongod/ftdc_params.js
index 9a0fe20d965..5fae9e77c49 100644
--- a/jstests/noPassthroughWithMongod/ftdc_params.js
+++ b/jstests/noPassthroughWithMongod/ftdc_params.js
@@ -1,16 +1,18 @@
// FTDC test cases
//
-(function () {
+(function() {
'use strict';
- var admin = db.getSiblingDB( "admin" );
+ var admin = db.getSiblingDB("admin");
// Check the defaults are correct
//
function getparam(field) {
- var q = { getParameter : 1 };
+ var q = {
+ getParameter: 1
+ };
q[field] = 1;
- var ret = admin.runCommand( q );
+ var ret = admin.runCommand(q);
return ret[field];
}
@@ -23,7 +25,7 @@
assert.eq(getparam("diagnosticDataCollectionSamplesPerInterimUpdate"), 10);
function setparam(obj) {
- var ret = admin.runCommand( Object.extend({ setParameter : 1 }, obj));
+ var ret = admin.runCommand(Object.extend({setParameter: 1}, obj));
return ret;
}
@@ -55,4 +57,4 @@
assert.commandWorked(setparam({"diagnosticDataCollectionPeriodMillis": 1000}));
assert.commandWorked(setparam({"diagnosticDataCollectionSamplesPerChunk": 300}));
assert.commandWorked(setparam({"diagnosticDataCollectionSamplesPerInterimUpdate": 10}));
-}) ();
+})();
diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
index 7cd33b2d638..47c0369e5e0 100644
--- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js
+++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
@@ -3,27 +3,28 @@
t = db.axisaligned;
t.drop();
-scale = [ 1, 10, 1000, 10000 ];
-bits = [ 2, 3, 4, 5, 6, 7, 8, 9 ];
-radius = [ 0.0001, 0.001, 0.01, 0.1 ];
-center = [ [ 5, 52 ], [ 6, 53 ], [ 7, 54 ], [ 8, 55 ], [ 9, 56 ] ];
+scale = [1, 10, 1000, 10000];
+bits = [2, 3, 4, 5, 6, 7, 8, 9];
+radius = [0.0001, 0.001, 0.01, 0.1];
+center = [[5, 52], [6, 53], [7, 54], [8, 55], [9, 56]];
bound = [];
-for( var j = 0; j < center.length; j++ ) bound.push( [-180, 180] );
+for (var j = 0; j < center.length; j++)
+ bound.push([-180, 180]);
// Scale all our values to test different sizes
radii = [];
centers = [];
bounds = [];
-for( var s = 0; s < scale.length; s++ ){
- for ( var i = 0; i < radius.length; i++ ) {
- radii.push( radius[i] * scale[s] );
+for (var s = 0; s < scale.length; s++) {
+ for (var i = 0; i < radius.length; i++) {
+ radii.push(radius[i] * scale[s]);
}
- for ( var j = 0; j < center.length; j++ ) {
- centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] );
- bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] );
+ for (var j = 0; j < center.length; j++) {
+ centers.push([center[j][0] * scale[s], center[j][1] * scale[s]]);
+ bounds.push([bound[j][0] * scale[s], bound[j][1] * scale[s]]);
}
}
@@ -31,76 +32,85 @@ radius = radii;
center = centers;
bound = bounds;
+for (var b = 0; b < bits.length; b++) {
+ printjson(radius);
+ printjson(centers);
-for ( var b = 0; b < bits.length; b++ ) {
- printjson( radius );
- printjson( centers );
-
- for ( var i = 0; i < radius.length; i++ ) {
- for ( var j = 0; j < center.length; j++ ) {
- printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
+ for (var i = 0; i < radius.length; i++) {
+ for (var j = 0; j < center.length; j++) {
+ printjson({center: center[j], radius: radius[i], bits: bits[b]});
t.drop();
// Make sure our numbers are precise enough for this test
- if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
+ if ((center[j][0] - radius[i] == center[j][0]) ||
+ (center[j][1] - radius[i] == center[j][1]))
continue;
- t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
- t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
- t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
- t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
- t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
- t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
-
- var res = t.ensureIndex({ loc: "2d" },
- { max: bound[j][1],
- min : bound[j][0],
- bits : bits[b] });
+ t.save({"_id": 1, "loc": {"x": center[j][0] - radius[i], "y": center[j][1]}});
+ t.save({"_id": 2, "loc": {"x": center[j][0], "y": center[j][1]}});
+ t.save({"_id": 3, "loc": {"x": center[j][0] + radius[i], "y": center[j][1]}});
+ t.save({"_id": 4, "loc": {"x": center[j][0], "y": center[j][1] + radius[i]}});
+ t.save({"_id": 5, "loc": {"x": center[j][0], "y": center[j][1] - radius[i]}});
+ t.save(
+ {"_id": 6, "loc": {"x": center[j][0] - radius[i], "y": center[j][1] + radius[i]}});
+ t.save(
+ {"_id": 7, "loc": {"x": center[j][0] + radius[i], "y": center[j][1] + radius[i]}});
+ t.save(
+ {"_id": 8, "loc": {"x": center[j][0] - radius[i], "y": center[j][1] - radius[i]}});
+ t.save(
+ {"_id": 9, "loc": {"x": center[j][0] + radius[i], "y": center[j][1] - radius[i]}});
+
+ var res =
+ t.ensureIndex({loc: "2d"}, {max: bound[j][1], min: bound[j][0], bits: bits[b]});
// ensureIndex fails when this iteration inserted coordinates that are out of bounds.
// These are invalid cases, so we skip them.
- if (!res.ok) continue;
+ if (!res.ok)
+ continue;
- print( "DOING WITHIN QUERY ");
- r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
+ print("DOING WITHIN QUERY ");
+ r = t.find({"loc": {"$within": {"$center": [center[j], radius[i]]}}});
- assert.eq( 5, r.count() );
+ assert.eq(5, r.count());
// FIXME: surely code like this belongs in utils.js.
a = r.toArray();
x = [];
- for ( k in a )
- x.push( a[k]["_id"] );
+ for (k in a)
+ x.push(a[k]["_id"]);
x.sort();
- assert.eq( [ 1, 2, 3, 4, 5 ], x );
+ assert.eq([1, 2, 3, 4, 5], x);
- print( " DOING NEAR QUERY ");
- //printjson( center[j] )
- r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } );
- assert.eq( 5, r.count() );
+ print(" DOING NEAR QUERY ");
+ // printjson( center[j] )
+ r = t.find({loc: {$near: center[j], $maxDistance: radius[i]}}, {_id: 1});
+ assert.eq(5, r.count());
- print( " DOING DIST QUERY ");
+ print(" DOING DIST QUERY ");
- a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results;
- assert.eq( 5, a.length );
+ a = db.runCommand({geoNear: "axisaligned", near: center[j], maxDistance: radius[i]})
+ .results;
+ assert.eq(5, a.length);
var distance = 0;
- for( var k = 0; k < a.length; k++ ){
- assert.gte( a[k].dis, distance );
-
+ for (var k = 0; k < a.length; k++) {
+ assert.gte(a[k].dis, distance);
}
- r = t.find({ loc: { $within: { $box: [ [ center[j][0] - radius[i],
- center[j][1] - radius[i] ],
- [ center[j][0] + radius[i],
- center[j][1] + radius[i] ]]}}},
- { _id: 1 } );
- assert.eq( 9, r.count() );
-
+ r = t.find(
+ {
+ loc: {
+ $within: {
+ $box: [
+ [center[j][0] - radius[i], center[j][1] - radius[i]],
+ [center[j][0] + radius[i], center[j][1] + radius[i]]
+ ]
+ }
+ }
+ },
+ {_id: 1});
+ assert.eq(9, r.count());
}
}
}
diff --git a/jstests/noPassthroughWithMongod/geo_mnypts.js b/jstests/noPassthroughWithMongod/geo_mnypts.js
index d70a2bdb60a..5a936a3490d 100644
--- a/jstests/noPassthroughWithMongod/geo_mnypts.js
+++ b/jstests/noPassthroughWithMongod/geo_mnypts.js
@@ -7,47 +7,43 @@ var totalPts = 500 * 1000;
// Add points in a 100x100 grid
var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < totalPts; i++ ){
+for (var i = 0; i < totalPts; i++) {
var ii = i % 10000;
- bulk.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] });
+ bulk.insert({loc: [ii % 100, Math.floor(ii / 100)]});
}
assert.writeOK(bulk.execute());
-coll.ensureIndex({ loc : "2d" });
+coll.ensureIndex({loc: "2d"});
// Check that quarter of points in each quadrant
-for( var i = 0; i < 4; i++ ){
+for (var i = 0; i < 4; i++) {
var x = i % 2;
- var y = Math.floor( i / 2 );
-
- var box = [[0, 0], [49, 49]];
- box[0][0] += ( x == 1 ? 50 : 0 );
- box[1][0] += ( x == 1 ? 50 : 0 );
- box[0][1] += ( y == 1 ? 50 : 0 );
- box[1][1] += ( y == 1 ? 50 : 0 );
+ var y = Math.floor(i / 2);
- assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).count() );
- assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).itcount() );
+ var box = [[0, 0], [49, 49]];
+ box[0][0] += (x == 1 ? 50 : 0);
+ box[1][0] += (x == 1 ? 50 : 0);
+ box[0][1] += (y == 1 ? 50 : 0);
+ box[1][1] += (y == 1 ? 50 : 0);
+ assert.eq(totalPts / 4, coll.find({loc: {$within: {$box: box}}}).count());
+ assert.eq(totalPts / 4, coll.find({loc: {$within: {$box: box}}}).itcount());
}
// Check that half of points in each half
-for( var i = 0; i < 2; i++ ){
-
+for (var i = 0; i < 2; i++) {
var box = [[0, 0], [49, 99]];
- box[0][0] += ( i == 1 ? 50 : 0 );
- box[1][0] += ( i == 1 ? 50 : 0 );
-
- assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).count() );
- assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).itcount() );
+ box[0][0] += (i == 1 ? 50 : 0);
+ box[1][0] += (i == 1 ? 50 : 0);
+ assert.eq(totalPts / 2, coll.find({loc: {$within: {$box: box}}}).count());
+ assert.eq(totalPts / 2, coll.find({loc: {$within: {$box: box}}}).itcount());
}
// Check that all but corner set of points in radius
-var circle = [[0, 0], (100 - 1) * Math.sqrt( 2 ) - 0.25 ];
-
-assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).count() );
-assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).itcount() );
-
-
+var circle = [[0, 0], (100 - 1) * Math.sqrt(2) - 0.25];
+assert.eq(totalPts - totalPts / (100 * 100),
+ coll.find({loc: {$within: {$center: circle}}}).count());
+assert.eq(totalPts - totalPts / (100 * 100),
+ coll.find({loc: {$within: {$center: circle}}}).itcount());
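
The count in the final pair of assertions is grid arithmetic: each of the 100 x 100 grid positions
holds totalPts / (100 * 100) = 50 documents, and the chosen radius falls between the far corner
[99, 99] and its nearest neighbours, so only that one stack of 50 documents is excluded. A short
sketch of the bound check, reusing the same constants:

var radius = (100 - 1) * Math.sqrt(2) - 0.25;    // ~139.76
var corner = Math.sqrt(99 * 99 + 99 * 99);       // ~140.01, just outside the circle
var nextIn = Math.sqrt(98 * 98 + 99 * 99);       // ~139.30, inside the circle
assert(corner > radius && radius > nextIn);
// Hence the expected count: totalPts - totalPts / (100 * 100) == 499950.
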
diff --git a/jstests/noPassthroughWithMongod/geo_near_random1.js b/jstests/noPassthroughWithMongod/geo_near_random1.js
index ad67bdc2734..c9ea5f1da7c 100644
--- a/jstests/noPassthroughWithMongod/geo_near_random1.js
+++ b/jstests/noPassthroughWithMongod/geo_near_random1.js
@@ -5,9 +5,8 @@ var test = new GeoNearRandomTest("nightly.geo_near_random1");
test.insertPts(200);
-test.testPt([0,0]);
+test.testPt([0, 0]);
test.testPt(test.mkPt());
test.testPt(test.mkPt());
test.testPt(test.mkPt());
test.testPt(test.mkPt());
-
diff --git a/jstests/noPassthroughWithMongod/geo_near_random2.js b/jstests/noPassthroughWithMongod/geo_near_random2.js
index ac729b140e6..2fafb7d4c80 100644
--- a/jstests/noPassthroughWithMongod/geo_near_random2.js
+++ b/jstests/noPassthroughWithMongod/geo_near_random2.js
@@ -5,17 +5,19 @@ var test = new GeoNearRandomTest("nightly.geo_near_random2");
test.insertPts(10000);
-opts = {sphere:0, nToTest:test.nPts*0.01};
-test.testPt([0,0], opts);
+opts = {
+ sphere: 0,
+ nToTest: test.nPts * 0.01
+};
+test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
opts.sphere = 1;
-test.testPt([0,0], opts);
+test.testPt([0, 0], opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
-
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index 50bc5c29d68..073ffdeb72d 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -3,39 +3,79 @@ t.drop();
num = 0;
var bulk = t.initializeUnorderedBulkOp();
-for ( x = -180; x < 180; x += .5 ){
- for ( y = -180; y < 180; y += .5 ){
- o = { _id : num++ , loc : [ x , y ] };
- bulk.insert( o );
+for (x = -180; x < 180; x += .5) {
+ for (y = -180; y < 180; y += .5) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ bulk.insert(o);
}
}
assert.writeOK(bulk.execute());
var numTests = 31;
-for( var n = 0; n < numTests; n++ ){
+for (var n = 0; n < numTests; n++) {
t.dropIndexes();
- t.ensureIndex( { loc : "2d" }, { bits : 2 + n } );
+ t.ensureIndex({loc: "2d"}, {bits: 2 + n});
- assert.between( 9 - 2 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,1], [0,2]] }}} ).count() , 9, "Triangle Test", true);
- assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : [ [-180,-180], [-180,180], [180,180], [180,-180] ] } } } ).count() , "Bounding Box Test" );
+ assert.between(9 - 2,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [1, 1], [0, 2]]}}}).count(),
+ 9,
+ "Triangle Test",
+ true);
+ assert.eq(
+ num,
+ t.find({
+ loc: {"$within": {"$polygon": [[-180, -180], [-180, 180], [180, 180], [180, -180]]}}
+ }).count(),
+ "Bounding Box Test");
- assert.eq( 441 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,10], [10,10], [10,0] ] } } } ).count() , "Square Test" );
- assert.eq( 25 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,2], [2,2], [2,0] ] } } } ).count() , "Square Test 2" );
+ assert.eq(
+ 441,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 10], [10, 10], [10, 0]]}}}).count(),
+ "Square Test");
+ assert.eq(25,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 2], [2, 2], [2, 0]]}}}).count(),
+ "Square Test 2");
- if(1){ // SERVER-3726
- // Points exactly on diagonals may be in or out, depending on how the error calculating the slope falls.
- assert.between( 341 - 18 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,10], [10,10], [10,0], [5,5] ] } } } ).count(), 341, "Square Missing Chunk Test", true );
- assert.between( 21 - 2 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,2], [2,2], [2,0], [1,1] ] } } } ).count(), 21 , "Square Missing Chunk Test 2", true );
+ if (1) { // SERVER-3726
+ // Points exactly on diagonals may be in or out, depending on how the error calculating the
+ // slope falls.
+ assert.between(
+ 341 - 18,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 10], [10, 10], [10, 0], [5, 5]]}}})
+ .count(),
+ 341,
+ "Square Missing Chunk Test",
+ true);
+ assert.between(
+ 21 - 2,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 2], [2, 2], [2, 0], [1, 1]]}}})
+ .count(),
+ 21,
+ "Square Missing Chunk Test 2",
+ true);
}
- assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [0,0], [0,0]] }}} ).count() , "Point Test" );
+ assert.eq(1,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 0], [0, 0]]}}}).count(),
+ "Point Test");
// SERVER-3725
{
- assert.eq( 5 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,0], [2,0]] }}} ).count() , "Line Test 1" );
- assert.eq( 3 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [0,0], [1,0]] }}} ).count() , "Line Test 2" );
- assert.eq( 5 , t.find( { loc: { "$within": { "$polygon" : [[0,2], [0,1], [0,0]] }}} ).count() , "Line Test 3" );
+ assert.eq(5,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [1, 0], [2, 0]]}}}).count(),
+ "Line Test 1");
+ assert.eq(3,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 0], [1, 0]]}}}).count(),
+ "Line Test 2");
+ assert.eq(5,
+ t.find({loc: {"$within": {"$polygon": [[0, 2], [0, 1], [0, 0]]}}}).count(),
+ "Line Test 3");
}
- assert.eq( 3 , t.find( { loc: { "$within": { "$polygon" : [[0,1], [0,0], [0,0]] }}} ).count() , "Line Test 4" );
+ assert.eq(3,
+ t.find({loc: {"$within": {"$polygon": [[0, 1], [0, 0], [0, 0]]}}}).count(),
+ "Line Test 4");
}
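
The square-test constants above are also grid arithmetic: the collection holds one point every 0.5
degrees and the $polygon predicate includes points on its boundary, so a closed axis-aligned square
of side s (aligned to the grid) covers (2s + 1)^2 points. A minimal sketch of that count, not part
of the test:

function pointsInClosedSquare(side) {
    var perAxis = 2 * side + 1;   // 0, 0.5, 1, ..., side
    return perAxis * perAxis;
}
assert.eq(441, pointsInClosedSquare(10));   // matches "Square Test"
assert.eq(25, pointsInClosedSquare(2));     // matches "Square Test 2"
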
diff --git a/jstests/noPassthroughWithMongod/getmore_error.js b/jstests/noPassthroughWithMongod/getmore_error.js
index 8eebd7205ae..4fed6c38d3d 100644
--- a/jstests/noPassthroughWithMongod/getmore_error.js
+++ b/jstests/noPassthroughWithMongod/getmore_error.js
@@ -2,20 +2,20 @@
var t = db.getmore_error;
-for (var i=0; i < 10; i++) {
+for (var i = 0; i < 10; i++) {
t.insert({_id: i});
}
-var cursor = t.find().batchSize(2); // 1 is a special case
+var cursor = t.find().batchSize(2); // 1 is a special case
// first batch (only one from OP_QUERY)
-assert.eq(cursor.next(), {_id:0});
-assert.eq(cursor.next(), {_id:1});
+assert.eq(cursor.next(), {_id: 0});
+assert.eq(cursor.next(), {_id: 1});
assert.eq(cursor.objsLeftInBatch(), 0);
// second batch (first from OP_GETMORE)
-assert.eq(cursor.next(), {_id:2});
-assert.eq(cursor.next(), {_id:3});
+assert.eq(cursor.next(), {_id: 2});
+assert.eq(cursor.next(), {_id: 3});
assert.eq(cursor.objsLeftInBatch(), 0);
/*
diff --git a/jstests/noPassthroughWithMongod/huge_multikey_index.js b/jstests/noPassthroughWithMongod/huge_multikey_index.js
index 14f110ff3bb..fce643eab8a 100644
--- a/jstests/noPassthroughWithMongod/huge_multikey_index.js
+++ b/jstests/noPassthroughWithMongod/huge_multikey_index.js
@@ -6,14 +6,14 @@ t.drop();
function doit() {
arr = [];
- for (var i=0; i< 1000*1000;i++)
+ for (var i = 0; i < 1000 * 1000; i++)
arr.push(i);
- t.insert({a:arr});
+ t.insert({a: arr});
- //t.ensureIndex({a:1}, {background:true}) // always worked
+ // t.ensureIndex({a:1}, {background:true}) // always worked
- t.ensureIndex({a:1}); // used to fail server with out of fds error
+ t.ensureIndex({a: 1}); // used to fail server with out of fds error
}
doit();
diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js
index 25d9eed5ca8..30ed9c17eac 100644
--- a/jstests/noPassthroughWithMongod/index_check10.js
+++ b/jstests/noPassthroughWithMongod/index_check10.js
@@ -6,121 +6,122 @@ Random.setRandomSeed();
t = db.test_index_check10;
function doIt() {
-
t.drop();
function sort() {
var sort = {};
- for( var i = 0; i < n; ++i ) {
- sort[ fields[ i ] ] = Random.rand() > 0.5 ? 1 : -1;
+ for (var i = 0; i < n; ++i) {
+ sort[fields[i]] = Random.rand() > 0.5 ? 1 : -1;
}
return sort;
}
- var fields = [ 'a', 'b', 'c', 'd', 'e' ];
- n = Random.randInt( 5 ) + 1;
+ var fields = ['a', 'b', 'c', 'd', 'e'];
+ n = Random.randInt(5) + 1;
var idx = sort();
var chars = "abcdefghijklmnopqrstuvwxyz";
function obj() {
var ret = {};
- for( var i = 0; i < n; ++i ) {
- ret[ fields[ i ] ] = r();
+ for (var i = 0; i < n; ++i) {
+ ret[fields[i]] = r();
}
return ret;
}
function r() {
- var len = Random.randInt( 700 / n );
+ var len = Random.randInt(700 / n);
buf = "";
- for( var i = 0; i < len; ++i ) {
- buf += chars.charAt( Random.randInt( chars.length ) );
+ for (var i = 0; i < len; ++i) {
+ buf += chars.charAt(Random.randInt(chars.length));
}
return buf;
}
function check() {
var v = t.validate();
- if ( !v.valid ) {
- printjson( v );
- assert( v.valid );
+ if (!v.valid) {
+ printjson(v);
+ assert(v.valid);
}
var spec = {};
- for( var i = 0; i < n; ++i ) {
- if ( Random.rand() > 0.5 ) {
- var bounds = [ r(), r() ];
- if ( bounds[ 0 ] > bounds[ 1 ] ) {
+ for (var i = 0; i < n; ++i) {
+ if (Random.rand() > 0.5) {
+ var bounds = [r(), r()];
+ if (bounds[0] > bounds[1]) {
bounds.reverse();
}
var s = {};
- if ( Random.rand() > 0.5 ) {
- s[ "$gte" ] = bounds[ 0 ];
+ if (Random.rand() > 0.5) {
+ s["$gte"] = bounds[0];
} else {
- s[ "$gt" ] = bounds[ 0 ];
+ s["$gt"] = bounds[0];
}
- if ( Random.rand() > 0.5 ) {
- s[ "$lte" ] = bounds[ 1 ];
+ if (Random.rand() > 0.5) {
+ s["$lte"] = bounds[1];
} else {
- s[ "$lt" ] = bounds[ 1 ];
+ s["$lt"] = bounds[1];
}
- spec[ fields[ i ] ] = s;
+ spec[fields[i]] = s;
} else {
var vals = [];
- for( var j = 0; j < Random.randInt( 15 ); ++j ) {
- vals.push( r() );
+ for (var j = 0; j < Random.randInt(15); ++j) {
+ vals.push(r());
}
- spec[ fields[ i ] ] = { $in: vals };
+ spec[fields[i]] = {
+ $in: vals
+ };
}
}
s = sort();
- c1 = t.find( spec, { _id:null } ).sort( s ).hint( idx ).toArray();
+ c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray();
try {
- c3 = t.find( spec, { _id:null } ).sort( s ).hint( {$natural:1} ).toArray();
- } catch( e ) {
+ c3 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray();
+ } catch (e) {
// may assert if too much data for in memory sort
- print( "retrying check..." );
- check(); // retry with different bounds
+ print("retrying check...");
+ check(); // retry with different bounds
return;
}
var j = 0;
- for( var i = 0; i < c3.length; ++i ) {
- if( friendlyEqual( c1[ j ], c3[ i ] ) ) {
+ for (var i = 0; i < c3.length; ++i) {
+ if (friendlyEqual(c1[j], c3[i])) {
++j;
} else {
- var o = c3[ i ];
- var size = Object.bsonsize( o );
- for( var f in o ) {
- size -= f.length;
+ var o = c3[i];
+ var size = Object.bsonsize(o);
+ for (var f in o) {
+ size -= f.length;
}
- var max = 818; // KeyMax
- if ( size <= max ) {
- assert.eq( c1, c3 , "size: " + size );
+ var max = 818; // KeyMax
+ if (size <= max) {
+ assert.eq(c1, c3, "size: " + size);
}
}
}
}
var bulk = t.initializeUnorderedBulkOp();
- for( var i = 0; i < 10000; ++i ) {
- bulk.insert( obj() );
+ for (var i = 0; i < 10000; ++i) {
+ bulk.insert(obj());
}
assert.writeOK(bulk.execute());
- t.ensureIndex( idx );
+ t.ensureIndex(idx);
check();
bulk = t.initializeUnorderedBulkOp();
- for( var i = 0; i < 10000; ++i ) {
- if ( Random.rand() > 0.9 ) {
- bulk.insert( obj() );
+ for (var i = 0; i < 10000; ++i) {
+ if (Random.rand() > 0.9) {
+ bulk.insert(obj());
} else {
- bulk.find( obj() ).remove(); // improve
+ bulk.find(obj()).remove(); // improve
}
- if( Random.rand() > 0.999 ) {
- print( i );
+ if (Random.rand() > 0.999) {
+ print(i);
assert.writeOK(bulk.execute());
check();
bulk = t.initializeUnorderedBulkOp();
@@ -128,9 +129,8 @@ function doIt() {
}
assert.writeOK(bulk.execute());
check();
-
}
-for( var z = 0; z < 5; ++z ) {
+for (var z = 0; z < 5; ++z) {
doIt();
}
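
The tolerance inside check() deserves a note: documents whose index key would exceed KeyMax were
historically left out of the index, so a mismatch between the index-hinted and $natural-hinted
results only counts as a failure when the approximate key size fits under the limit. A sketch of
the heuristic as the test approximates it (BSON size minus the field-name lengths, since field
names are not part of the key):

function fitsUnderKeyMax(doc) {
    var size = Object.bsonsize(doc);
    for (var f in doc) {
        size -= f.length;   // index keys carry values only, not field names
    }
    return size <= 818;     // 818 is the KeyMax value the test uses
}
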
diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js
index 3271d7245f5..a801b473a44 100644
--- a/jstests/noPassthroughWithMongod/index_check9.js
+++ b/jstests/noPassthroughWithMongod/index_check9.js
@@ -5,135 +5,135 @@ Random.setRandomSeed();
t = db.test_index_check9;
function doIt() {
+ t.drop();
-t.drop();
+ function sort() {
+ var sort = {};
+ for (var i = 0; i < n; ++i) {
+ sort[fields[i]] = Random.rand() > 0.5 ? 1 : -1;
+ }
+ return sort;
+ }
-function sort() {
- var sort = {};
- for( var i = 0; i < n; ++i ) {
- sort[ fields[ i ] ] = Random.rand() > 0.5 ? 1 : -1;
- }
- return sort;
-}
+ var fields = ['a', 'b', 'c', 'd', 'e'];
+ n = Random.randInt(5) + 1;
+ var idx = sort();
-var fields = [ 'a', 'b', 'c', 'd', 'e' ];
-n = Random.randInt( 5 ) + 1;
-var idx = sort();
+ var chars = "abcdefghijklmnopqrstuvwxyz";
+ var alphas = [];
+ for (var i = 0; i < n; ++i) {
+ alphas.push(Random.rand() > 0.5);
+ }
-var chars = "abcdefghijklmnopqrstuvwxyz";
-var alphas = [];
-for( var i = 0; i < n; ++i ) {
- alphas.push( Random.rand() > 0.5 );
-}
-
-t.ensureIndex( idx );
+ t.ensureIndex(idx);
-function obj() {
- var ret = {};
- for( var i = 0; i < n; ++i ) {
- ret[ fields[ i ] ] = r( alphas[ i ] );
+ function obj() {
+ var ret = {};
+ for (var i = 0; i < n; ++i) {
+ ret[fields[i]] = r(alphas[i]);
+ }
+ return ret;
}
- return ret;
-}
-function r( alpha ) {
- if ( !alpha ) {
- return Random.randInt( 10 );
- } else {
- var len = Random.randInt( 10 );
- buf = "";
- for( var i = 0; i < len; ++i ) {
- buf += chars.charAt( Random.randInt( chars.length ) );
+ function r(alpha) {
+ if (!alpha) {
+ return Random.randInt(10);
+ } else {
+ var len = Random.randInt(10);
+ buf = "";
+ for (var i = 0; i < len; ++i) {
+ buf += chars.charAt(Random.randInt(chars.length));
+ }
+ return buf;
}
- return buf;
}
-}
-function check() {
- var v = t.validate();
- if ( !t.valid ) {
- printjson( t );
- assert( t.valid );
- }
- var spec = {};
- for( var i = 0; i < n; ++i ) {
- var predicateType = Random.randInt( 4 );
- switch( predicateType ) {
- case 0 /* range */ : {
- var bounds = [ r( alphas[ i ] ), r( alphas[ i ] ) ];
- if ( bounds[ 0 ] > bounds[ 1 ] ) {
- bounds.reverse();
- }
- var s = {};
- if ( Random.rand() > 0.5 ) {
- s[ "$gte" ] = bounds[ 0 ];
- } else {
- s[ "$gt" ] = bounds[ 0 ];
- }
- if ( Random.rand() > 0.5 ) {
- s[ "$lte" ] = bounds[ 1 ];
- } else {
- s[ "$lt" ] = bounds[ 1 ];
- }
- spec[ fields[ i ] ] = s;
- break;
+ function check() {
+ var v = t.validate();
+ if (!t.valid) {
+ printjson(t);
+ assert(t.valid);
}
- case 1 /* $in */ : {
- var vals = [];
- var inLength = Random.randInt( 15 );
- for( var j = 0; j < inLength; ++j ) {
- vals.push( r( alphas[ i ] ) );
+ var spec = {};
+ for (var i = 0; i < n; ++i) {
+ var predicateType = Random.randInt(4);
+ switch (predicateType) {
+ case 0 /* range */: {
+ var bounds = [r(alphas[i]), r(alphas[i])];
+ if (bounds[0] > bounds[1]) {
+ bounds.reverse();
+ }
+ var s = {};
+ if (Random.rand() > 0.5) {
+ s["$gte"] = bounds[0];
+ } else {
+ s["$gt"] = bounds[0];
+ }
+ if (Random.rand() > 0.5) {
+ s["$lte"] = bounds[1];
+ } else {
+ s["$lt"] = bounds[1];
+ }
+ spec[fields[i]] = s;
+ break;
+ }
+ case 1 /* $in */: {
+ var vals = [];
+ var inLength = Random.randInt(15);
+ for (var j = 0; j < inLength; ++j) {
+ vals.push(r(alphas[i]));
+ }
+ spec[fields[i]] = {
+ $in: vals
+ };
+ break;
+ }
+ case 2 /* equality */: {
+ spec[fields[i]] = r(alphas[i]);
+ break;
+ }
+ default /* no predicate */:
+ break;
}
- spec[ fields[ i ] ] = { $in: vals };
- break;
- }
- case 2 /* equality */ : {
- spec[ fields[ i ] ] = r( alphas[ i ] );
- break;
- }
- default /* no predicate */ :
- break;
}
+ s = sort();
+ c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray();
+ c2 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray();
+ count = t.count(spec);
+ assert.eq(c1, c2);
+ assert.eq(c2.length, count);
}
- s = sort();
- c1 = t.find( spec, { _id:null } ).sort( s ).hint( idx ).toArray();
- c2 = t.find( spec, { _id:null } ).sort( s ).hint( {$natural:1} ).toArray();
- count = t.count( spec );
- assert.eq( c1, c2 );
- assert.eq( c2.length, count );
-}
-var bulk = t.initializeUnorderedBulkOp();
-for( var i = 0; i < 10000; ++i ) {
- bulk.insert( obj() );
- if( Random.rand() > 0.999 ) {
- print( i );
- assert.writeOK(bulk.execute());
- check();
- bulk = t.initializeUnorderedBulkOp();
+ var bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < 10000; ++i) {
+ bulk.insert(obj());
+ if (Random.rand() > 0.999) {
+ print(i);
+ assert.writeOK(bulk.execute());
+ check();
+ bulk = t.initializeUnorderedBulkOp();
+ }
}
-}
-bulk = t.initializeUnorderedBulkOp();
-for( var i = 0; i < 100000; ++i ) {
- if ( Random.rand() > 0.9 ) {
- bulk.insert( obj() );
- } else {
- bulk.find( obj() ).remove(); // improve
- }
- if( Random.rand() > 0.999 ) {
- print( i );
- assert.writeOK(bulk.execute());
- check();
- bulk = t.initializeUnorderedBulkOp();
+ bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100000; ++i) {
+ if (Random.rand() > 0.9) {
+ bulk.insert(obj());
+ } else {
+ bulk.find(obj()).remove(); // improve
+ }
+ if (Random.rand() > 0.999) {
+ print(i);
+ assert.writeOK(bulk.execute());
+ check();
+ bulk = t.initializeUnorderedBulkOp();
+ }
}
-}
-assert.writeOK(bulk.execute());
-
-check();
+ assert.writeOK(bulk.execute());
+ check();
}
-for( var z = 0; z < 5; ++z ) {
+for (var z = 0; z < 5; ++z) {
doIt();
}
diff --git a/jstests/noPassthroughWithMongod/index_hammer1.js b/jstests/noPassthroughWithMongod/index_hammer1.js
index 4617eb3fd98..5d89223093e 100644
--- a/jstests/noPassthroughWithMongod/index_hammer1.js
+++ b/jstests/noPassthroughWithMongod/index_hammer1.js
@@ -3,22 +3,37 @@ t = db.index_hammer1;
t.drop();
var bulk = t.initializeUnorderedBulkOp();
-for ( i=0; i<10000; i++ )
- bulk.insert({ x: i, y: i });
+for (i = 0; i < 10000; i++)
+ bulk.insert({x: i, y: i});
assert.writeOK(bulk.execute());
ops = [];
-for ( i=0; i<50; i++ )
- ops.push( { op : "find" , ns : t.getFullName() , query : { x : { $gt : 5000 } , y : { $gt : 5000 } } } );
-
-ops[10] = { op : "createIndex" , ns : t.getFullName() , key : { x : 1 } };
-ops[20] = { op : "createIndex" , ns : t.getFullName() , key : { y : 1 } };
-ops[30] = { op : "dropIndex" , ns : t.getFullName() , key : { x : 1 } };
-ops[40] = { op : "dropIndex" , ns : t.getFullName() , key : { y : 1 } };
-
-res = benchRun( { ops : ops , parallel : 5 , seconds : 20 , host : db.getMongo().host } );
-printjson( res );
-
-assert.eq( 10000 , t.count() );
-
+for (i = 0; i < 50; i++)
+ ops.push({op: "find", ns: t.getFullName(), query: {x: {$gt: 5000}, y: {$gt: 5000}}});
+
+ops[10] = {
+ op: "createIndex",
+ ns: t.getFullName(),
+ key: {x: 1}
+};
+ops[20] = {
+ op: "createIndex",
+ ns: t.getFullName(),
+ key: {y: 1}
+};
+ops[30] = {
+ op: "dropIndex",
+ ns: t.getFullName(),
+ key: {x: 1}
+};
+ops[40] = {
+ op: "dropIndex",
+ ns: t.getFullName(),
+ key: {y: 1}
+};
+
+res = benchRun({ops: ops, parallel: 5, seconds: 20, host: db.getMongo().host});
+printjson(res);
+
+assert.eq(10000, t.count());
diff --git a/jstests/noPassthroughWithMongod/index_killop.js b/jstests/noPassthroughWithMongod/index_killop.js
index 71e1cfc650b..95b0f995848 100644
--- a/jstests/noPassthroughWithMongod/index_killop.js
+++ b/jstests/noPassthroughWithMongod/index_killop.js
@@ -6,56 +6,58 @@ t.drop();
// Insert a large number of documents, enough to ensure that an index build on these documents will
// be interrupted before complete.
var bulk = t.initializeUnorderedBulkOp();
-for( i = 0; i < 1e6; ++i ) {
- bulk.insert({ a: i });
+for (i = 0; i < 1e6; ++i) {
+ bulk.insert({a: i});
}
assert.writeOK(bulk.execute());
-function debug( x ) {
-// printjson( x );
+function debug(x) {
+ // printjson( x );
}
/** @return the op id for the running index build, or -1 if there is no current index build. */
function getIndexBuildOpId() {
inprog = db.currentOp().inprog;
- debug( inprog );
+ debug(inprog);
indexBuildOpId = -1;
- inprog.forEach( function( op ) {
- // Identify the index build as the createIndex command
- // It is assumed that no other clients are concurrently
- // accessing the 'test' database.
- if ( (op.op == 'query' ||
- op.op == 'command') && 'createIndexes' in op.query ) {
- debug( op.opid );
- indexBuildOpId = op.opid;
- }
- } );
+ inprog.forEach(function(op) {
+        // Identify the index build as the createIndexes command.
+ // It is assumed that no other clients are concurrently
+ // accessing the 'test' database.
+ if ((op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query) {
+ debug(op.opid);
+ indexBuildOpId = op.opid;
+ }
+ });
return indexBuildOpId;
}
/** Test that building an index with @param 'options' can be aborted using killop. */
-function testAbortIndexBuild( options ) {
- var createIdx = startParallelShell(
- 'var coll = db.jstests_slownightly_index_killop;' +
- 'assert.commandWorked(coll.createIndex({ a: 1 }, ' + tojson(options) + '));'
- );
+function testAbortIndexBuild(options) {
+ var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_killop;' +
+ 'assert.commandWorked(coll.createIndex({ a: 1 }, ' +
+ tojson(options) + '));');
// When the index build starts, find its op id.
- assert.soon( function() { return ( opId = getIndexBuildOpId() ) != -1; } );
+ assert.soon(function() {
+ return (opId = getIndexBuildOpId()) != -1;
+ });
// Kill the index build.
- db.killOp( opId );
+ db.killOp(opId);
// Wait for the index build to stop.
- assert.soon( function() { return getIndexBuildOpId() == -1; } );
+ assert.soon(function() {
+ return getIndexBuildOpId() == -1;
+ });
var exitCode = createIdx({checkExitSuccess: false});
- assert.neq(0, exitCode,
- 'expected shell to exit abnormally due to index build being terminated');
+ assert.neq(
+ 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
// Check that no new index has been created. This verifies that the index build was aborted
// rather than successfully completed.
- assert.eq( [ { _id:1 } ], t.getIndexKeys() );
+ assert.eq([{_id: 1}], t.getIndexKeys());
}
-testAbortIndexBuild( { background:false } );
-testAbortIndexBuild( { background:true } );
+testAbortIndexBuild({background: false});
+testAbortIndexBuild({background: true});
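
Condensed, the pattern this test exercises is: scan currentOp() for the in-flight createIndexes
command, kill that opid, and then confirm via getIndexKeys() that only the _id index remains. A
minimal sketch of the lookup-and-kill step, assuming (as the test does) that this shell is the only
client building indexes:

function killRunningIndexBuild(db) {
    var builds = db.currentOp().inprog.filter(function(op) {
        return (op.op == 'query' || op.op == 'command') && op.query &&
            'createIndexes' in op.query;
    });
    if (builds.length > 0) {
        db.killOp(builds[0].opid);   // the build aborts and leaves no index entry behind
        return builds[0].opid;
    }
    return -1;   // no index build in progress
}
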
diff --git a/jstests/noPassthroughWithMongod/index_multi.js b/jstests/noPassthroughWithMongod/index_multi.js
index ec162821d4b..8d728fac8d9 100644
--- a/jstests/noPassthroughWithMongod/index_multi.js
+++ b/jstests/noPassthroughWithMongod/index_multi.js
@@ -9,19 +9,20 @@ db.results.drop();
var bulk = coll.initializeUnorderedBulkOp();
print("Populate the collection with random data");
for (var i = 0; i < 1e4; i++) {
- var doc = {"_id" : i};
+ var doc = {
+ "_id": i
+ };
- for (var j=0; j<100; j++) {
+ for (var j = 0; j < 100; j++) {
// Skip some of the fields
if (Random.rand() < .1) {
continue;
}
// Make 0, 10, etc. multikey indexes
else if (j % 10 == 0) {
- doc["field"+j] = [Random.rand(), Random.rand(), Random.rand()];
- }
- else {
- doc["field"+j] = Random.rand();
+ doc["field" + j] = [Random.rand(), Random.rand(), Random.rand()];
+ } else {
+ doc["field" + j] = Random.rand();
}
}
@@ -33,66 +34,54 @@ assert.writeOK(bulk.execute());
var specs = [];
var multikey = [];
-var setupDBStr =
- "var conn = null;" +
- "assert.soon(function() {" +
- " try {" +
- " conn = new Mongo(\"" + db.getMongo().host + "\");" +
- " return conn;" +
- " } catch (x) {" +
- " return false;" +
- " }" +
- "}, 'Timed out waiting for temporary connection to connect', 30000, 5000);" +
- "var db = conn.getDB('" + db.getName() + "');";
+var setupDBStr = "var conn = null;" + "assert.soon(function() {" + " try {" +
+ " conn = new Mongo(\"" + db.getMongo().host + "\");" + " return conn;" +
+ " } catch (x) {" + " return false;" + " }" +
+ "}, 'Timed out waiting for temporary connection to connect', 30000, 5000);" +
+ "var db = conn.getDB('" + db.getName() + "');";
var indexJobs = [];
print("Create 3 triple indexes");
for (var i = 90; i < 93; i++) {
var spec = {};
- spec["field"+i] = 1;
- spec["field"+(i+1)] = 1;
- spec["field"+(i+2)] = 1;
- indexJobs.push(startParallelShell(setupDBStr +
- "printjson(db.index_multi.createIndex(" + tojson(spec) + "," +
- "{ background: true }));" +
- "db.results.insert(Object.extend(" +
- "db.runCommand({ getlasterror: 1 }), " + tojson(spec) +
- ") );",
- null, // port
- true)); // noconnect
+ spec["field" + i] = 1;
+ spec["field" + (i + 1)] = 1;
+ spec["field" + (i + 2)] = 1;
+ indexJobs.push(startParallelShell(
+ setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + "," +
+ "{ background: true }));" + "db.results.insert(Object.extend(" +
+ "db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );",
+ null, // port
+ true)); // noconnect
specs.push(spec);
- multikey.push(i % 10 == 0 || (i+1) % 10 == 0 || (i+2) % 10 == 0);
+ multikey.push(i % 10 == 0 || (i + 1) % 10 == 0 || (i + 2) % 10 == 0);
}
print("Create 30 compound indexes");
for (var i = 30; i < 90; i += 2) {
var spec = {};
- spec["field"+i] = 1;
- spec["field"+(i+1)] = 1;
- indexJobs.push(startParallelShell(setupDBStr +
- "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " +
- "{ background: true }));" +
- "db.results.insert(Object.extend(" +
- "db.runCommand({ getlasterror: 1 }), " + tojson(spec) +
- ") );",
- null, // port
- true)); // noconnect
+ spec["field" + i] = 1;
+ spec["field" + (i + 1)] = 1;
+ indexJobs.push(startParallelShell(
+ setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " +
+ "{ background: true }));" + "db.results.insert(Object.extend(" +
+ "db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );",
+ null, // port
+ true)); // noconnect
specs.push(spec);
- multikey.push(i % 10 == 0 || (i+1) % 10 == 0);
+ multikey.push(i % 10 == 0 || (i + 1) % 10 == 0);
}
print("Create 30 indexes");
for (var i = 0; i < 30; i++) {
var spec = {};
- spec["field"+i] = 1;
- indexJobs.push(startParallelShell(setupDBStr +
- "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " +
- "{ background: true }));" +
- "db.results.insert(Object.extend(" +
- "db.runCommand({ getlasterror: 1 }), " + tojson(spec) +
- ") );",
- null, // port
- true)); // noconnect
+ spec["field" + i] = 1;
+ indexJobs.push(startParallelShell(
+ setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " +
+ "{ background: true }));" + "db.results.insert(Object.extend(" +
+ "db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );",
+ null, // port
+ true)); // noconnect
specs.push(spec);
multikey.push(i % 10 == 0);
}
@@ -100,15 +89,16 @@ for (var i = 0; i < 30; i++) {
print("Do some sets and unsets");
bulk = coll.initializeUnorderedBulkOp();
for (i = 0; i < 1e4; i++) {
- var criteria = {_id: Random.randInt(1e5)};
+ var criteria = {
+ _id: Random.randInt(1e5)
+ };
var mod = {};
if (Random.rand() < .5) {
mod['$set'] = {};
- mod['$set']['field'+Random.randInt(100)] = Random.rand();
- }
- else {
+ mod['$set']['field' + Random.randInt(100)] = Random.rand();
+ } else {
mod['$unset'] = {};
- mod['$unset']['field'+Random.randInt(100)] = true;
+ mod['$unset']['field' + Random.randInt(100)] = true;
}
bulk.find(criteria).update(mod);
@@ -120,7 +110,7 @@ indexJobs.forEach(function(join) {
});
printjson(db.results.find().toArray());
-//assert.eq(coll.getIndexes().length, 64, "didn't see 64 indexes");
+// assert.eq(coll.getIndexes().length, 64, "didn't see 64 indexes");
print("Make sure we end up with 64 indexes");
for (var i in specs) {
diff --git a/jstests/noPassthroughWithMongod/index_no_retry.js b/jstests/noPassthroughWithMongod/index_no_retry.js
index fd7f070736d..ff09b70d039 100644
--- a/jstests/noPassthroughWithMongod/index_no_retry.js
+++ b/jstests/noPassthroughWithMongod/index_no_retry.js
@@ -7,9 +7,7 @@
var baseName = 'index_retry';
var dbpath = MongoRunner.dataPath + baseName;
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- journal: ''});
+ var conn = MongoRunner.runMongod({dbpath: dbpath, journal: ''});
var test = conn.getDB("test");
@@ -21,7 +19,7 @@
// can be interrupted before complete.
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 5e5; ++i) {
- bulk.insert({ a: i });
+ bulk.insert({a: i});
if (i % 10000 == 0) {
print("i: " + i);
}
@@ -39,42 +37,34 @@
var inprog = test.currentOp().inprog;
debug(inprog);
var indexBuildOpId = -1;
- inprog.forEach(
- function( op ) {
- // Identify the index build as a createIndexes command.
- // It is assumed that no other clients are concurrently
- // accessing the 'test' database.
- if ( (op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query ) {
- debug(op.opid);
- var idxSpec = op.query.indexes[0];
- // SERVER-4295 Make sure the index details are there
- // we can't assert these things, since there is a race in reporting
- // but we won't count if they aren't
- if ( "a_1" == idxSpec.name &&
- 1 == idxSpec.key.a &&
- idxSpec.background &&
- op.progress &&
- (op.progress.done / op.progress.total) > 0.20) {
- indexBuildOpId = op.opid;
- }
+ inprog.forEach(function(op) {
+ // Identify the index build as a createIndexes command.
+ // It is assumed that no other clients are concurrently
+ // accessing the 'test' database.
+ if ((op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query) {
+ debug(op.opid);
+ var idxSpec = op.query.indexes[0];
+ // SERVER-4295 Make sure the index details are there
+ // we can't assert these things, since there is a race in reporting
+ // but we won't count if they aren't
+ if ("a_1" == idxSpec.name && 1 == idxSpec.key.a && idxSpec.background &&
+ op.progress && (op.progress.done / op.progress.total) > 0.20) {
+ indexBuildOpId = op.opid;
}
}
- );
+ });
return indexBuildOpId != -1;
}
function abortDuringIndexBuild(options) {
var createIdx = startParallelShell(
- 'db.' + name + '.createIndex({ a: 1 }, { background: true });',
- conn.port);
+ 'db.' + name + '.createIndex({ a: 1 }, { background: true });', conn.port);
// Wait for the index build to start.
var times = 0;
- assert.soon(
- function() {
- return indexBuildInProgress() && times++ >= 2;
- }
- );
+ assert.soon(function() {
+ return indexBuildInProgress() && times++ >= 2;
+ });
print("killing the mongod");
MongoRunner.stopMongod(conn.port, /* signal */ 9);
@@ -85,17 +75,14 @@
abortDuringIndexBuild();
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- journal: '',
- noIndexBuildRetry: '',
- restart: true});
+ conn =
+ MongoRunner.runMongod({dbpath: dbpath, journal: '', noIndexBuildRetry: '', restart: true});
test = conn.getDB("test");
t = test.getCollection(name);
- assert.throws(function() { t.find({a: 42}).hint({a: 1}).next(); },
- null,
- 'index {a: 1} was rebuilt in spite of --noIndexBuildRetry');
+ assert.throws(function() {
+ t.find({a: 42}).hint({a: 1}).next();
+ }, null, 'index {a: 1} was rebuilt in spite of --noIndexBuildRetry');
var indexes = t.getIndexes();
assert.eq(1, indexes.length, 'unfinished indexes in listIndexes result: ' + tojson(indexes));
diff --git a/jstests/noPassthroughWithMongod/index_retry.js b/jstests/noPassthroughWithMongod/index_retry.js
index e420edb1914..cb33de4b95b 100644
--- a/jstests/noPassthroughWithMongod/index_retry.js
+++ b/jstests/noPassthroughWithMongod/index_retry.js
@@ -7,9 +7,7 @@
var baseName = 'index_retry';
var dbpath = MongoRunner.dataPath + baseName;
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- journal: ''});
+ var conn = MongoRunner.runMongod({dbpath: dbpath, journal: ''});
var test = conn.getDB("test");
@@ -21,7 +19,7 @@
// can be interrupted before complete.
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 5e5; ++i) {
- bulk.insert({ a: i });
+ bulk.insert({a: i});
if (i % 10000 == 0) {
print("i: " + i);
}
@@ -39,42 +37,34 @@
var inprog = test.currentOp().inprog;
debug(inprog);
var indexBuildOpId = -1;
- inprog.forEach(
- function( op ) {
- // Identify the index build as a createIndexes command.
- // It is assumed that no other clients are concurrently
- // accessing the 'test' database.
- if ( (op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query ) {
- debug(op.opid);
- var idxSpec = op.query.indexes[0];
- // SERVER-4295 Make sure the index details are there
- // we can't assert these things, since there is a race in reporting
- // but we won't count if they aren't
- if ( "a_1" == idxSpec.name &&
- 1 == idxSpec.key.a &&
- idxSpec.background &&
- op.progress &&
- (op.progress.done / op.progress.total) > 0.20) {
- indexBuildOpId = op.opid;
- }
+ inprog.forEach(function(op) {
+ // Identify the index build as a createIndexes command.
+ // It is assumed that no other clients are concurrently
+ // accessing the 'test' database.
+ if ((op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query) {
+ debug(op.opid);
+ var idxSpec = op.query.indexes[0];
+ // SERVER-4295 Make sure the index details are there
+ // we can't assert these things, since there is a race in reporting
+ // but we won't count if they aren't
+ if ("a_1" == idxSpec.name && 1 == idxSpec.key.a && idxSpec.background &&
+ op.progress && (op.progress.done / op.progress.total) > 0.20) {
+ indexBuildOpId = op.opid;
}
}
- );
+ });
return indexBuildOpId != -1;
}
function abortDuringIndexBuild(options) {
var createIdx = startParallelShell(
- 'db.' + name + '.createIndex({ a: 1 }, { background: true });',
- conn.port);
+ 'db.' + name + '.createIndex({ a: 1 }, { background: true });', conn.port);
// Wait for the index build to start.
var times = 0;
- assert.soon(
- function() {
- return indexBuildInProgress() && times++ >= 2;
- }
- );
+ assert.soon(function() {
+ return indexBuildInProgress() && times++ >= 2;
+ });
print("killing the mongod");
MongoRunner.stopMongod(conn.port, /* signal */ 9);
@@ -85,18 +75,17 @@
abortDuringIndexBuild();
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- journal: '',
- restart: true});
+ conn = MongoRunner.runMongod({dbpath: dbpath, journal: '', restart: true});
test = conn.getDB("test");
t = test.getCollection(name);
- assert.eq({a: 42}, t.find({a: 42}, {_id: 0}).hint({a: 1}).next(),
+ assert.eq({a: 42},
+ t.find({a: 42}, {_id: 0}).hint({a: 1}).next(),
'index {a: 1} was rebuilt on startup');
var indexes = t.getIndexes();
- assert.eq(2, indexes.length,
+ assert.eq(2,
+ indexes.length,
'unexpected number of indexes in listIndexes result: ' + tojson(indexes));
print("Index built");
diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js
index 261ab342e48..863730ae35b 100644
--- a/jstests/noPassthroughWithMongod/indexbg_drop.js
+++ b/jstests/noPassthroughWithMongod/indexbg_drop.js
@@ -15,17 +15,20 @@ var collection = 'jstests_feh';
var size = 500000;
// Set up replica set
-var replTest = new ReplSetTest({ name: 'bgIndex', nodes: 3 });
+var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
var nodes = replTest.nodeList();
printjson(nodes);
// We need an arbiter to ensure that the primary doesn't step down when we restart the secondary
replTest.startSet();
-replTest.initiate({"_id" : "bgIndex",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+replTest.initiate({
+ "_id": "bgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -36,43 +39,43 @@ var secondId = replTest.getNodeId(second);
var masterDB = master.getDB(dbname);
var secondDB = second.getDB(dbname);
-
-var dc = {dropIndexes: collection, index: "i_1"};
+var dc = {
+ dropIndexes: collection,
+ index: "i_1"
+};
// set up collections
masterDB.dropDatabase();
jsTest.log("creating test data " + size + " documents");
Random.setRandomSeed();
var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
-for( i = 0; i < size; ++i ) {
- bulk.insert({ i: Random.rand() });
+for (i = 0; i < size; ++i) {
+ bulk.insert({i: Random.rand()});
}
assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing for test of: " + tojson(dc));
// Add another index to be sure the drop command works.
-masterDB.getCollection(collection).ensureIndex({b:1});
+masterDB.getCollection(collection).ensureIndex({b: 1});
-masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} );
-assert.eq(3, masterDB.getCollection(collection).getIndexes().length );
+masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
+assert.eq(3, masterDB.getCollection(collection).getIndexes().length);
// Wait for the secondary to get the index entry
-assert.soon(
- function() { return 3 == secondDB.getCollection(collection).getIndexes().length; },
- "index not created on secondary (prior to drop)", 240000 );
+assert.soon(function() {
+ return 3 == secondDB.getCollection(collection).getIndexes().length;
+}, "index not created on secondary (prior to drop)", 240000);
jsTest.log("Index created and index entry exists on secondary");
-
// make sure the index build has started on secondary
assert.soon(function() {
var curOp = secondDB.currentOp();
printjson(curOp);
- for (var i=0; i < curOp.inprog.length; i++) {
+ for (var i = 0; i < curOp.inprog.length; i++) {
try {
- if (curOp.inprog[i].insert.background){
-
- return true;
+ if (curOp.inprog[i].insert.background) {
+ return true;
}
} catch (e) {
// catchem if you can
@@ -81,9 +84,8 @@ assert.soon(function() {
return false;
}, "waiting for secondary bg index build", 20000, 10);
-
jsTest.log("dropping index");
-masterDB.runCommand( {dropIndexes: collection, index: "*"});
+masterDB.runCommand({dropIndexes: collection, index: "*"});
jsTest.log("Waiting on replication");
replTest.awaitReplication();
@@ -93,13 +95,12 @@ masterDB.getCollection(collection).getIndexes().forEach(printjson);
// we need to assert.soon because the drop only marks the index for removal
// the removal itself is asynchronous and may take another moment before it happens
var i = 0;
-assert.soon( function() {
+assert.soon(function() {
print("index list on secondary (run " + i + "):");
secondDB.getCollection(collection).getIndexes().forEach(printjson);
i++;
return 1 === secondDB.getCollection(collection).getIndexes().length;
- }, "secondary did not drop index"
-);
+}, "secondary did not drop index");
replTest.stopSet();
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
index 2d3ddd9099e..0a50951c75d 100644
--- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js
+++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
@@ -10,16 +10,16 @@
/**
* Starts a replica set with arbiter, builds an index in background,
- * run through drop indexes, drop collection, drop database.
+ * then runs through dropIndexes, drop collection, and dropDatabase.
*/
var checkOp = function(checkDB) {
var curOp = checkDB.currentOp(true);
- for (var i=0; i < curOp.inprog.length; i++) {
+ for (var i = 0; i < curOp.inprog.length; i++) {
try {
- if (curOp.inprog[i].query.background){
+ if (curOp.inprog[i].query.background) {
printjson(curOp.inprog[i].msg);
- return true;
+ return true;
}
} catch (e) {
// catchem if you can
@@ -33,16 +33,19 @@ var collection = 'jstests_feh';
var size = 100000;
// Set up replica set
-var replTest = new ReplSetTest({ name: 'bgIndex', nodes: 3 });
+var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
var nodes = replTest.nodeList();
// We need an arbiter to ensure that the primary doesn't step down when we restart the secondary
replTest.startSet();
-replTest.initiate({"_id" : "bgIndex",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+replTest.initiate({
+ "_id": "bgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -50,15 +53,14 @@ var second = replTest.getSecondary();
var masterDB = master.getDB(dbname);
var secondDB = second.getDB(dbname);
-var dropAction = [
+var dropAction = [
{dropIndexes: collection, index: "*"},
{dropIndexes: collection, index: "i_1"},
{drop: collection},
- {dropDatabase: 1 },
+ {dropDatabase: 1},
{convertToCapped: collection, size: 20000}
];
-
for (var idx = 0; idx < dropAction.length; idx++) {
var dc = dropAction[idx];
jsTest.log("Setting up collection " + collection + " for test of: " + JSON.stringify(dc));
@@ -67,31 +69,31 @@ for (var idx = 0; idx < dropAction.length; idx++) {
masterDB.dropDatabase();
jsTest.log("creating test data " + size + " documents");
var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
- for(var i = 0; i < size; ++i ) {
- bulk.insert({ i: i });
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc));
- masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} );
- assert.eq(2, masterDB.getCollection(collection).getIndexes().length );
+ masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
+ assert.eq(2, masterDB.getCollection(collection).getIndexes().length);
// Wait for the secondary to get the index entry
- assert.soon( function() {
+ assert.soon(function() {
return 2 == secondDB.getCollection(collection).getIndexes().length;
- }, "index not created on secondary", 240000 );
+ }, "index not created on secondary", 240000);
jsTest.log("Index created and index info exists on secondary");
jsTest.log("running command " + JSON.stringify(dc));
- assert.commandWorked(masterDB.runCommand( dc ));
-
+ assert.commandWorked(masterDB.runCommand(dc));
+
jsTest.log("Waiting on replication");
- replTest.awaitReplication(60*1000);
+ replTest.awaitReplication(60 * 1000);
// we need to assert.soon because the drop only marks the index for removal
// the removal itself is asynchronous and may take another moment before it happens
- assert.soon( function() {
+ assert.soon(function() {
var idx_count = secondDB.getCollection(collection).getIndexes().length;
return idx_count == 1 || idx_count == 0;
}, "secondary did not drop index for " + dc.toString());
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
index 80379b64844..e6b1a14f5f8 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
@@ -10,22 +10,24 @@
/**
* Starts a replica set with arbiter, builds an index in background
- * restart secondary once it starts building index, secondary should
+ * restart secondary once it starts building index, secondary should
* restart when index build after it restarts
*/
-
// Set up replica set
-var replTest = new ReplSetTest({ name: 'bgIndex', nodes: 3 });
+var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
var nodes = replTest.nodeList();
// We need an arbiter to ensure that the primary doesn't step down when we restart the secondary
replTest.startSet();
-replTest.initiate({"_id" : "bgIndex",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+replTest.initiate({
+ "_id": "bgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -39,37 +41,36 @@ var size = 500000;
jsTest.log("creating test data " + size + " documents");
var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
-for(var i = 0; i < size; ++i) {
- bulk.insert({ i: i });
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing");
-masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
+masterDB.jstests_bgsec.ensureIndex({i: 1}, {background: true});
assert.eq(2, masterDB.jstests_bgsec.getIndexes().length);
// Wait for the secondary to get the index entry
-assert.soon( function() {
- return 2 == secondDB.jstests_bgsec.getIndexes().length; },
- "index not created on secondary (prior to restart)", 5 * 60 * 1000 );
+assert.soon(function() {
+ return 2 == secondDB.jstests_bgsec.getIndexes().length;
+}, "index not created on secondary (prior to restart)", 5 * 60 * 1000);
// restart secondary and reconnect
jsTest.log("Restarting secondary");
-replTest.restart(secondId, {}, /*wait=*/true);
+replTest.restart(secondId, {}, /*wait=*/true);
// Make sure secondary comes back
-assert.soon( function() {
+assert.soon(function() {
try {
- secondDB.jstests_bgsec.getIndexes().length; // trigger a reconnect if needed
- return true;
+ secondDB.jstests_bgsec.getIndexes().length; // trigger a reconnect if needed
+ return true;
} catch (e) {
- return false;
+ return false;
}
-} , "secondary didn't restart", 30000, 1000);
+}, "secondary didn't restart", 30000, 1000);
-assert.soon( function() {
- return 2 == secondDB.jstests_bgsec.getIndexes().length; },
- "Index build not resumed after restart", 30000, 50 );
+assert.soon(function() {
+ return 2 == secondDB.jstests_bgsec.getIndexes().length;
+}, "Index build not resumed after restart", 30000, 50);
jsTest.log("indexbg-restart-secondary.js complete");
-
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
index c97d8320422..c7a793e2a51 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
@@ -14,7 +14,7 @@
* Start with noIndexBuildRetry option, should *not* build index on secondary
*/
-(function () {
+(function() {
var assert_trueTimeout = function(f, msg, timeout /*ms*/, interval) {
var start = new Date();
timeout = timeout || 30000;
@@ -28,12 +28,12 @@
if (diff > timeout)
return;
sleep(interval);
- }
+ }
};
// Set up replica set
- var replTest = new ReplSetTest({ name: 'bgIndexNoRetry', nodes: 3,
- nodeOptions : {noIndexBuildRetry:"", syncdelay:1} });
+ var replTest = new ReplSetTest(
+ {name: 'bgIndexNoRetry', nodes: 3, nodeOptions: {noIndexBuildRetry: "", syncdelay: 1}});
var nodenames = replTest.nodeList();
// We can't use an arbiter as the third node because the -auth test tries to log on there
@@ -47,11 +47,14 @@
return;
}
- replTest.initiate({"_id" : "bgIndexNoRetry",
- "members" : [
- {"_id" : 0, "host" : nodenames[0]},
- {"_id" : 1, "host" : nodenames[1]},
- {"_id" : 2, "host" : nodenames[2], arbiterOnly: true}]});
+ replTest.initiate({
+ "_id": "bgIndexNoRetry",
+ "members": [
+ {"_id": 0, "host": nodenames[0]},
+ {"_id": 1, "host": nodenames[1]},
+ {"_id": 2, "host": nodenames[2], arbiterOnly: true}
+ ]
+ });
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -65,18 +68,18 @@
jsTest.log("creating test data " + size + " documents");
var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
- for( i = 0; i < size; ++i ) {
- bulk.insert({ i : i });
+ for (i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing");
- masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
+ masterDB.jstests_bgsec.ensureIndex({i: 1}, {background: true});
assert.eq(2, masterDB.jstests_bgsec.getIndexes().length);
// Do one more write, so that later on, the secondary doesn't restart with the index build
// as the last op in the oplog -- it will redo this op otherwise.
- masterDB.jstests_bgsec.insert( { i : -1 } );
+ masterDB.jstests_bgsec.insert({i: -1});
// Wait for the secondary to get caught up
jsTest.log("Waiting for replication");
@@ -84,29 +87,26 @@
// Make sure a journal flush for the oplog occurs, by doing a local journaled write to the
// secondary
- assert.writeOK(second.getDB('local').foo.insert({ a: 1 }, { writeConcern: { j: true }}));
+ assert.writeOK(second.getDB('local').foo.insert({a: 1}, {writeConcern: {j: true}}));
// restart secondary and reconnect
jsTest.log("Restarting secondary");
- replTest.restart(secondId, {}, /*signal=*/ 9, /*wait=*/true);
+ replTest.restart(secondId, {}, /*signal=*/9, /*wait=*/true);
// Make sure secondary comes back
- assert.soon( function() {
+ assert.soon(function() {
try {
- secondDB.isMaster(); // trigger a reconnect if needed
+ secondDB.isMaster(); // trigger a reconnect if needed
return true;
} catch (e) {
- return false;
+ return false;
}
- } , "secondary didn't restart", 60000, 1000);
+ }, "secondary didn't restart", 60000, 1000);
- assert_trueTimeout(
- function() {
- return 2 == secondDB.jstests_bgsec.getIndexes().length;
- },
- "index created on secondary after restart with --noIndexBuildRetry",
- 30000, 200);
+ assert_trueTimeout(function() {
+ return 2 == secondDB.jstests_bgsec.getIndexes().length;
+ }, "index created on secondary after restart with --noIndexBuildRetry", 30000, 200);
- assert.neq(2, secondDB.jstests_bgsec.getIndexes().length );
+ assert.neq(2, secondDB.jstests_bgsec.getIndexes().length);
replTest.stopSet();
}());
diff --git a/jstests/noPassthroughWithMongod/insertMulti.js b/jstests/noPassthroughWithMongod/insertMulti.js
index b857e8c159a..e2a70307550 100644
--- a/jstests/noPassthroughWithMongod/insertMulti.js
+++ b/jstests/noPassthroughWithMongod/insertMulti.js
@@ -4,9 +4,11 @@
"use strict";
function makeDocument(docSize) {
- var doc = { "fieldName":"" };
+ var doc = {
+ "fieldName": ""
+ };
var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
- while(Object.bsonsize(doc) < docSize) {
+ while (Object.bsonsize(doc) < docSize) {
if (Object.bsonsize(doc) < docSize - longString.length) {
doc.fieldName += longString;
} else {
@@ -20,24 +22,24 @@
var t = db.foo;
t.drop();
- t.insert([{_id:1},{_id:2}]);
+ t.insert([{_id: 1}, {_id: 2}]);
assert.eq(t.count(), 2);
- t.insert([{_id:3},{_id:2},{_id:4}], 0); // no ContinueOnError
+ t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 0); // no ContinueOnError
assert.eq(t.count(), 3);
- assert.eq(t.count({ "_id" : 1 }), 1);
- assert.eq(t.count({ "_id" : 2 }), 1);
- assert.eq(t.count({ "_id" : 3 }), 1);
- assert.eq(t.count({ "_id" : 4 }), 0);
+ assert.eq(t.count({"_id": 1}), 1);
+ assert.eq(t.count({"_id": 2}), 1);
+ assert.eq(t.count({"_id": 3}), 1);
+ assert.eq(t.count({"_id": 4}), 0);
t.drop();
- t.insert([{_id:1},{_id:2}]);
+ t.insert([{_id: 1}, {_id: 2}]);
assert.eq(t.count(), 2);
- t.insert([{_id:3},{_id:2},{_id:4}], 1); // ContinueOnError
+ t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 1); // ContinueOnError
assert.eq(t.count(), 4);
- assert.eq(t.count({ "_id" : 1 }), 1);
- assert.eq(t.count({ "_id" : 2 }), 1);
- assert.eq(t.count({ "_id" : 3 }), 1);
- assert.eq(t.count({ "_id" : 4 }), 1);
+ assert.eq(t.count({"_id": 1}), 1);
+ assert.eq(t.count({"_id": 2}), 1);
+ assert.eq(t.count({"_id": 3}), 1);
+ assert.eq(t.count({"_id": 4}), 1);
// Push a large vector in bigger than the subset size we'll break it up into
t.drop();
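
The numeric second argument to insert() in this test is the legacy ContinueOnError flag. When the
shell issues write commands it maps that flag onto the ordered option, so the two batches above
behave roughly like the following sketch (reusing the test's collection t; flag 0 stops at the
duplicate _id 2, flag 1 reports the error but still inserts _ids 3 and 4):

t.insert([{_id: 3}, {_id: 2}, {_id: 4}], {ordered: true});    // equivalent of flag 0
t.insert([{_id: 3}, {_id: 2}, {_id: 4}], {ordered: false});   // equivalent of flag 1
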
diff --git a/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js b/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
index 9f544248ef1..e0a83397ee9 100644
--- a/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
+++ b/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
@@ -13,13 +13,18 @@ if ("undefined" == typeof inner_mode) {
// tested manually), so 127.0.0.1 is also present so the test mongo shell can connect
// with that address.
var mongod = MongoRunner.runMongod({ipv6: "", bind_ip: "::1,127.0.0.1"});
- var args = ["mongo",
- "--nodb",
- "--ipv6",
- "--host", "::1",
- "--port", mongod.port,
- "--eval", "inner_mode=true;port=" + mongod.port + ";",
- "jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js" ];
+ var args = [
+ "mongo",
+ "--nodb",
+ "--ipv6",
+ "--host",
+ "::1",
+ "--port",
+ mongod.port,
+ "--eval",
+ "inner_mode=true;port=" + mongod.port + ";",
+ "jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js"
+ ];
var exitCode = _runMongoProgram.apply(null, args);
jsTest.log("Inner mode test finished, exit code was " + exitCode);
@@ -32,50 +37,49 @@ if ("undefined" == typeof inner_mode) {
}
var goodStrings = [
- "localhost:27999/test",
- "[::1]:27999/test",
- "[0:0:0:0:0:0:0:1]:27999/test",
- "[0000:0000:0000:0000:0000:0000:0000:0001]:27999/test"
+ "localhost:27999/test",
+ "[::1]:27999/test",
+ "[0:0:0:0:0:0:0:1]:27999/test",
+ "[0000:0000:0000:0000:0000:0000:0000:0001]:27999/test"
];
var badStrings = [
- { s: undefined, r: /^Missing connection string$/ },
- { s: 7, r: /^Incorrect type/ },
- { s: null, r: /^Incorrect type/ },
- { s: "", r: /^Empty connection string$/ },
- { s: " ", r: /^Empty connection string$/ },
- { s: ":", r: /^Missing host name/ },
- { s: "/", r: /^Missing host name/ },
- { s: ":/", r: /^Missing host name/ },
- { s: ":/test", r: /^Missing host name/ },
- { s: ":27999/", r: /^Missing host name/ },
- { s: ":27999/test", r: /^Missing host name/ },
- { s: "/test", r: /^Missing host name/ },
- { s: "localhost:/test", r: /^Missing port number/ },
- { s: "::1:/test", r: /^Missing port number/ },
- { s: "::1:cat/test", r: /^Invalid port number/ },
- { s: "::1:1cat/test", r: /^Invalid port number/ },
- { s: "::1:123456/test", r: /^Invalid port number/ },
- { s: "::1:65536/test", r: /^Invalid port number/ },
- { s: "127.0.0.1:65536/test", r: /^Invalid port number/ },
- { s: "::1:27999/", r: /^Missing database name/ },
- { s: "127.0.0.1:27999/", r: /^Missing database name/ },
- { s: "::1:27999/test", r: /^More than one ':'/ },
- { s: "0:0::0:0:1:27999/test", r: /^More than one ':'/ },
- { s: "0000:0000:0000:0000:0000:0000:0000:0001:27999/test", r: /^More than one ':'/ },
- { s: "a[127.0.0.1]:27999/", r: /^Missing database name/ },
- { s: "a[::1:]27999/", r: /^Invalid port number/ },
- { s: "[::1:27999/", r: /^Missing database name/ },
- { s: "[::1:]27999/", r: /^Invalid port number/ },
- { s: "::1]:27999/", r: /^Missing database name/ }
+ {s: undefined, r: /^Missing connection string$/},
+ {s: 7, r: /^Incorrect type/},
+ {s: null, r: /^Incorrect type/},
+ {s: "", r: /^Empty connection string$/},
+ {s: " ", r: /^Empty connection string$/},
+ {s: ":", r: /^Missing host name/},
+ {s: "/", r: /^Missing host name/},
+ {s: ":/", r: /^Missing host name/},
+ {s: ":/test", r: /^Missing host name/},
+ {s: ":27999/", r: /^Missing host name/},
+ {s: ":27999/test", r: /^Missing host name/},
+ {s: "/test", r: /^Missing host name/},
+ {s: "localhost:/test", r: /^Missing port number/},
+ {s: "::1:/test", r: /^Missing port number/},
+ {s: "::1:cat/test", r: /^Invalid port number/},
+ {s: "::1:1cat/test", r: /^Invalid port number/},
+ {s: "::1:123456/test", r: /^Invalid port number/},
+ {s: "::1:65536/test", r: /^Invalid port number/},
+ {s: "127.0.0.1:65536/test", r: /^Invalid port number/},
+ {s: "::1:27999/", r: /^Missing database name/},
+ {s: "127.0.0.1:27999/", r: /^Missing database name/},
+ {s: "::1:27999/test", r: /^More than one ':'/},
+ {s: "0:0::0:0:1:27999/test", r: /^More than one ':'/},
+ {s: "0000:0000:0000:0000:0000:0000:0000:0001:27999/test", r: /^More than one ':'/},
+ {s: "a[127.0.0.1]:27999/", r: /^Missing database name/},
+ {s: "a[::1:]27999/", r: /^Invalid port number/},
+ {s: "[::1:27999/", r: /^Missing database name/},
+ {s: "[::1:]27999/", r: /^Invalid port number/},
+ {s: "::1]:27999/", r: /^Missing database name/}
];
var substitutePort = function(connectionString) {
// This will be called with non-strings as well as strings, so we need to catch exceptions
try {
return connectionString.replace("27999", "" + port);
- }
- catch (e) {
+ } catch (e) {
return connectionString;
}
};
@@ -87,18 +91,17 @@ var testGood = function(i, connectionString) {
try {
var connectDB = connect(connectionString);
connectDB = null;
- }
- catch (e) {
+ } catch (e) {
gotException = true;
exception = e;
}
if (!gotException) {
- print("Good connection string " + i +
- " (\"" + connectionString + "\") correctly validated");
+ print("Good connection string " + i + " (\"" + connectionString +
+ "\") correctly validated");
return;
}
- var message = "FAILED to correctly validate goodString " + i +
- " (\"" + connectionString + "\"): exception was \"" + tojson(exception) + "\"";
+ var message = "FAILED to correctly validate goodString " + i + " (\"" + connectionString +
+ "\"): exception was \"" + tojson(exception) + "\"";
doassert(message);
};
@@ -110,8 +113,7 @@ var testBad = function(i, connectionString, errorRegex) {
try {
var connectDB = connect(connectionString);
connectDB = null;
- }
- catch (e) {
+ } catch (e) {
gotException = true;
exception = e;
if (errorRegex.test(e.message)) {
@@ -123,13 +125,12 @@ var testBad = function(i, connectionString, errorRegex) {
"\") correctly rejected:\n" + tojson(exception));
return;
}
- var message = "FAILED to generate correct exception for badString " + i +
- " (\"" + connectionString + "\"): ";
+ var message = "FAILED to generate correct exception for badString " + i + " (\"" +
+ connectionString + "\"): ";
if (gotException) {
- message += "exception was \"" + tojson(exception) +
- "\", it should have matched \"" + errorRegex.toString() + "\"";
- }
- else {
+ message += "exception was \"" + tojson(exception) + "\", it should have matched \"" +
+ errorRegex.toString() + "\"";
+ } else {
message += "no exception was thrown";
}
doassert(message);
diff --git a/jstests/noPassthroughWithMongod/log_component_helpers.js b/jstests/noPassthroughWithMongod/log_component_helpers.js
index 044c1ba5b4a..dfe6523ac35 100644
--- a/jstests/noPassthroughWithMongod/log_component_helpers.js
+++ b/jstests/noPassthroughWithMongod/log_component_helpers.js
@@ -5,9 +5,9 @@
var mongo = db.getMongo();
    // Get current log component settings. We will reset to these later.
- var originalSettings = assert.commandWorked(
- db.adminCommand({ getParameter:1, logComponentVerbosity:1 })
- ).logComponentVerbosity;
+ var originalSettings =
+ assert.commandWorked(db.adminCommand({getParameter: 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
// getLogComponents
var components1 = mongo.getLogComponents();
@@ -37,6 +37,5 @@
// Restore originalSettings
assert.commandWorked(
- db.adminCommand({setParameter:1, logComponentVerbosity:originalSettings })
- );
- }(db));
+ db.adminCommand({setParameter: 1, logComponentVerbosity: originalSettings}));
+}(db));
diff --git a/jstests/noPassthroughWithMongod/logop_rollback.js b/jstests/noPassthroughWithMongod/logop_rollback.js
index cc585c59a8e..f6ff75e3ce6 100644
--- a/jstests/noPassthroughWithMongod/logop_rollback.js
+++ b/jstests/noPassthroughWithMongod/logop_rollback.js
@@ -5,7 +5,7 @@
'use strict';
function checkForLogOpRollback(coll) {
- var res = coll.runCommand({ getLog: 'global' });
+ var res = coll.runCommand({getLog: 'global'});
assert.commandWorked(res);
for (var i = res.log.length - 1; i >= 0; i--) {
@@ -30,15 +30,14 @@
// must be in 'legacy' or 'compatibility' mode
db.getMongo().forceWriteMode('compatibility');
- var res = coll.insert({ _id: new Array(1025).join('x') });
+ var res = coll.insert({_id: new Array(1025).join('x')});
assert(res.hasWriteError());
// ErrorCodes::KeyTooLong == 17280
assert.eq(17280, res.getWriteError().code);
assert(checkForLogOpRollback(coll));
- }
- finally {
+ } finally {
db.getMongo().forceWriteMode(prevWriteMode);
db.setLogLevel(prevVerbosityLevel);
}
diff --git a/jstests/noPassthroughWithMongod/logpath.js b/jstests/noPassthroughWithMongod/logpath.js
index 1634495cd17..3b6b2069937 100644
--- a/jstests/noPassthroughWithMongod/logpath.js
+++ b/jstests/noPassthroughWithMongod/logpath.js
@@ -3,7 +3,7 @@
var name = "logpath";
var token = "logpath_token";
-var dbdir = MongoRunner.dataPath + name + "/"; // this will work under windows as well as linux
+var dbdir = MongoRunner.dataPath + name + "/"; // this will work under windows as well as linux
var basedir = MongoRunner.dataPath + name + "files" + "/";
var logdir = basedir + "logdir/";
var testdir = basedir + "testdir/";
@@ -19,15 +19,15 @@ assert(mkdir(basedir));
assert(mkdir(logdir));
assert(mkdir(testdir));
-var cleanupFiles = function() {
+var cleanupFiles = function() {
var files = listFiles(logdir);
- for(f in files) {
+ for (f in files) {
var name = files[f].name;
// mostly here for safety
- if(name.indexOf(token) != -1) {
- removeFile(name);
+ if (name.indexOf(token) != -1) {
+ removeFile(name);
}
}
};
@@ -37,8 +37,8 @@ var logCount = function(fpattern, prefix) {
var pat = RegExp(fpattern + (prefix ? "" : "$"));
var cnt = 0;
- for(f in files) {
- if(pat.test(files[f].name)) {
+ for (f in files) {
+ if (pat.test(files[f].name)) {
cnt++;
}
}
@@ -53,14 +53,14 @@ cleanupFiles();
assert.eq(logCount(logs[0]), 0);
print("------ Start mongod with logpath set to new file");
-var m = MongoRunner.runMongod({ port: port[0], dbpath: dbdir, logpath: logdir + logs[0]});
+var m = MongoRunner.runMongod({port: port[0], dbpath: dbdir, logpath: logdir + logs[0]});
// log should now exist (and no rotations should exist)
assert.eq(logCount(logs[0], true), 1);
MongoRunner.stopMongod(port[0]);
print("------ Start mongod with logpath set to existing file");
-m = MongoRunner.runMongod({ port: port[1], dbpath: dbdir, logpath: logdir + logs[0]});
+m = MongoRunner.runMongod({port: port[1], dbpath: dbdir, logpath: logdir + logs[0]});
// log should continue to exist
assert.eq(logCount(logs[0]), 1);
@@ -73,36 +73,43 @@ MongoRunner.stopMongod(port[1]);
// Blocking on SERVER-5117:
// MongoRunner currently hangs if mongod fails to start so these tests don't work
-if ( false ) {
+if (false) {
// only run forking test on *nix (not supported on Windows)
- if ( _isWindows() ) {
+ if (_isWindows()) {
print("------ Skipping fork tests... (Windows)");
} else {
print("------ Start mongod with logpath set to new file, fork");
- var m = MongoRunner.runMongod({ port: port[2], dbpath: dbdir, logpath: logdir + logs[1], fork: true});
-
+ var m = MongoRunner.runMongod(
+ {port: port[2], dbpath: dbdir, logpath: logdir + logs[1], fork: true});
+
// log should now exist (and no rotations should exist)
assert.eq(logCount(logs[1], true), 1);
MongoRunner.stopMongod(port[2]);
-
+
print("------ Start mongod with logpath set to existing file, fork");
- m = MongoRunner.runMongod({ port: port[3], dbpath: dbdir, logpath: logdir + logs[1], fork: true});
-
+ m = MongoRunner.runMongod(
+ {port: port[3], dbpath: dbdir, logpath: logdir + logs[1], fork: true});
+
// log should continue to exist
assert.eq(logCount(logs[1]), 1);
-
+
// but now there should be a rotation file
assert.eq(logCount(logs[1], true), 2);
cleanupFiles();
-
+
MongoRunner.stopMongod(port[3]);
}
-
- // the following tests depend on undefined behavior; assume that MongoRunner raises exception on error
+
+ // the following tests depend on undefined behavior; assume that MongoRunner raises exception on
+ // error
print("------ Confirm that launch fails with directory");
- assert.throws(function() { MongoRunner.runMongod({ port: port[4], dbpath: dbdir, logpath: testdir }); });
+ assert.throws(function() {
+ MongoRunner.runMongod({port: port[4], dbpath: dbdir, logpath: testdir});
+ });
print("------ Confirm that launch fails with special file");
- assert.throws(function() { MongoRunner.runMongod({ port: port[5], dbpath: dbdir, logpath: sfile }); });
+ assert.throws(function() {
+ MongoRunner.runMongod({port: port[5], dbpath: dbdir, logpath: sfile});
+ });
}
diff --git a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
index 857c18c297c..589e072b631 100644
--- a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
+++ b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
@@ -2,8 +2,7 @@
// in-memory state small. See SERVER-12949 for more details.
//
function assertGLEOK(status) {
- assert(status.ok && status.err === null,
- "Expected OK status object; found " + tojson(status));
+ assert(status.ok && status.err === null, "Expected OK status object; found " + tojson(status));
}
var db = db.getSisterDB("MapReduceTestDB");
@@ -18,16 +17,20 @@ var expectedOutColl = [];
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10; i++) {
for (var j = 1; j < 50; j++) {
- bulk.insert({ idx: i, j: j });
+ bulk.insert({idx: i, j: j});
}
- expectedOutColl.push ({ _id: i, value: j - 1 });
+ expectedOutColl.push({_id: i, value: j - 1});
}
assert.writeOK(bulk.execute());
-function mapFn() { emit(this.idx, 1); }
-function reduceFn(key, values) { return Array.sum(values); }
+function mapFn() {
+ emit(this.idx, 1);
+}
+function reduceFn(key, values) {
+ return Array.sum(values);
+}
-var out = coll.mapReduce(mapFn, reduceFn, { out: { replace: "mrOutput" } });
+var out = coll.mapReduce(mapFn, reduceFn, {out: {replace: "mrOutput"}});
// Check the output is as expected
//
@@ -38,7 +41,7 @@ assert.eq(out.counts.input, 490, "input count is wrong");
assert.eq(out.counts.emit, 490, "emit count is wrong");
// If this fails, most probably some of the configuration settings under mongo::mr::Config have
-// changed, such as reduceTriggerRatio or maxInMemSize. If not the case, then something else
+// changed, such as reduceTriggerRatio or maxInMemSize. If not the case, then something else
// must have changed with when intermediate reduces occur (see mongo::mr::State::checkSize).
//
assert.eq(out.counts.reduce, 14, "reduce count is wrong");
diff --git a/jstests/noPassthroughWithMongod/moveprimary-replset.js b/jstests/noPassthroughWithMongod/moveprimary-replset.js
index 2725585a08e..0f956f638ea 100755..100644
--- a/jstests/noPassthroughWithMongod/moveprimary-replset.js
+++ b/jstests/noPassthroughWithMongod/moveprimary-replset.js
@@ -5,67 +5,72 @@
(function() {
"use strict";
-var numDocs = 10000;
-var baseName = "moveprimary-replset";
-var testDBName = baseName;
-var testCollName = 'coll';
+ var numDocs = 10000;
+ var baseName = "moveprimary-replset";
+ var testDBName = baseName;
+ var testCollName = 'coll';
-jsTest.log("Spinning up a sharded cluster, but not adding the shards");
-var shardingTestConfig = {
- name : baseName,
- mongos : 1,
- shards : 2,
- config : 3,
- rs : { nodes : 3 },
- other : { manualAddShard : true }
-};
-var shardingTest = new ShardingTest(shardingTestConfig);
+ jsTest.log("Spinning up a sharded cluster, but not adding the shards");
+ var shardingTestConfig = {
+ name: baseName,
+ mongos: 1,
+ shards: 2,
+ config: 3,
+ rs: {nodes: 3},
+ other: {manualAddShard: true}
+ };
+ var shardingTest = new ShardingTest(shardingTestConfig);
-jsTest.log("Geting connections to the individual shards");
-var replSet1 = shardingTest.rs0;
-var replSet2 = shardingTest.rs1;
+ jsTest.log("Geting connections to the individual shards");
+ var replSet1 = shardingTest.rs0;
+ var replSet2 = shardingTest.rs1;
-jsTest.log("Adding data to our first replica set");
-var repset1DB = replSet1.getPrimary().getDB(testDBName);
-for (var i = 1; i <= numDocs; i++) {
- repset1DB[testCollName].insert({ x : i });
-}
-replSet1.awaitReplication();
+ jsTest.log("Adding data to our first replica set");
+ var repset1DB = replSet1.getPrimary().getDB(testDBName);
+ for (var i = 1; i <= numDocs; i++) {
+ repset1DB[testCollName].insert({x: i});
+ }
+ replSet1.awaitReplication();
-jsTest.log("Geting connection to mongos for the cluster");
-var mongosConn = shardingTest.s;
-var testDB = mongosConn.getDB(testDBName);
+ jsTest.log("Geting connection to mongos for the cluster");
+ var mongosConn = shardingTest.s;
+ var testDB = mongosConn.getDB(testDBName);
-jsTest.log("Adding replSet1 as only shard");
-mongosConn.adminCommand({ addshard : replSet1.getURL() });
+ jsTest.log("Adding replSet1 as only shard");
+ mongosConn.adminCommand({addshard: replSet1.getURL()});
-jsTest.log("Updating the data via mongos and making sure all documents are updated and present");
-testDB[testCollName].update({}, { $set : { y : 'hello' } }, false/*upsert*/, true/*multi*/);
-assert.eq(testDB[testCollName].count({ y : 'hello' }), numDocs,
- 'updating and counting docs via mongos failed');
+ jsTest.log(
+ "Updating the data via mongos and making sure all documents are updated and present");
+ testDB[testCollName].update({}, {$set: {y: 'hello'}}, false /*upsert*/, true /*multi*/);
+ assert.eq(testDB[testCollName].count({y: 'hello'}),
+ numDocs,
+ 'updating and counting docs via mongos failed');
-jsTest.log("Adding replSet2 as second shard");
-mongosConn.adminCommand({ addshard : replSet2.getURL() });
+ jsTest.log("Adding replSet2 as second shard");
+ mongosConn.adminCommand({addshard: replSet2.getURL()});
-mongosConn.getDB('admin').printShardingStatus();
-printjson(replSet2.getPrimary().getDBs());
+ mongosConn.getDB('admin').printShardingStatus();
+ printjson(replSet2.getPrimary().getDBs());
-jsTest.log("Moving test db from replSet1 to replSet2");
-assert.commandWorked(mongosConn.getDB('admin').runCommand({ moveprimary: testDBName,
- to: replSet2.getURL() }));
-mongosConn.getDB('admin').printShardingStatus();
-printjson(replSet2.getPrimary().getDBs());
-assert.eq(testDB.getSiblingDB("config").databases.findOne({ "_id" : testDBName }).primary,
- replSet2.name, "Failed to change primary shard for unsharded database.");
+ jsTest.log("Moving test db from replSet1 to replSet2");
+ assert.commandWorked(
+ mongosConn.getDB('admin').runCommand({moveprimary: testDBName, to: replSet2.getURL()}));
+ mongosConn.getDB('admin').printShardingStatus();
+ printjson(replSet2.getPrimary().getDBs());
+ assert.eq(testDB.getSiblingDB("config").databases.findOne({"_id": testDBName}).primary,
+ replSet2.name,
+ "Failed to change primary shard for unsharded database.");
-jsTest.log("Updating the data via mongos and making sure all documents are updated and present");
-testDB[testCollName].update({}, { $set : { z : 'world' } }, false/*upsert*/, true/*multi*/);
-assert.eq(testDB[testCollName].count({ z : 'world' }), numDocs,
- 'updating and counting docs via mongos failed');
+ jsTest.log(
+ "Updating the data via mongos and making sure all documents are updated and present");
+ testDB[testCollName].update({}, {$set: {z: 'world'}}, false /*upsert*/, true /*multi*/);
+ assert.eq(testDB[testCollName].count({z: 'world'}),
+ numDocs,
+ 'updating and counting docs via mongos failed');
-jsTest.log("Shutting down cluster");
-shardingTest.stop();
+ jsTest.log("Shutting down cluster");
+ shardingTest.stop();
-print('moveprimary-replset.js SUCCESS');
+ print('moveprimary-replset.js SUCCESS');
})();
diff --git a/jstests/noPassthroughWithMongod/mr_noscripting.js b/jstests/noPassthroughWithMongod/mr_noscripting.js
index cd7f53ae28d..7a6ca1555a8 100644
--- a/jstests/noPassthroughWithMongod/mr_noscripting.js
+++ b/jstests/noPassthroughWithMongod/mr_noscripting.js
@@ -1,24 +1,21 @@
-var conn = MongoRunner.runMongod({ noscripting: '' });
-var testDB = conn.getDB( 'foo' );
+var conn = MongoRunner.runMongod({noscripting: ''});
+var testDB = conn.getDB('foo');
var coll = testDB.bar;
-coll.insert({ x: 1 });
+coll.insert({x: 1});
var map = function() {
- emit( this.x, 1 );
+ emit(this.x, 1);
};
-var reduce = function( key, values ) {
+var reduce = function(key, values) {
return 1;
};
-var mrResult = testDB.runCommand({ mapReduce: 'bar', map: map, reduce: reduce,
- out: { inline: 1 }});
+var mrResult = testDB.runCommand({mapReduce: 'bar', map: map, reduce: reduce, out: {inline: 1}});
-assert.eq( 0, mrResult.ok, 'mr result: ' + tojson( mrResult ));
+assert.eq(0, mrResult.ok, 'mr result: ' + tojson(mrResult));
// Confirm that mongod did not crash
-var cmdResult = testDB.adminCommand({ serverStatus: 1 });
-assert( cmdResult.ok, 'serverStatus failed, result: ' +
- tojson( cmdResult ));
-
+var cmdResult = testDB.adminCommand({serverStatus: 1});
+assert(cmdResult.ok, 'serverStatus failed, result: ' + tojson(cmdResult));
diff --git a/jstests/noPassthroughWithMongod/mr_writeconflict.js b/jstests/noPassthroughWithMongod/mr_writeconflict.js
index 60adb0ac0ce..baae608b59e 100644
--- a/jstests/noPassthroughWithMongod/mr_writeconflict.js
+++ b/jstests/noPassthroughWithMongod/mr_writeconflict.js
@@ -5,41 +5,41 @@
load('jstests/libs/parallelTester.js');
var makeDoc = function(keyLimit, valueLimit) {
- return {
- _id: ObjectId(),
- key: Random.randInt(keyLimit),
- value: Random.randInt(valueLimit)
- };
+ return {
+ _id: ObjectId(),
+ key: Random.randInt(keyLimit),
+ value: Random.randInt(valueLimit)
+ };
};
var main = function() {
- function mapper() {
- var obj = {};
- obj[this.value] = 1;
- emit(this.key, obj);
- }
+ function mapper() {
+ var obj = {};
+ obj[this.value] = 1;
+ emit(this.key, obj);
+ }
- function reducer(key, values) {
- var res = {};
+ function reducer(key, values) {
+ var res = {};
- values.forEach(function(obj) {
- Object.keys(obj).forEach(function(value) {
- if (!res.hasOwnProperty(value)) {
- res[value] = 0;
- }
- res[value] += obj[value];
- });
- });
+ values.forEach(function(obj) {
+ Object.keys(obj).forEach(function(value) {
+ if (!res.hasOwnProperty(value)) {
+ res[value] = 0;
+ }
+ res[value] += obj[value];
+ });
+ });
- return res;
- }
+ return res;
+ }
- for (var i = 0; i < 10; i++) {
- // Have all threads combine their results into the same collection
- var res = db.source.mapReduce(mapper, reducer, { out: { reduce: 'dest' } });
- assert.commandWorked(res);
- }
+ for (var i = 0; i < 10; i++) {
+ // Have all threads combine their results into the same collection
+ var res = db.source.mapReduce(mapper, reducer, {out: {reduce: 'dest'}});
+ assert.commandWorked(res);
+ }
};
Random.setRandomSeed();
@@ -48,8 +48,8 @@
var bulk = db.source.initializeUnorderedBulkOp();
var i;
for (i = 0; i < numDocs; ++i) {
- var doc = makeDoc(numDocs / 100, numDocs / 10);
- bulk.insert(doc);
+ var doc = makeDoc(numDocs / 100, numDocs / 10);
+ bulk.insert(doc);
}
var res = bulk.execute();
@@ -62,12 +62,12 @@
var numThreads = 6;
var t = [];
for (i = 0; i < numThreads - 1; ++i) {
- t[i] = new ScopedThread(main);
- t[i].start();
+ t[i] = new ScopedThread(main);
+ t[i].start();
}
main();
for (i = 0; i < numThreads - 1; ++i) {
- t[i].join();
+ t[i].join();
}
}());
diff --git a/jstests/noPassthroughWithMongod/newcollection2.js b/jstests/noPassthroughWithMongod/newcollection2.js
index 104eec7e897..46cd1316c90 100644
--- a/jstests/noPassthroughWithMongod/newcollection2.js
+++ b/jstests/noPassthroughWithMongod/newcollection2.js
@@ -2,16 +2,16 @@
var baseName = "jstests_disk_newcollection2";
var m = MongoRunner.runMongod({noprealloc: "", smallfiles: ""});
-db = m.getDB( "test" );
+db = m.getDB("test");
-db.createCollection( baseName, {size:0x1FFC0000-0x10-8192} );
-var v = db[ baseName ].validate();
-printjson( v );
-assert( v.valid );
+db.createCollection(baseName, {size: 0x1FFC0000 - 0x10 - 8192});
+var v = db[baseName].validate();
+printjson(v);
+assert(v.valid);
// Try creating collections with some invalid names and confirm that they
// don't crash MongoD.
-db.runCommand({ applyOps: [ { op: 'u', ns: 'a\0b' } ] });
+db.runCommand({applyOps: [{op: 'u', ns: 'a\0b'}]});
var res = db["a\0a"].insert({});
assert(res.hasWriteError(), "A write to collection a\0a succceeded");
diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js
index 45736c26a4e..cfec6199ca2 100644
--- a/jstests/noPassthroughWithMongod/no_balance_collection.js
+++ b/jstests/noPassthroughWithMongod/no_balance_collection.js
@@ -1,91 +1,95 @@
// Tests whether the noBalance flag disables balancing for collections
-var st = new ShardingTest({ shards : 2, mongos : 1, verbose : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1, verbose: 1});
// First, test that shell helpers require an argument
assert.throws(sh.disableBalancing, [], "sh.disableBalancing requires a collection");
assert.throws(sh.enableBalancing, [], "sh.enableBalancing requires a collection");
-
// Initially stop balancing
st.stopBalancer();
var shardAName = st._shardNames[0];
var shardBName = st._shardNames[1];
-var collA = st.s.getCollection( jsTest.name() + ".collA" );
-var collB = st.s.getCollection( jsTest.name() + ".collB" );
+var collA = st.s.getCollection(jsTest.name() + ".collA");
+var collB = st.s.getCollection(jsTest.name() + ".collB");
// Shard two collections
-st.shardColl( collA, { _id : 1 }, false );
-st.shardColl( collB, { _id : 1 }, false );
+st.shardColl(collA, {_id: 1}, false);
+st.shardColl(collB, {_id: 1}, false);
// Split into a lot of chunks so balancing can occur
-for( var i = 0; i < 10 - 1; i++ ){ // 10 chunks total
- collA.getMongo().getDB("admin").runCommand({ split : collA + "", middle : { _id : i } });
- collA.getMongo().getDB("admin").runCommand({ split : collB + "", middle : { _id : i } });
+for (var i = 0; i < 10 - 1; i++) { // 10 chunks total
+ collA.getMongo().getDB("admin").runCommand({split: collA + "", middle: {_id: i}});
+ collA.getMongo().getDB("admin").runCommand({split: collB + "", middle: {_id: i}});
}
// Disable balancing on one collection
-sh.disableBalancing( collB );
+sh.disableBalancing(collB);
-jsTest.log( "Balancing disabled on " + collB );
-printjson( collA.getDB().getSisterDB( "config" ).collections.find().toArray() );
+jsTest.log("Balancing disabled on " + collB);
+printjson(collA.getDB().getSisterDB("config").collections.find().toArray());
st.startBalancer();
// Make sure collA gets balanced
-assert.soon( function(){
- var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collA ), shard : shardAName }).itcount();
- var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collA ), shard : shardBName }).itcount();
- printjson({ shardA : shardAChunks, shardB : shardBChunks });
+assert.soon(function() {
+ var shardAChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collA), shard: shardAName}).itcount();
+ var shardBChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collA), shard: shardBName}).itcount();
+ printjson({shardA: shardAChunks, shardB: shardBChunks});
return shardAChunks == shardBChunks;
-}, "" + collA + " chunks not balanced!", 5 * 60 * 1000 );
+}, "" + collA + " chunks not balanced!", 5 * 60 * 1000);
-jsTest.log( "Chunks for " + collA + " are balanced." );
+jsTest.log("Chunks for " + collA + " are balanced.");
// Check that the collB chunks were not moved
-var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardAName }).itcount();
-var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardBName }).itcount();
-printjson({ shardA : shardAChunks, shardB : shardBChunks });
-assert( shardAChunks == 0 || shardBChunks == 0 );
+var shardAChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collB), shard: shardAName}).itcount();
+var shardBChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collB), shard: shardBName}).itcount();
+printjson({shardA: shardAChunks, shardB: shardBChunks});
+assert(shardAChunks == 0 || shardBChunks == 0);
// Re-enable balancing for collB
-sh.enableBalancing( collB );
+sh.enableBalancing(collB);
// Make sure that collB is now balanced
-assert.soon( function(){
- var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardAName }).itcount();
- var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardBName }).itcount();
- printjson({ shardA : shardAChunks, shardB : shardBChunks });
+assert.soon(function() {
+ var shardAChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collB), shard: shardAName}).itcount();
+ var shardBChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collB), shard: shardBName}).itcount();
+ printjson({shardA: shardAChunks, shardB: shardBChunks});
return shardAChunks == shardBChunks;
-}, "" + collB + " chunks not balanced!", 5 * 60 * 1000 );
+}, "" + collB + " chunks not balanced!", 5 * 60 * 1000);
-jsTest.log( "Chunks for " + collB + " are balanced." );
+jsTest.log("Chunks for " + collB + " are balanced.");
// Re-disable balancing for collB
-sh.disableBalancing( collB );
+sh.disableBalancing(collB);
// Wait for the balancer to fully finish the last migration and write the changelog
// MUST set db var here, ugly but necessary
db = st.s0.getDB("config");
sh.waitForBalancer(true);
// Make sure auto-migrates on insert don't move chunks
-var lastMigration = sh._lastMigration( collB );
+var lastMigration = sh._lastMigration(collB);
var bulk = collB.initializeUnorderedBulkOp();
-for( var i = 0; i < 1000000; i++ ){
- bulk.insert({ _id: i, hello: "world" });
+for (var i = 0; i < 1000000; i++) {
+ bulk.insert({_id: i, hello: "world"});
}
assert.writeOK(bulk.execute());
-printjson( lastMigration );
-printjson( sh._lastMigration( collB ) );
+printjson(lastMigration);
+printjson(sh._lastMigration(collB));
-if(lastMigration == null) {
+if (lastMigration == null) {
assert.eq(null, sh._lastMigration(collB));
-}
-else {
+} else {
assert.eq(lastMigration.time, sh._lastMigration(collB).time);
}
diff --git a/jstests/noPassthroughWithMongod/parallel_collection_scan.js b/jstests/noPassthroughWithMongod/parallel_collection_scan.js
index 44e5d361e45..11fa5d0bd75 100644
--- a/jstests/noPassthroughWithMongod/parallel_collection_scan.js
+++ b/jstests/noPassthroughWithMongod/parallel_collection_scan.js
@@ -3,27 +3,27 @@ t = db.parallel_collection_scan;
t.drop();
s = "";
-while ( s.length < 10000 )
+while (s.length < 10000)
s += ".";
var bulk = t.initializeUnorderedBulkOp();
-for ( i = 0; i < 8000; i++ ) {
- bulk.insert({ x: i, s: s });
- }
+for (i = 0; i < 8000; i++) {
+ bulk.insert({x: i, s: s});
+}
assert.writeOK(bulk.execute());
function iterateSliced() {
- var res = t.runCommand( "parallelCollectionScan", { numCursors : 3 } );
- assert( res.ok, tojson( res ) );
+ var res = t.runCommand("parallelCollectionScan", {numCursors: 3});
+ assert(res.ok, tojson(res));
var count = 0;
- for ( var i = 0; i < res.cursors.length; i++ ) {
+ for (var i = 0; i < res.cursors.length; i++) {
var x = res.cursors[i];
- var cursor = new DBCommandCursor( db.getMongo(), x, 5 );
+ var cursor = new DBCommandCursor(db.getMongo(), x, 5);
count += cursor.itcount();
}
return count;
}
-assert.eq( iterateSliced(), t.count() );
-assert.eq( iterateSliced(), i );
+assert.eq(iterateSliced(), t.count());
+assert.eq(iterateSliced(), i);
diff --git a/jstests/noPassthroughWithMongod/query_oplogreplay.js b/jstests/noPassthroughWithMongod/query_oplogreplay.js
index fc2760577bc..7d4e632f431 100644
--- a/jstests/noPassthroughWithMongod/query_oplogreplay.js
+++ b/jstests/noPassthroughWithMongod/query_oplogreplay.js
@@ -2,7 +2,7 @@
function test(t) {
t.drop();
- assert.commandWorked(t.getDB().createCollection(t.getName(), {capped: true, size: 16*1024}));
+ assert.commandWorked(t.getDB().createCollection(t.getName(), {capped: true, size: 16 * 1024}));
function makeTS(i) {
return Timestamp(1000, i);
@@ -23,11 +23,13 @@ function test(t) {
// 'ts' field is not top-level.
assert.throws(function() {
t.find({$or: [{ts: {$gt: makeTS(3)}}, {foo: 3}]})
- .addOption(DBQuery.Option.oplogReplay).next();
+ .addOption(DBQuery.Option.oplogReplay)
+ .next();
});
assert.throws(function() {
t.find({$nor: [{ts: {$gt: makeTS(4)}}, {foo: 4}]})
- .addOption(DBQuery.Option.oplogReplay).next();
+ .addOption(DBQuery.Option.oplogReplay)
+ .next();
});
// Predicate over 'ts' is not $gt or $gte.
@@ -61,5 +63,5 @@ var coll = db.jstests_query_oplogreplay;
coll.drop();
assert.commandWorked(coll.getDB().createCollection(coll.getName()));
var res = assert.throws(function() {
- coll.find({ts: {$gt: "abcd"}}).addOption(DBQuery.Option.oplogReplay).next();
- });
+ coll.find({ts: {$gt: "abcd"}}).addOption(DBQuery.Option.oplogReplay).next();
+});
diff --git a/jstests/noPassthroughWithMongod/reconfigwt.js b/jstests/noPassthroughWithMongod/reconfigwt.js
index e75495608be..4176022e971 100644
--- a/jstests/noPassthroughWithMongod/reconfigwt.js
+++ b/jstests/noPassthroughWithMongod/reconfigwt.js
@@ -8,14 +8,13 @@ var ss = db.serverStatus();
// Test is only valid in the WT suites which run against a mongod with WiredTiger enabled
if (ss.storageEngine.name !== "wiredTiger") {
print("Skipping reconfigwt.js since this server does not have WiredTiger enabled");
-}
-else {
+} else {
var conn = MongoRunner.runMongod();
- var admin = conn.getDB( "admin" );
+ var admin = conn.getDB("admin");
function reconfigure(str) {
- ret = admin.runCommand( { setParameter : 1, "wiredTigerEngineRuntimeConfig" : str });
+ ret = admin.runCommand({setParameter: 1, "wiredTigerEngineRuntimeConfig": str});
print("ret: " + tojson(ret));
return ret;
}
diff --git a/jstests/noPassthroughWithMongod/recstore.js b/jstests/noPassthroughWithMongod/recstore.js
index 339fdf2992c..fdb41af568b 100644
--- a/jstests/noPassthroughWithMongod/recstore.js
+++ b/jstests/noPassthroughWithMongod/recstore.js
@@ -7,16 +7,16 @@ t = db.storetest;
t.drop();
-t.save({z:3});
-t.save({z:2});
+t.save({z: 3});
+t.save({z: 2});
-t.ensureIndex({z:1});
-t.ensureIndex({q:1});
-assert( t.find().sort({z:1})[0].z == 2 );
+t.ensureIndex({z: 1});
+t.ensureIndex({q: 1});
+assert(t.find().sort({z: 1})[0].z == 2);
t.dropIndexes();
-assert( t.find().sort({z:1})[0].z == 2 );
+assert(t.find().sort({z: 1})[0].z == 2);
-t.ensureIndex({z:1});
-t.ensureIndex({q:1});
+t.ensureIndex({z: 1});
+t.ensureIndex({q: 1});
diff --git a/jstests/noPassthroughWithMongod/remove9.js b/jstests/noPassthroughWithMongod/remove9.js
index 3135514e4dc..b7da7b58f95 100644
--- a/jstests/noPassthroughWithMongod/remove9.js
+++ b/jstests/noPassthroughWithMongod/remove9.js
@@ -1,12 +1,13 @@
t = db.jstests_remove9;
t.drop();
-js = "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i} ); } db.jstests_remove9.remove( {i: {$gte:0} } ); }";
-pid = startMongoProgramNoConnect( "mongo" , "--eval" , js , db ? db.getMongo().host : null );
+js =
+ "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i} ); } db.jstests_remove9.remove( {i: {$gte:0} } ); }";
+pid = startMongoProgramNoConnect("mongo", "--eval", js, db ? db.getMongo().host : null);
Random.setRandomSeed();
-for( var i = 0; i < 10000; ++i ) {
- assert.writeOK(t.remove( { i: Random.randInt( 10000 )} ));
+for (var i = 0; i < 10000; ++i) {
+ assert.writeOK(t.remove({i: Random.randInt(10000)}));
}
-stopMongoProgramByPid( pid );
+stopMongoProgramByPid(pid);
diff --git a/jstests/noPassthroughWithMongod/replReads.js b/jstests/noPassthroughWithMongod/replReads.js
index a5b60ffea9c..45e0a4d49a6 100644
--- a/jstests/noPassthroughWithMongod/replReads.js
+++ b/jstests/noPassthroughWithMongod/replReads.js
@@ -1,16 +1,15 @@
// Test that doing slaveOk reads from secondaries hits all the secondaries evenly
function testReadLoadBalancing(numReplicas) {
+ var s =
+ new ShardingTest({shards: {rs0: {nodes: numReplicas}}, verbose: 2, other: {chunkSize: 1}});
- var s = new ShardingTest({ shards: { rs0: { nodes: numReplicas }},
- verbose: 2, other: { chunkSize: 1 }});
-
- s.adminCommand({enablesharding : "test"});
+ s.adminCommand({enablesharding: "test"});
s.config.settings.find().forEach(printjson);
- s.adminCommand({shardcollection : "test.foo", key : {_id : 1}});
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
- s.getDB("test").foo.insert({a : 123});
+ s.getDB("test").foo.insert({a: 123});
primary = s._rs[0].test.liveNodes.master;
secondaries = s._rs[0].test.liveNodes.slaves;
@@ -18,30 +17,30 @@ function testReadLoadBalancing(numReplicas) {
function rsStats() {
return s.getDB("admin").runCommand("connPoolStats")["replicaSets"][s.rs0.name];
}
-
- assert.eq( numReplicas , rsStats().hosts.length );
-
- function isMasterOrSecondary( info ){
- if ( ! info.ok )
+
+ assert.eq(numReplicas, rsStats().hosts.length);
+
+ function isMasterOrSecondary(info) {
+ if (!info.ok)
return false;
- if ( info.ismaster )
+ if (info.ismaster)
return true;
- return info.secondary && ! info.hidden;
+ return info.secondary && !info.hidden;
}
- assert.soon(
- function() {
- var x = rsStats().hosts;
- printjson(x);
- for ( var i=0; i<x.length; i++ )
- if ( ! isMasterOrSecondary( x[i] ) )
- return false;
- return true;
- }
- );
-
+ assert.soon(function() {
+ var x = rsStats().hosts;
+ printjson(x);
+ for (var i = 0; i < x.length; i++)
+ if (!isMasterOrSecondary(x[i]))
+ return false;
+ return true;
+ });
+
for (var i = 0; i < secondaries.length; i++) {
- assert.soon( function(){ return secondaries[i].getDB("test").foo.count() > 0; } );
+ assert.soon(function() {
+ return secondaries[i].getDB("test").foo.count() > 0;
+ });
secondaries[i].getDB('test').setProfilingLevel(2);
}
// Primary may change with reconfig
@@ -57,50 +56,53 @@ function testReadLoadBalancing(numReplicas) {
connections.push(conn);
}
- var profileCriteria = { op: 'query', ns: 'test.foo' };
+ var profileCriteria = {
+ op: 'query',
+ ns: 'test.foo'
+ };
for (var i = 0; i < secondaries.length; i++) {
var profileCollection = secondaries[i].getDB('test').system.profile;
- assert.eq(10, profileCollection.find(profileCriteria).count(),
- "Wrong number of read queries sent to secondary " + i +
- " " + tojson( profileCollection.find().toArray() ));
+ assert.eq(10,
+ profileCollection.find(profileCriteria).count(),
+ "Wrong number of read queries sent to secondary " + i + " " +
+ tojson(profileCollection.find().toArray()));
}
-
- db = primary.getDB( "test" );
-
+
+ db = primary.getDB("test");
+
printjson(rs.status());
c = rs.conf();
- print( "config before: " + tojson(c) );
- for ( i=0; i<c.members.length; i++ ) {
- if ( c.members[i].host == db.runCommand( "ismaster" ).primary )
+ print("config before: " + tojson(c));
+ for (i = 0; i < c.members.length; i++) {
+ if (c.members[i].host == db.runCommand("ismaster").primary)
continue;
c.members[i].hidden = true;
c.members[i].priority = 0;
break;
}
- rs.reconfig( c );
- print( "config after: " + tojson( rs.conf() ) );
-
- assert.soon(
- function() {
- var x = rsStats();
- printjson(x);
- var numOk = 0;
- // Now wait until the host disappears, since now we actually update our
- // replica sets via isMaster in mongos
- if( x.hosts.length == c["members"].length - 1 ) return true;
- /*
- for ( var i=0; i<x.hosts.length; i++ )
- if ( x.hosts[i].hidden )
- return true;
- */
- return false;
- } , "one slave not ok" , 180000 , 5000
- );
-
+ rs.reconfig(c);
+ print("config after: " + tojson(rs.conf()));
+
+ assert.soon(function() {
+ var x = rsStats();
+ printjson(x);
+ var numOk = 0;
+ // Now wait until the host disappears, since now we actually update our
+ // replica sets via isMaster in mongos
+ if (x.hosts.length == c["members"].length - 1)
+ return true;
+ /*
+ for ( var i=0; i<x.hosts.length; i++ )
+ if ( x.hosts[i].hidden )
+ return true;
+ */
+ return false;
+ }, "one slave not ok", 180000, 5000);
+
// Secondaries may change here
secondaries = s._rs[0].test.liveNodes.slaves;
-
+
for (var i = 0; i < secondaries.length * 10; i++) {
conn = new Mongo(s._mongos[0].host);
conn.setSlaveOk();
@@ -111,16 +113,16 @@ function testReadLoadBalancing(numReplicas) {
var counts = [];
for (var i = 0; i < secondaries.length; i++) {
var profileCollection = secondaries[i].getDB('test').system.profile;
- counts.push( profileCollection.find(profileCriteria).count() );
+ counts.push(profileCollection.find(profileCriteria).count());
}
counts = counts.sort();
- assert.eq( 20 , Math.abs( counts[1] - counts[0] ), "counts wrong: " + tojson( counts ) );
+ assert.eq(20, Math.abs(counts[1] - counts[0]), "counts wrong: " + tojson(counts));
s.stop();
}
-//for (var i = 1; i < 10; i++) {
+// for (var i = 1; i < 10; i++) {
// testReadLoadBalancing(i)
//}
diff --git a/jstests/noPassthroughWithMongod/replica_set_shard_version.js b/jstests/noPassthroughWithMongod/replica_set_shard_version.js
index f853c74603e..b8fe681cc06 100644
--- a/jstests/noPassthroughWithMongod/replica_set_shard_version.js
+++ b/jstests/noPassthroughWithMongod/replica_set_shard_version.js
@@ -1,8 +1,8 @@
// Tests whether a Replica Set in a mongos cluster can cause versioning problems
-jsTestLog( "Starting sharded cluster..." );
+jsTestLog("Starting sharded cluster...");
-var st = new ShardingTest( { shards : 1, mongos : 2, other : { rs : true } } );
+var st = new ShardingTest({shards: 1, mongos: 2, other: {rs: true}});
// Uncomment to stop the balancer, since the balancer usually initializes the shard automatically
// SERVER-4921 is otherwise hard to manifest
@@ -12,48 +12,50 @@ var mongosA = st.s0;
var mongosB = st.s1;
var shard = st.shard0;
-coll = mongosA.getCollection( jsTestName() + ".coll" );
+coll = mongosA.getCollection(jsTestName() + ".coll");
// Wait for primary and then initialize shard SERVER-5130
st.rs0.getPrimary();
coll.findOne();
-var sadmin = shard.getDB( "admin" );
-assert.throws(function() { sadmin.runCommand({ replSetStepDown : 3000, force : true }); });
+var sadmin = shard.getDB("admin");
+assert.throws(function() {
+ sadmin.runCommand({replSetStepDown: 3000, force: true});
+});
st.rs0.getPrimary();
-mongosA.getDB("admin").runCommand({ setParameter : 1, traceExceptions : true });
+mongosA.getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
-try{
-    // This _almost_ always fails, unless the new primary is already detected. If it fails, it should
+try {
+    // This _almost_ always fails, unless the new primary is already detected. If it fails, it
+ // should
// mark the master as bad, so mongos will reload the replica set master next request
// TODO: Can we just retry and succeed here?
coll.findOne();
-}
-catch( e ){
- print( "This error is expected : " );
- printjson( e );
+} catch (e) {
+ print("This error is expected : ");
+ printjson(e);
}
-jsTest.log( "Running query which should succeed..." );
+jsTest.log("Running query which should succeed...");
// This should always succeed without throwing an error
coll.findOne();
-mongosA.getDB("admin").runCommand({ setParameter : 1, traceExceptions : false });
+mongosA.getDB("admin").runCommand({setParameter: 1, traceExceptions: false});
// now check secondary
-assert.throws(function() { sadmin.runCommand({ replSetStepDown : 3000, force : true }); });
+assert.throws(function() {
+ sadmin.runCommand({replSetStepDown: 3000, force: true});
+});
// Can't use the mongosB - SERVER-5128
-other = new Mongo( mongosA.host );
-other.setSlaveOk( true );
-other = other.getCollection( jsTestName() + ".coll" );
-
-print( "eliot: " + tojson( other.findOne() ) );
-
+other = new Mongo(mongosA.host);
+other.setSlaveOk(true);
+other = other.getCollection(jsTestName() + ".coll");
+print("eliot: " + tojson(other.findOne()));
st.stop();
diff --git a/jstests/noPassthroughWithMongod/rpc_protocols.js b/jstests/noPassthroughWithMongod/rpc_protocols.js
index 2720d30b88d..7e33c3986d3 100644
--- a/jstests/noPassthroughWithMongod/rpc_protocols.js
+++ b/jstests/noPassthroughWithMongod/rpc_protocols.js
@@ -4,7 +4,7 @@
// startup using the "--rpcProtocols" command line option, or at runtime using the
// "setClientRPCProtocols" method on the Mongo object.
-var RPC_PROTOCOLS = {
+var RPC_PROTOCOLS = {
OP_QUERY: "opQueryOnly",
OP_COMMAND: "opCommandOnly"
};
@@ -19,53 +19,59 @@ var RPC_PROTOCOLS = {
assert.commandWorked(db.setProfilingLevel(2));
function runInShell(rpcProtocol, func) {
- assert (0 == _runMongoProgram("mongo",
- "--rpcProtocols="+rpcProtocol,
- "--readMode=commands", // ensure we use the find command.
- "--eval",
- "(" + func.toString() + ")();",
- db.getMongo().host));
-
+ assert(0 == _runMongoProgram("mongo",
+ "--rpcProtocols=" + rpcProtocol,
+ "--readMode=commands", // ensure we use the find command.
+ "--eval",
+ "(" + func.toString() + ")();",
+ db.getMongo().host));
}
// Test that --rpcProtocols=opQueryOnly forces OP_QUERY commands.
- runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opQueryCommandLine").itcount();
- });
+ runInShell(
+ RPC_PROTOCOLS.OP_QUERY,
+ function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opQueryCommandLine").itcount();
+ });
var profileDoc = db.system.profile.findOne({"query.comment": "opQueryCommandLine"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_query");
// Test that --rpcProtocols=opCommandOnly forces OP_COMMAND commands.
- runInShell(RPC_PROTOCOLS.OP_COMMAND, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opCommandCommandLine").itcount();
- });
+ runInShell(
+ RPC_PROTOCOLS.OP_COMMAND,
+ function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opCommandCommandLine").itcount();
+ });
profileDoc = db.system.profile.findOne({"query.comment": "opCommandCommandLine"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_command");
// Test that .setClientRPCProtocols("opQueryOnly") forces OP_QUERY commands. We start the shell
// in OP_COMMAND only mode, then switch it to OP_QUERY mode at runtime.
- runInShell(RPC_PROTOCOLS.OP_COMMAND, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
- db.getMongo().setClientRPCProtocols("opQueryOnly");
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opQueryRuntime").itcount();
- });
+ runInShell(RPC_PROTOCOLS.OP_COMMAND,
+ function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
+ db.getMongo().setClientRPCProtocols("opQueryOnly");
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opQueryRuntime").itcount();
+ });
profileDoc = db.system.profile.findOne({"query.comment": "opQueryRuntime"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_query");
// Test that .setClientRPCProtocols("opCommandOnly") forces OP_COMMAND commands. We start the
// shell in OP_QUERY only mode, then switch it to OP_COMMAND mode at runtime.
- runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getMongo().setClientRPCProtocols("opCommandOnly");
- assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opCommandRuntime").itcount();
- });
+ runInShell(
+ RPC_PROTOCOLS.OP_QUERY,
+ function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getMongo().setClientRPCProtocols("opCommandOnly");
+ assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opCommandRuntime").itcount();
+ });
profileDoc = db.system.profile.findOne({"query.comment": "opCommandRuntime"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_command");
diff --git a/jstests/noPassthroughWithMongod/server7428.js b/jstests/noPassthroughWithMongod/server7428.js
index d077e126d8a..745f11021f4 100644
--- a/jstests/noPassthroughWithMongod/server7428.js
+++ b/jstests/noPassthroughWithMongod/server7428.js
@@ -8,15 +8,15 @@
(function() {
-// Setup fromDb with no auth
-var fromDb = MongoRunner.runMongod();
+ // Setup fromDb with no auth
+ var fromDb = MongoRunner.runMongod();
-// Setup toDb with auth
-var toDb = MongoRunner.runMongod({auth: ""});
-var admin = toDb.getDB("admin");
-admin.createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
-admin.auth("foo","bar");
+ // Setup toDb with auth
+ var toDb = MongoRunner.runMongod({auth: ""});
+ var admin = toDb.getDB("admin");
+ admin.createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
+ admin.auth("foo", "bar");
-admin.copyDatabase('test', 'test', fromDb.host);
+ admin.copyDatabase('test', 'test', fromDb.host);
})();
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
index b112590d0a0..0f4b19e22e3 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
@@ -2,62 +2,66 @@
// Tests migration behavior of large documents
//
-var st = new ShardingTest({ shards : 2, mongos : 1,
- other : { mongosOptions : { noAutoSplit : "" },
- shardOptions : { /* binVersion : "latest" */ } } });
+var st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {mongosOptions: {noAutoSplit: ""}, shardOptions: {/* binVersion : "latest" */}}
+});
st.stopBalancer();
var mongos = st.s0;
-var coll = mongos.getCollection( "foo.bar" );
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var shardAdmin = st.shard0.getDB( "admin" );
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+var shards = mongos.getCollection("config.shards").find().toArray();
+var shardAdmin = st.shard0.getDB("admin");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-jsTest.log( "Preparing large insert..." );
+jsTest.log("Preparing large insert...");
var data1MB = "x";
-while ( data1MB.length < 1024 * 1024 )
+while (data1MB.length < 1024 * 1024)
data1MB += data1MB;
var data15MB = "";
-for ( var i = 0; i < 15; i++ ) data15MB += data1MB;
+for (var i = 0; i < 15; i++)
+ data15MB += data1MB;
var data15PlusMB = data15MB;
-for ( var i = 0; i < 1023 * 1024; i++ ) data15PlusMB += "x";
+for (var i = 0; i < 1023 * 1024; i++)
+ data15PlusMB += "x";
-print("~15MB object size is : " + Object.bsonsize({ _id : 0, d : data15PlusMB }));
+print("~15MB object size is : " + Object.bsonsize({_id: 0, d: data15PlusMB}));
-jsTest.log( "Inserting docs of large and small sizes..." );
+jsTest.log("Inserting docs of large and small sizes...");
// Two large docs next to each other
-coll.insert({ _id : -2, d : data15PlusMB });
-coll.insert({ _id : -1, d : data15PlusMB });
+coll.insert({_id: -2, d: data15PlusMB});
+coll.insert({_id: -1, d: data15PlusMB});
// Docs of assorted sizes
-assert.writeOK(coll.insert({ _id : 0, d : "x" }));
-assert.writeOK(coll.insert({ _id : 1, d : data15PlusMB }));
-assert.writeOK(coll.insert({ _id : 2, d : "x" }));
-assert.writeOK(coll.insert({ _id : 3, d : data15MB }));
-assert.writeOK(coll.insert({ _id : 4, d : "x" }));
-assert.writeOK(coll.insert({ _id : 5, d : data1MB }));
-assert.writeOK(coll.insert({ _id : 6, d : "x" }));
+assert.writeOK(coll.insert({_id: 0, d: "x"}));
+assert.writeOK(coll.insert({_id: 1, d: data15PlusMB}));
+assert.writeOK(coll.insert({_id: 2, d: "x"}));
+assert.writeOK(coll.insert({_id: 3, d: data15MB}));
+assert.writeOK(coll.insert({_id: 4, d: "x"}));
+assert.writeOK(coll.insert({_id: 5, d: data1MB}));
+assert.writeOK(coll.insert({_id: 6, d: "x"}));
-assert.eq( 9, coll.find().itcount() );
+assert.eq(9, coll.find().itcount());
-jsTest.log( "Starting migration..." );
+jsTest.log("Starting migration...");
-assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }).ok );
-assert( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : shards[1]._id }).ok );
+assert(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}).ok);
+assert(admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: shards[1]._id}).ok);
// Ensure that the doc count is correct and that the mongos query path can handle docs near the 16MB
// user BSON size limit.
-assert.eq( 9, coll.find().itcount() );
+assert.eq(9, coll.find().itcount());
-jsTest.log( "DONE!" );
+jsTest.log("DONE!");
st.stop();
diff --git a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js
index ba3142bbe5c..8b53f07d3a3 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js
@@ -1,27 +1,28 @@
var name = "sharding_rs_arb1";
-var replTest = new ReplSetTest( { name : name , nodes : 3 } );
+var replTest = new ReplSetTest({name: name, nodes: 3});
replTest.startSet();
var port = replTest.ports;
-replTest.initiate({_id : name, members :
- [
- {_id:0, host : getHostName()+":"+port[0]},
- {_id:1, host : getHostName()+":"+port[1]},
- {_id:2, host : getHostName()+":"+port[2], arbiterOnly : true},
- ],
- });
+replTest.initiate({
+ _id: name,
+ members: [
+ {_id: 0, host: getHostName() + ":" + port[0]},
+ {_id: 1, host: getHostName() + ":" + port[1]},
+ {_id: 2, host: getHostName() + ":" + port[2], arbiterOnly: true},
+ ],
+});
replTest.awaitReplication();
var master = replTest.getPrimary();
-var db = master.getDB( "test" );
-printjson( rs.status() );
+var db = master.getDB("test");
+printjson(rs.status());
var st = new ShardingTest({numShards: 0});
var admin = st.getDB('admin');
-var res = admin.runCommand( { addshard : replTest.getURL() } );
-printjson( res );
-assert( res.ok , tojson(res) );
+var res = admin.runCommand({addshard: replTest.getURL()});
+printjson(res);
+assert(res.ok, tojson(res));
st.stop();
replTest.stopSet();
diff --git a/jstests/noPassthroughWithMongod/shelllimit.js b/jstests/noPassthroughWithMongod/shelllimit.js
index cc7e7359ef6..3b270bddc12 100644
--- a/jstests/noPassthroughWithMongod/shelllimit.js
+++ b/jstests/noPassthroughWithMongod/shelllimit.js
@@ -7,15 +7,15 @@
t.drop();
var pre = db.serverStatus().metrics.cursor.open.total;
- for (var i=1; i<=5; i++) {
- t.save( { a : i } );
+ for (var i = 1; i <= 5; i++) {
+ t.save({a: i});
}
var c = t.find().limit(3);
- while(c.hasNext()) {
+ while (c.hasNext()) {
var v = c.next();
}
- assert.eq(pre,db.serverStatus().metrics.cursor.open.total);
+ assert.eq(pre, db.serverStatus().metrics.cursor.open.total);
t.drop();
}());
diff --git a/jstests/noPassthroughWithMongod/temp_namespace.js b/jstests/noPassthroughWithMongod/temp_namespace.js
index f74ac73bcea..a2f1aa21a80 100644
--- a/jstests/noPassthroughWithMongod/temp_namespace.js
+++ b/jstests/noPassthroughWithMongod/temp_namespace.js
@@ -7,31 +7,34 @@ testname = 'temp_namespace_sw';
var conn = MongoRunner.runMongod({smallfiles: "", noprealloc: "", nopreallocj: ""});
d = conn.getDB('test');
-d.runCommand({create: testname+'temp1', temp: true});
-d[testname+'temp1'].ensureIndex({x:1});
-d.runCommand({create: testname+'temp2', temp: 1});
-d[testname+'temp2'].ensureIndex({x:1});
-d.runCommand({create: testname+'keep1', temp: false});
-d.runCommand({create: testname+'keep2', temp: 0});
-d.runCommand({create: testname+'keep3'});
-d[testname+'keep4'].insert({});
+d.runCommand({create: testname + 'temp1', temp: true});
+d[testname + 'temp1'].ensureIndex({x: 1});
+d.runCommand({create: testname + 'temp2', temp: 1});
+d[testname + 'temp2'].ensureIndex({x: 1});
+d.runCommand({create: testname + 'keep1', temp: false});
+d.runCommand({create: testname + 'keep2', temp: 0});
+d.runCommand({create: testname + 'keep3'});
+d[testname + 'keep4'].insert({});
-function countCollectionNames( theDB, regex ) {
- return theDB.getCollectionNames().filter( function(z) {
- return z.match( regex ); } ).length;
+function countCollectionNames(theDB, regex) {
+ return theDB.getCollectionNames().filter(function(z) {
+ return z.match(regex);
+ }).length;
}
-assert.eq(countCollectionNames( d, /temp\d$/) , 2);
-assert.eq(countCollectionNames( d, /keep\d$/) , 4);
+assert.eq(countCollectionNames(d, /temp\d$/), 2);
+assert.eq(countCollectionNames(d, /keep\d$/), 4);
MongoRunner.stopMongod(conn);
-conn = MongoRunner.runMongod({restart:true,
- cleanData: false,
- dbpath: conn.dbpath,
- smallfiles: "",
- noprealloc: "",
- nopreallocj: ""});
+conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: conn.dbpath,
+ smallfiles: "",
+ noprealloc: "",
+ nopreallocj: ""
+});
d = conn.getDB('test');
-assert.eq(countCollectionNames( d, /temp\d$/) , 0);
-assert.eq(countCollectionNames( d, /keep\d$/) , 4);
+assert.eq(countCollectionNames(d, /temp\d$/), 0);
+assert.eq(countCollectionNames(d, /keep\d$/), 4);
MongoRunner.stopMongod(conn);
diff --git a/jstests/noPassthroughWithMongod/testing_only_commands.js b/jstests/noPassthroughWithMongod/testing_only_commands.js
index 51d104bee48..3ac3db8ed67 100644
--- a/jstests/noPassthroughWithMongod/testing_only_commands.js
+++ b/jstests/noPassthroughWithMongod/testing_only_commands.js
@@ -3,14 +3,16 @@
* via the --enableTestCommands flag fail when that flag isn't provided.
*/
-var testOnlyCommands = ['configureFailPoint',
- '_hashBSONElement',
- 'replSetTest',
- 'journalLatencyTest',
- 'godinsert',
- 'sleep',
- 'captrunc',
- 'emptycapped'];
+var testOnlyCommands = [
+ 'configureFailPoint',
+ '_hashBSONElement',
+ 'replSetTest',
+ 'journalLatencyTest',
+ 'godinsert',
+ 'sleep',
+ 'captrunc',
+ 'emptycapped'
+];
var assertCmdNotFound = function(db, cmdName) {
var res = db.runCommand(cmdName);
@@ -21,9 +23,10 @@ var assertCmdNotFound = function(db, cmdName) {
var assertCmdFound = function(db, cmdName) {
var res = db.runCommand(cmdName);
if (!res.ok) {
- assert.neq(59, res.code,
+ assert.neq(59,
+ res.code,
'test command ' + cmdName + ' should either have succeeded or ' +
- 'failed with an error code other than CommandNotFound(59)');
+ 'failed with an error code other than CommandNotFound(59)');
}
};
diff --git a/jstests/noPassthroughWithMongod/ttl1.js b/jstests/noPassthroughWithMongod/ttl1.js
index 906f2be75cf..60df6537023 100644
--- a/jstests/noPassthroughWithMongod/ttl1.js
+++ b/jstests/noPassthroughWithMongod/ttl1.js
@@ -11,9 +11,9 @@
assertEntryMatches = function(array, regex) {
var found = false;
- for (i=0; i<array.length; i++) {
+ for (i = 0; i < array.length; i++) {
if (regex.test(array[i])) {
- found = true;
+ found = true;
}
}
assert(found,
@@ -22,23 +22,23 @@ assertEntryMatches = function(array, regex) {
// Part 1
var t = db.ttl1;
t.drop();
-t.runCommand( "create", { flags : 0 } );
+t.runCommand("create", {flags: 0});
var now = (new Date()).getTime();
-for (i=0; i<24; i++) {
+for (i = 0; i < 24; i++) {
var past = new Date(now - (3600 * 1000 * i));
t.insert({x: past, y: past, z: past});
}
-t.insert( { a : 1 } ); //no x value
-t.insert( { x: null } ); //non-date value
-t.insert( { x : true } ); //non-date value
-t.insert( { x : "yo" } ); //non-date value
-t.insert( { x : 3 } ); //non-date value
-t.insert( { x : /foo/ } ); //non-date value
+t.insert({a: 1}); // no x value
+t.insert({x: null}); // non-date value
+t.insert({x: true}); // non-date value
+t.insert({x: "yo"}); // non-date value
+t.insert({x: 3}); // non-date value
+t.insert({x: /foo/}); // non-date value
-assert.eq( 30 , t.count() );
+assert.eq(30, t.count());
-t.ensureIndex( { z : 1 } , { expireAfterSeconds : "20000" } );
+t.ensureIndex({z: 1}, {expireAfterSeconds: "20000"});
sleep(70 * 1000);
@@ -51,33 +51,29 @@ var msg = RegExp("ttl indexes require the expireAfterSeconds" +
assertEntryMatches(log, msg);
// Part 2
-t.ensureIndex( { x : 1 } , { expireAfterSeconds : 20000 } );
+t.ensureIndex({x: 1}, {expireAfterSeconds: 20000});
-assert.soon(
- function() {
- return t.count() < 30;
- }, "TTL index on x didn't delete" , 70 * 1000);
+assert.soon(function() {
+ return t.count() < 30;
+}, "TTL index on x didn't delete", 70 * 1000);
// We know the TTL thread has started deleting. Wait a few seconds to give it a chance to finish.
-assert.soon(
- function() {
- return t.find( { x : { $lt : new Date( now - ( 20000 * 1000 ) ) } } ).count() === 0;
- }, "TTL index on x didn't finish deleting", 5 * 1000);
-assert.eq( 12 , t.count() );
+assert.soon(function() {
+ return t.find({x: {$lt: new Date(now - (20000 * 1000))}}).count() === 0;
+}, "TTL index on x didn't finish deleting", 5 * 1000);
+assert.eq(12, t.count());
-assert.lte( 18, db.serverStatus().metrics.ttl.deletedDocuments );
-assert.lte( 1, db.serverStatus().metrics.ttl.passes );
+assert.lte(18, db.serverStatus().metrics.ttl.deletedDocuments);
+assert.lte(1, db.serverStatus().metrics.ttl.passes);
// Part 3
-t.ensureIndex( { y : 1 } , { expireAfterSeconds : 10000 } );
+t.ensureIndex({y: 1}, {expireAfterSeconds: 10000});
-assert.soon(
- function() {
- return t.count() < 12;
- }, "TTL index on y didn't delete" , 70 * 1000);
+assert.soon(function() {
+ return t.count() < 12;
+}, "TTL index on y didn't delete", 70 * 1000);
-assert.soon(
- function() {
- return t.find( { y : { $lt : new Date( now - ( 10000 * 1000 ) ) } } ).count() === 0;
- }, "TTL index on y didn't finish deleting", 5 * 1000);
-assert.eq( 9 , t.count() );
+assert.soon(function() {
+ return t.find({y: {$lt: new Date(now - (10000 * 1000))}}).count() === 0;
+}, "TTL index on y didn't finish deleting", 5 * 1000);
+assert.eq(9, t.count());
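
For reference, the TTL mechanism ttl1.js exercises, condensed into a standalone sketch; the collection and field names are hypothetical, and expireAfterSeconds must be numeric (the string "20000" above is what triggers the warning the test greps for):

    // Sketch only: a numeric expireAfterSeconds produces a working TTL index.
    var c = db.ttl_sketch;  // hypothetical collection
    c.drop();
    // insert one document that is already two hours old
    c.insert({createdAt: new Date((new Date()).getTime() - 2 * 3600 * 1000)});
    c.ensureIndex({createdAt: 1}, {expireAfterSeconds: 3600});  // expire after one hour
    // The TTL monitor runs roughly once a minute, so poll instead of sleeping a fixed time.
    assert.soon(function() {
        return c.count() === 0;
    }, "TTL monitor did not remove the expired document", 70 * 1000);
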
diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js
index 4c16c7f6306..794f0c3ad90 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl.js
@@ -7,7 +7,7 @@
load("jstests/replsets/rslib.js");
-var rt = new ReplSetTest( { name : "ttl_repl" , nodes: 2 } );
+var rt = new ReplSetTest({name: "ttl_repl", nodes: 2});
/******** Part 1 ***************/
@@ -19,10 +19,10 @@ rt.awaitSecondaryNodes();
var slave1 = rt.liveNodes.slaves[0];
// shortcuts
-var masterdb = master.getDB( 'd' );
-var slave1db = slave1.getDB( 'd' );
-var mastercol = masterdb[ 'c' ];
-var slave1col = slave1db[ 'c' ];
+var masterdb = master.getDB('d');
+var slave1db = slave1.getDB('d');
+var mastercol = masterdb['c'];
+var slave1col = slave1db['c'];
// turn off usePowerOf2Sizes as this test checks that the flag is set automatically
mastercol.drop();
@@ -31,36 +31,35 @@ masterdb.createCollection(mastercol.getName(), {usePowerOf2Sizes: false});
// create new collection. insert 24 docs, aged at one-hour intervals
now = (new Date()).getTime();
var bulk = mastercol.initializeUnorderedBulkOp();
-for ( i=0; i<24; i++ ) {
- bulk.insert({ x: new Date( now - ( 3600 * 1000 * i )) });
+for (i = 0; i < 24; i++) {
+ bulk.insert({x: new Date(now - (3600 * 1000 * i))});
}
assert.writeOK(bulk.execute());
rt.awaitReplication();
-assert.eq( 24 , mastercol.count() , "docs not inserted on primary" );
-assert.eq( 24 , slave1col.count() , "docs not inserted on secondary" );
+assert.eq(24, mastercol.count(), "docs not inserted on primary");
+assert.eq(24, slave1col.count(), "docs not inserted on secondary");
print("Initial Stats:");
print("Master:");
-printjson( mastercol.stats() );
+printjson(mastercol.stats());
print("Slave1:");
-printjson( slave1col.stats() );
+printjson(slave1col.stats());
// create TTL index, wait for TTL monitor to kick in, then check that
// the correct number of docs age out
-assert.commandWorked(mastercol.ensureIndex({ x: 1 }, { expireAfterSeconds: 20000 }));
+assert.commandWorked(mastercol.ensureIndex({x: 1}, {expireAfterSeconds: 20000}));
rt.awaitReplication();
-sleep(70*1000); // TTL monitor runs every 60 seconds, so wait 70
+sleep(70 * 1000); // TTL monitor runs every 60 seconds, so wait 70
print("Stats after waiting for TTL Monitor:");
print("Master:");
-printjson( mastercol.stats() );
+printjson(mastercol.stats());
print("Slave1:");
-printjson( slave1col.stats() );
-
-assert.eq( 6 , mastercol.count() , "docs not deleted on primary" );
-assert.eq( 6 , slave1col.count() , "docs not deleted on secondary" );
+printjson(slave1col.stats());
+assert.eq(6, mastercol.count(), "docs not deleted on primary");
+assert.eq(6, slave1col.count(), "docs not deleted on secondary");
/******** Part 2 ***************/
@@ -70,33 +69,31 @@ var config = rt.getReplSetConfig();
config.version = 2;
reconfig(rt, config);
-var slave2col = slave.getDB( 'd' )[ 'c' ];
+var slave2col = slave.getDB('d')['c'];
// check that the new secondary has the correct number of docs
print("New Slave stats:");
-printjson( slave2col.stats() );
-
-assert.eq( 6 , slave2col.count() , "wrong number of docs on new secondary");
+printjson(slave2col.stats());
+assert.eq(6, slave2col.count(), "wrong number of docs on new secondary");
/******* Part 3 *****************/
-//Check that the collMod command successfully updates the expireAfterSeconds field
-masterdb.runCommand( { collMod : "c",
- index : { keyPattern : {x : 1}, expireAfterSeconds : 10000} } );
+// Check that the collMod command successfully updates the expireAfterSeconds field
+masterdb.runCommand({collMod: "c", index: {keyPattern: {x: 1}, expireAfterSeconds: 10000}});
rt.awaitReplication();
-function getTTLTime( theCollection, theKey ) {
+function getTTLTime(theCollection, theKey) {
var indexes = theCollection.getIndexes();
- for ( var i = 0; i < indexes.length; i++ ) {
- if ( friendlyEqual( theKey, indexes[i].key ) )
+ for (var i = 0; i < indexes.length; i++) {
+ if (friendlyEqual(theKey, indexes[i].key))
return indexes[i].expireAfterSeconds;
}
throw "not found";
}
-printjson( masterdb.c.getIndexes() );
-assert.eq( 10000, getTTLTime( masterdb.c, { x : 1 } ) );
-assert.eq( 10000, getTTLTime( slave1db.c, { x : 1 } ) );
+printjson(masterdb.c.getIndexes());
+assert.eq(10000, getTTLTime(masterdb.c, {x: 1}));
+assert.eq(10000, getTTLTime(slave1db.c, {x: 1}));
// finish up
rt.stopSet();
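
The collMod idiom from Part 3 above, isolated as a standalone sketch (collection name and expiry values are hypothetical):

    // Sketch only: change a TTL index's expiry in place with collMod, then read it back.
    var coll = db.ttl_collmod_sketch;  // hypothetical collection
    coll.drop();
    coll.ensureIndex({x: 1}, {expireAfterSeconds: 20000});
    assert.commandWorked(db.runCommand(
        {collMod: coll.getName(), index: {keyPattern: {x: 1}, expireAfterSeconds: 10000}}));
    // The new value is visible in the index metadata.
    var spec = coll.getIndexes().filter(function(idx) {
        return friendlyEqual(idx.key, {x: 1});
    })[0];
    assert.eq(10000, spec.expireAfterSeconds);
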
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
index 740f49b665d..45a5d752106 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
@@ -11,20 +11,20 @@ var conn;
var primeSystemReplset = function() {
conn = MongoRunner.runMongod();
var localDB = conn.getDB("local");
- localDB.system.replset.insert({x:1});
+ localDB.system.replset.insert({x: 1});
print("create a TTL collection");
var testDB = conn.getDB("test");
- assert.commandWorked(testDB.foo.ensureIndex({ x: 1 }, { expireAfterSeconds: 2 }));
+ assert.commandWorked(testDB.foo.ensureIndex({x: 1}, {expireAfterSeconds: 2}));
};
var restartWithConfig = function() {
MongoRunner.stopMongod(conn.port, 15);
- conn = MongoRunner.runMongod({restart:true, cleanData: false, dbpath: conn.dbpath});
+ conn = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: conn.dbpath});
testDB = conn.getDB("test");
var n = 100;
- for (var i=0; i<n; i++) {
- testDB.foo.insert({x : new Date()});
+ for (var i = 0; i < n; i++) {
+ testDB.foo.insert({x: new Date()});
}
print("sleeping 65 seconds");
@@ -39,7 +39,7 @@ var restartWithoutConfig = function() {
MongoRunner.stopMongod(conn.port, 15);
- conn = MongoRunner.runMongod({restart:true, cleanData: false, dbpath: conn.dbpath});
+ conn = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: conn.dbpath});
assert.soon(function() {
return conn.getDB("test").foo.count() < 100;
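
The restart idiom shared by both helpers above, as a standalone sketch; it only illustrates that cleanData: false preserves the dbpath contents across a restart:

    // Sketch only: restart a MongoRunner-managed mongod in place, keeping its data files.
    var conn = MongoRunner.runMongod();
    assert.writeOK(conn.getDB("test").foo.insert({x: 1}));
    MongoRunner.stopMongod(conn.port, 15);  // clean shutdown, as in the helpers above
    conn = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: conn.dbpath});
    assert.eq(1, conn.getDB("test").foo.count(), "data should survive the restart");
    MongoRunner.stopMongod(conn.port);
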
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
index a4319a15c13..1ec78f6ee65 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
@@ -1,7 +1,7 @@
/** Test TTL docs are not deleted from secondaries directly
*/
-var rt = new ReplSetTest( { name : "ttl_repl" , nodes: 2 } );
+var rt = new ReplSetTest({name: "ttl_repl", nodes: 2});
// setup set
var nodes = rt.startSet();
@@ -11,26 +11,26 @@ rt.awaitSecondaryNodes();
var slave1 = rt.getSecondary();
// shortcuts
-var masterdb = master.getDB( 'd' );
-var slave1db = slave1.getDB( 'd' );
-var mastercol = masterdb[ 'c' ];
-var slave1col = slave1db[ 'c' ];
+var masterdb = master.getDB('d');
+var slave1db = slave1.getDB('d');
+var mastercol = masterdb['c'];
+var slave1col = slave1db['c'];
// create TTL index, wait for TTL monitor to kick in, then check things
-mastercol.ensureIndex( { x : 1 } , { expireAfterSeconds : 10 } );
+mastercol.ensureIndex({x: 1}, {expireAfterSeconds: 10});
rt.awaitReplication();
-//increase logging
-assert.commandWorked(slave1col.getDB().adminCommand({setParameter:1, logLevel:1}));
+// increase logging
+assert.commandWorked(slave1col.getDB().adminCommand({setParameter: 1, logLevel: 1}));
-//insert old doc (10 minutes old) directly on secondary using godinsert
-assert.commandWorked(slave1col.runCommand("godinsert",
- {obj: {_id: new Date(), x: new Date( (new Date()).getTime() - 600000 ) } }));
-assert.eq(1, slave1col.count(), "missing inserted doc" );
+// insert old doc (10 minutes old) directly on secondary using godinsert
+assert.commandWorked(slave1col.runCommand(
+ "godinsert", {obj: {_id: new Date(), x: new Date((new Date()).getTime() - 600000)}}));
+assert.eq(1, slave1col.count(), "missing inserted doc");
-sleep(70*1000); //wait for 70seconds
-assert.eq(1, slave1col.count(), "ttl deleted my doc!" );
+sleep(70 * 1000);  // wait for 70 seconds
+assert.eq(1, slave1col.count(), "ttl deleted my doc!");
// looking for these errors : "Assertion: 13312:replSet error : logOp() but not primary",
// "replSet error : logOp() but can't accept write to collection <ns>/n" + "Fatal Assertion 17405"
@@ -38,13 +38,13 @@ assert.eq(1, slave1col.count(), "ttl deleted my doc!" );
var errorStrings = ["Assertion: 13312", "Assertion 17405"];
var foundError = false;
var foundLine = "";
-var globalLogLines = assert.commandWorked(slave1col.getDB().adminCommand({getLog:"global"})).log;
+var globalLogLines = assert.commandWorked(slave1col.getDB().adminCommand({getLog: "global"})).log;
for (i in globalLogLines) {
var line = globalLogLines[i];
errorStrings.forEach(function(errorString) {
- if (line.match( errorString )) {
+ if (line.match(errorString)) {
foundError = true;
- foundLine = line; // replace error string with what we found.
+ foundLine = line; // replace error string with what we found.
}
});
}
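
The log-scanning idiom used above, as a standalone sketch; the pattern searched for is purely illustrative:

    // Sketch only: fetch the in-memory global log and look for a pattern in it.
    var lines = assert.commandWorked(db.adminCommand({getLog: "global"})).log;
    var found = lines.some(function(line) {
        return /connection accepted/.test(line);  // illustrative pattern only
    });
    print("matched pattern in global log: " + found);
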
diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js
index e1c550d74c6..d6896665b65 100644
--- a/jstests/noPassthroughWithMongod/ttl_sharded.js
+++ b/jstests/noPassthroughWithMongod/ttl_sharded.js
@@ -7,85 +7,76 @@
*/
// start up a new sharded cluster
-var s = new ShardingTest({ shards : 2, mongos : 1});
+var s = new ShardingTest({shards: 2, mongos: 1});
var dbname = "testDB";
var coll = "ttl_sharded";
var ns = dbname + "." + coll;
-t = s.getDB( dbname ).getCollection( coll );
+t = s.getDB(dbname).getCollection(coll);
// enable sharding of the collection. Only 1 chunk initially
-s.adminCommand( { enablesharding : dbname } );
+s.adminCommand({enablesharding: dbname});
s.ensurePrimaryShard(dbname, 'shard0001');
-s.adminCommand( { shardcollection : ns , key: { _id : 1 } } );
+s.adminCommand({shardcollection: ns, key: {_id: 1}});
// insert 24 docs, with timestamps at one hour intervals
var now = (new Date()).getTime();
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 24; i++) {
- var past = new Date( now - ( 3600 * 1000 * i ) );
- bulk.insert({ _id: i, x: past });
+ var past = new Date(now - (3600 * 1000 * i));
+ bulk.insert({_id: i, x: past});
}
assert.writeOK(bulk.execute());
-assert.eq( t.count() , 24 , "initial docs not inserted");
+assert.eq(t.count(), 24, "initial docs not inserted");
// create the TTL index which deletes anything older than ~5.5 hours
-t.ensureIndex( { x : 1 } , { expireAfterSeconds : 20000 } );
+t.ensureIndex({x: 1}, {expireAfterSeconds: 20000});
// split chunk in half by _id, and move one chunk to the other shard
-s.adminCommand( {split : ns , middle : {_id : 12 } } );
-s.adminCommand( {moveChunk : ns ,
- find : {_id : 0} ,
- to : s.getOther(s.getPrimaryShard(dbname)).name } );
+s.adminCommand({split: ns, middle: {_id: 12}});
+s.adminCommand({moveChunk: ns, find: {_id: 0}, to: s.getOther(s.getPrimaryShard(dbname)).name});
// one shard will lose 12/12 docs, the other 6/12, so count will go
// from 24 -> 18 or 12 -> 6
-assert.soon(
- function() {
- return t.count() < 7;
- }, "TTL index on x didn't delete enough" , 70 * 1000
-);
+assert.soon(function() {
+ return t.count() < 7;
+}, "TTL index on x didn't delete enough", 70 * 1000);
// ensure that count ultimately ends up at 6
-assert.eq( 0 , t.find( { x : { $lt : new Date( now - 20000000 ) } } ).count() );
-assert.eq( 6 , t.count() );
+assert.eq(0, t.find({x: {$lt: new Date(now - 20000000)}}).count());
+assert.eq(6, t.count());
// now let's check things explicitly on each shard
-var shard0 = s._connections[0].getDB( dbname );
-var shard1 = s._connections[1].getDB( dbname );
+var shard0 = s._connections[0].getDB(dbname);
+var shard1 = s._connections[1].getDB(dbname);
print("Shard 0 coll stats:");
-printjson( shard0.getCollection( coll ).stats() );
+printjson(shard0.getCollection(coll).stats());
print("Shard 1 coll stats:");
-printjson( shard1.getCollection( coll ).stats() );
+printjson(shard1.getCollection(coll).stats());
-
-function getTTLTime( theCollection, theKey ) {
+function getTTLTime(theCollection, theKey) {
var indexes = theCollection.getIndexes();
- for ( var i = 0; i < indexes.length; i++ ) {
- if ( friendlyEqual( theKey, indexes[i].key ) )
+ for (var i = 0; i < indexes.length; i++) {
+ if (friendlyEqual(theKey, indexes[i].key))
return indexes[i].expireAfterSeconds;
}
throw "not found";
}
// Check that TTL index (with expireAfterSeconds field) appears on both shards
-assert.eq( 20000, getTTLTime( shard0.getCollection(coll), { x : 1 } ) );
-assert.eq( 20000, getTTLTime( shard1.getCollection(coll), { x : 1 } ) );
+assert.eq(20000, getTTLTime(shard0.getCollection(coll), {x: 1}));
+assert.eq(20000, getTTLTime(shard1.getCollection(coll), {x: 1}));
// Check that the collMod command successfully updates the expireAfterSeconds field
-s.getDB( dbname ).runCommand( { collMod : coll,
- index : { keyPattern : {x : 1}, expireAfterSeconds : 10000} } );
-assert.eq( 10000, getTTLTime( shard0.getCollection(coll), { x : 1 } ) );
-assert.eq( 10000, getTTLTime( shard1.getCollection(coll), { x : 1 } ) );
+s.getDB(dbname).runCommand({collMod: coll, index: {keyPattern: {x: 1}, expireAfterSeconds: 10000}});
+assert.eq(10000, getTTLTime(shard0.getCollection(coll), {x: 1}));
+assert.eq(10000, getTTLTime(shard1.getCollection(coll), {x: 1}));
-assert.soon(
- function() {
- return t.count() < 6;
- }, "new expireAfterSeconds value not taking effect" , 70 * 1000
-);
-assert.eq( 0 , t.find( { x : { $lt : new Date( now - 10000000 ) } } ).count() );
-assert.eq( 3 , t.count() );
+assert.soon(function() {
+ return t.count() < 6;
+}, "new expireAfterSeconds value not taking effect", 70 * 1000);
+assert.eq(0, t.find({x: {$lt: new Date(now - 10000000)}}).count());
+assert.eq(3, t.count());
s.stop();
-
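
The per-shard inspection idiom used above (s._connections), as a standalone sketch with hypothetical database and collection names:

    // Sketch only: reach past mongos and inspect each shard's copy of a collection directly.
    var st = new ShardingTest({shards: 2, mongos: 1});
    st.adminCommand({enablesharding: "sketchDB"});
    st.adminCommand({shardcollection: "sketchDB.c", key: {_id: 1}});
    assert.writeOK(st.getDB("sketchDB").c.insert({_id: 1, x: new Date()}));
    // Each entry in st._connections is a direct connection to one shard.
    st._connections.forEach(function(shardConn, n) {
        print("shard " + n + " holds " + shardConn.getDB("sketchDB").c.count() + " doc(s)");
    });
    st.stop();
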
diff --git a/jstests/noPassthroughWithMongod/unix_socket1.js b/jstests/noPassthroughWithMongod/unix_socket1.js
index 3cd64c3370e..d6f10062565 100644
--- a/jstests/noPassthroughWithMongod/unix_socket1.js
+++ b/jstests/noPassthroughWithMongod/unix_socket1.js
@@ -1,5 +1,5 @@
doesLogMatchRegex = function(logArray, regex) {
- for (var i = (logArray.length - 1); i >= 0; i--) {
+ for (var i = (logArray.length - 1); i >= 0; i--) {
var regexInLine = regex.exec(logArray[i]);
if (regexInLine != null) {
return true;
@@ -8,11 +8,10 @@ doesLogMatchRegex = function(logArray, regex) {
return false;
};
-
-if ( ! _isWindows() ) {
+if (!_isWindows()) {
hoststring = db.getMongo().host;
index = hoststring.lastIndexOf(':');
- if (index == -1){
+ if (index == -1) {
port = '27017';
} else {
port = hoststring.substr(index + 1);
@@ -20,24 +19,24 @@ if ( ! _isWindows() ) {
sock = new Mongo('/tmp/mongodb-' + port + '.sock');
sockdb = sock.getDB(db.getName());
- assert( sockdb.runCommand('ping').ok );
+ assert(sockdb.runCommand('ping').ok);
// Test unix socket path
var path = MongoRunner.dataDir + "/sockpath";
mkdir(path);
var dataPath = MongoRunner.dataDir + "/sockpath_data";
-
+
var conn = MongoRunner.runMongod({dbpath: dataPath, unixSocketPrefix: path});
-
+
var sock2 = new Mongo(path + "/mongodb-" + conn.port + ".sock");
sockdb2 = sock2.getDB(db.getName());
- assert( sockdb2.runCommand('ping').ok );
+ assert(sockdb2.runCommand('ping').ok);
// Test the naming of the unix socket
- var log = db.adminCommand({ getLog: 'global' });
+ var log = db.adminCommand({getLog: 'global'});
var ll = log.log;
var re = new RegExp("anonymous unix socket");
- assert( doesLogMatchRegex( ll, re ), "Log message did not contain 'anonymous unix socket'");
+ assert(doesLogMatchRegex(ll, re), "Log message did not contain 'anonymous unix socket'");
} else {
print("Not testing unix sockets on Windows");
}
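
The socket-path convention this test relies on, as a standalone sketch; it assumes a non-Windows host and a mongod using the default /tmp socket prefix:

    // Sketch only: connect over the UNIX domain socket instead of TCP.
    if (!_isWindows()) {
        var host = db.getMongo().host;
        var idx = host.lastIndexOf(':');
        var port = (idx == -1) ? '27017' : host.substr(idx + 1);
        // By default mongod creates its socket as /tmp/mongodb-<port>.sock.
        var sockConn = new Mongo('/tmp/mongodb-' + port + '.sock');
        assert(sockConn.getDB('admin').runCommand('ping').ok, 'ping over the socket failed');
    }
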
diff --git a/jstests/noPassthroughWithMongod/validate_command.js b/jstests/noPassthroughWithMongod/validate_command.js
index 6e243070142..9c52c9acad7 100644
--- a/jstests/noPassthroughWithMongod/validate_command.js
+++ b/jstests/noPassthroughWithMongod/validate_command.js
@@ -23,19 +23,18 @@
t = db.jstests_validate;
t.drop();
- for(var i = 0; i < count; i++){
- t.insert({x:i});
+ for (var i = 0; i < count; i++) {
+ t.insert({x: i});
}
- t.ensureIndex({x:1}, {name: "forward"});
- t.ensureIndex({x:-1}, {name: "reverse"});
-
+ t.ensureIndex({x: 1}, {name: "forward"});
+ t.ensureIndex({x: -1}, {name: "reverse"});
// TEST NORMAL VALIDATE
var output = t.validate();
testValidate(output);
// TEST FULL
- var output = t.validate({full:true});
+ var output = t.validate({full: true});
testValidate(output);
}()); \ No newline at end of file
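
For reference, a minimal standalone sketch of the two validate() calls exercised above (collection name is hypothetical):

    // Sketch only: validate scans a collection and its indexes; {full: true} is more thorough.
    var c = db.validate_sketch;  // hypothetical collection
    c.drop();
    assert.writeOK(c.insert({x: 1}));
    var res = c.validate({full: true});
    assert.commandWorked(res);
    assert(res.valid, "collection failed validation: " + tojson(res));
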
diff --git a/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js b/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
index d927cbb541a..65ebef5ccf3 100644
--- a/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
+++ b/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
@@ -24,25 +24,21 @@
var collStats = db.runCommand({collStats: collNamePrefix + '.source'});
assert.commandWorked(collStats);
- assert.commandWorked(db.runCommand({
- create: collNamePrefix + '.dest',
- storageEngine: {
- wiredTiger: {
- configString: collStats.wiredTiger.creationString
- }
- }
- }), 'unable to create collection using the creation string of another collection');
+ assert.commandWorked(
+ db.runCommand({
+ create: collNamePrefix + '.dest',
+ storageEngine: {wiredTiger: {configString: collStats.wiredTiger.creationString}}
+ }),
+ 'unable to create collection using the creation string of another collection');
assert.commandWorked(db.runCommand({
createIndexes: collNamePrefix + '.dest',
indexes: [{
key: {b: 1},
name: 'b_1',
- storageEngine: {
- wiredTiger: {
- configString: collStats.indexDetails.a_1.creationString
- }
- }
+ storageEngine:
+ {wiredTiger: {configString: collStats.indexDetails.a_1.creationString}}
}]
- }), 'unable to create index using the creation string of another index');
+ }),
+ 'unable to create index using the creation string of another index');
})();
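
The creation-string round trip shown above, condensed into a standalone sketch; collection names are hypothetical and the wiredTiger storage engine is assumed:

    // Sketch only: a collection's WiredTiger creation string can be fed back into 'create'.
    db.wt_sketch_src.drop();
    db.wt_sketch_dest.drop();
    assert.commandWorked(db.createCollection('wt_sketch_src'));
    var stats = assert.commandWorked(db.runCommand({collStats: 'wt_sketch_src'}));
    assert.commandWorked(db.runCommand({
        create: 'wt_sketch_dest',
        storageEngine: {wiredTiger: {configString: stats.wiredTiger.creationString}}
    }));
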