diff options
author | clang-format-7.0.1 <adam.martin@10gen.com> | 2019-07-26 18:42:24 -0400 |
---|---|---|
committer | ADAM David Alan Martin <adam.martin@10gen.com> | 2019-07-26 18:42:24 -0400 |
commit | c1a45ebbb0530e3d0201321d725527f1eb83ffce (patch) | |
tree | f523079dc5ded3052eefbdcaae424b7502df5b25 /jstests/noPassthrough/inmem_full.js | |
parent | c9599d8610c3da0b7c3da65667aff821063cf5b9 (diff) | |
download | mongo-c1a45ebbb0530e3d0201321d725527f1eb83ffce.tar.gz |
Apply formatting per `clang-format-7.0.1`
Diffstat (limited to 'jstests/noPassthrough/inmem_full.js')
-rw-r--r-- | jstests/noPassthrough/inmem_full.js | 140 |
1 file changed, 70 insertions, 70 deletions
// SERVER-22599 Test behavior of in-memory storage engine with full cache.
//
// Starts a mongod with the inMemory storage engine and a fixed cache size,
// inserts ~1MB batches of random documents until the engine reports
// ExceededMemoryLimit, then verifies that:
//   * exactly one write error was raised, with the expected code;
//   * the engine filled up at a plausible point (75%-150% of the cache);
//   * further writes (index build, $out aggregate) also fail;
//   * reads still succeed against the already-inserted data.
(function() {
'use strict';

// This test only makes sense for the inMemory engine; skip otherwise.
if (jsTest.options().storageEngine !== "inMemory") {
    jsTestLog("Skipping test because storageEngine is not inMemory");
    return;
}

Random.setRandomSeed();

// Return array of approximately 1kB worth of random numbers.
// (85 doubles * ~12 bytes each in BSON ~= 1kB.)
function randomArray() {
    var arr = [];
    for (var j = 0; j < 85; j++)
        arr[j] = Random.rand();
    return arr;
}

// Return a document of approximately 10kB in size with arrays of random numbers.
// One ~1kB array per field 'a' through 'j'.
function randomDoc() {
    var doc = {};
    for (var c of "abcdefghij")
        doc[c] = randomArray();
    return doc;
}

// Return an array with random documents totalling about 1Mb.
function randomBatch(batchSize) {
    var batch = [];
    for (var j = 0; j < batchSize; j++)
        batch[j] = randomDoc();
    return batch;
}

const cacheMB = 128;
const cacheKB = 1024 * cacheMB;
// Measure the actual BSON size of one document so the 75%/150% bounds below
// track the real payload rather than the nominal 10kB estimate.
const docSizeKB = Object.bsonsize(randomDoc()) / 1024;
const batchSize = 100;
const batch = randomBatch(batchSize);

var mongod = MongoRunner.runMongod({
    storageEngine: 'inMemory',
    inMemoryEngineConfigString: 'cache_size=' + cacheMB + "M,",
});
assert.neq(null, mongod, "mongod failed to start up with --inMemoryEngineConfigString");
var db = mongod.getDB("test");
var t = db.large;

// Insert documents until full. 1000 batches is far more than the cache can
// hold, so the loop is expected to exit via the hasErrors() break.
var res;
var count = 0;
for (var j = 0; j < 1000; j++) {
    res = t.insert(batch);
    assert.gte(res.nInserted, 0, tojson(res));
    count += res.nInserted;
    if (res.hasErrors())
        break;
    assert.eq(res.nInserted, batchSize, tojson(res));
    print("Inserted " + count + " documents");
}
assert.writeError(res, "didn't get ExceededMemoryLimit but should have");
print("Inserted " + count + " documents");

// Should have encountered exactly one memory full error.
assert.eq(res.getWriteErrorCount(), 1, tojson(res));
assert.eq(res.getWriteErrorAt(0).code, ErrorCodes.ExceededMemoryLimit, tojson(res));

// Should encounter memory full at between 75% and 150% of total capacity.
assert.gt(count * docSizeKB, cacheKB * 0.75, "inserted data size is at least 75% of capacity");
assert.lt(count * docSizeKB, cacheKB * 1.50, "inserted data size is at most 150% of capacity");

// Indexes are sufficiently large that it should be impossible to add a new one.
assert.commandFailedWithCode(t.createIndex({a: 1}), ErrorCodes.ExceededMemoryLimit);

// An aggregate copying all 'a' and 'b' fields should run out of memory.
// Can't test the specific error code, because it depends on whether the collection
// creation already fails, or just the writing. Agg wraps the original error code.
assert.commandFailed(
    t.runCommand("aggregate", {pipeline: [{$project: {a: 1, b: 1}}, {$out: "test.out"}]}));

// Should still be able to query.
assert.eq(t.find({}).itcount(), count, "cannot find expected number of documents");
assert.eq(t.aggregate([{$group: {_id: null, count: {$sum: 1}}}]).next().count,
          count,
          "cannot aggregate expected number of documents");
}());