author     James Wahlin <james@mongodb.com>  2018-06-22 15:47:08 -0400
committer  James Wahlin <james@mongodb.com>  2018-06-29 13:23:58 -0400
commit     a86f311e3f8f50f8f2ed37f223da17f12da855a7 (patch)
tree       d2deffb0e3521486edb236ffe1d6dbbf3cdd612c
parent     a8a6d58c59eef1ee58a9fd8e83a51d87ae961f38 (diff)
download   mongo-a86f311e3f8f50f8f2ed37f223da17f12da855a7.tar.gz
SERVER-34956 Replace big_object1.js with max_doc_size.js
(cherry picked from commit 051262cc7b3e9584602c364b8cf803d31d47d5f8)
-rw-r--r--  jstests/core/big_object1.js    53
-rw-r--r--  jstests/core/max_doc_size.js  125
2 files changed, 70 insertions(+), 108 deletions(-)
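The replacement test sizes its payload by subtracting the fixed BSON overhead of an {_id: ObjectId, x: ''} document from the server-reported maxBsonObjectSize, so the padded document lands exactly on the limit. A minimal shell sketch of that arithmetic (illustrative only; the variable names are ad hoc and the 16 MB figure assumes a default build):

    // Largest legal document = server limit minus fixed per-document overhead.
    const limit = db.isMaster().maxBsonObjectSize;                   // 16777216 (16 MB) by default
    const overhead = Object.bsonsize({_id: new ObjectId(), x: ''});  // _id bytes + field names + BSON framing
    const payload = 'a'.repeat(limit - overhead);
    // The padded document now measures exactly maxBsonObjectSize.
    assert.eq(Object.bsonsize({_id: new ObjectId(), x: payload}), limit);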
diff --git a/jstests/core/big_object1.js b/jstests/core/big_object1.js
deleted file mode 100644
index 7ed4c92dad6..00000000000
--- a/jstests/core/big_object1.js
+++ /dev/null
@@ -1,53 +0,0 @@
-// @tags: [requires_fastcount, requires_collstats]
-
-t = db.big_object1;
-t.drop();
-
-if (db.adminCommand("buildinfo").bits == 64) {
- var large = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
- var s = large;
- while (s.length < 850 * 1024) {
- s += large;
- }
- x = 0;
- while (true) {
- var result;
- n = {_id: x, a: []};
- for (i = 0; i < 14 + x; i++)
- n.a.push(s);
- try {
- result = t.insert(n);
- o = n;
- } catch (e) {
- break;
- }
-
- if (result.hasWriteError())
- break;
- x++;
- }
-
- printjson(t.stats(1024 * 1024));
-
- assert.lt(15 * 1024 * 1024, Object.bsonsize(o), "A1");
- assert.gt(17 * 1024 * 1024, Object.bsonsize(o), "A2");
-
- assert.eq(x, t.count(), "A3");
-
- for (i = 0; i < x; i++) {
- o = t.findOne({_id: i});
- try {
- // test large mongo -> js conversion
- var a = o.a;
- } catch (e) {
- assert(false, "Caught exception trying to insert during iteration " + i + ": " + e);
- }
- assert(o, "B" + i);
- }
-
- t.drop();
-} else {
- print("skipping big_object1 b/c not 64-bit");
-}
-
-print("SUCCESS");
diff --git a/jstests/core/max_doc_size.js b/jstests/core/max_doc_size.js
index 5180add4f87..4c0ddd98f85 100644
--- a/jstests/core/max_doc_size.js
+++ b/jstests/core/max_doc_size.js
@@ -1,55 +1,70 @@
-var maxBsonObjectSize = db.isMaster().maxBsonObjectSize;
-var docOverhead = Object.bsonsize({_id: new ObjectId(), x: ''});
-var maxStrSize = maxBsonObjectSize - docOverhead;
-var maxStr = 'a'.repeat(maxStrSize);
-var coll = db.max_doc_size;
-
-coll.drop();
-var res = db.runCommand({insert: coll.getName(), documents: [{_id: new ObjectId(), x: maxStr}]});
-assert(res.ok);
-assert.eq(null, res.writeErrors);
-
-coll.drop();
-res = db.runCommand({
- update: coll.getName(),
- ordered: true,
- updates: [{q: {a: 1}, u: {_id: new ObjectId(), x: maxStr}, upsert: true}]
-});
-assert(res.ok);
-assert.eq(null, res.writeErrors);
-
-coll.drop();
-var id = new ObjectId();
-coll.insert({_id: id});
-res = db.runCommand(
- {update: coll.getName(), ordered: true, updates: [{q: {_id: id}, u: {$set: {x: maxStr}}}]});
-assert(res.ok);
-assert.eq(null, res.writeErrors);
-
-//
-// Test documents over the size limit.
-//
-
-var overBigStr = maxStr + 'a';
-
-coll.drop();
-res = db.runCommand({insert: coll.getName(), documents: [{_id: new ObjectId(), x: overBigStr}]});
-assert(res.ok);
-assert.neq(null, res.writeErrors);
-
-coll.drop();
-res = db.runCommand({
- update: coll.getName(),
- ordered: true,
- updates: [{q: {a: 1}, u: {_id: new ObjectId(), x: overBigStr}, upsert: true}]
-});
-assert(res.ok);
-assert.neq(null, res.writeErrors);
-
-coll.drop();
-id = new ObjectId();
-coll.insert({_id: id});
-res = db.runCommand(
- {update: coll.getName(), ordered: true, updates: [{q: {_id: id}, u: {$set: {x: overBigStr}}}]});
-assert(res.ok);
-assert.neq(null, res.writeErrors);
+/**
+ * Confirms that:
+ * - Documents at the maximum BSON size limit can be written and read back.
+ * - Documents over the maximum BSON size limit cannot be written.
+ */
+(function() {
+ 'use strict';
+
+ const maxBsonObjectSize = db.isMaster().maxBsonObjectSize;
+ const docOverhead = Object.bsonsize({_id: new ObjectId(), x: ''});
+ const maxStrSize = maxBsonObjectSize - docOverhead;
+ const maxStr = 'a'.repeat(maxStrSize);
+ const coll = db.max_doc_size;
+
+ //
+ // Test that documents at the size limit can be written and read back.
+ //
+ coll.drop();
+    assert.commandWorked(
+        db.runCommand({insert: coll.getName(), documents: [{_id: new ObjectId(), x: maxStr}]}));
+ assert.eq(coll.find({}).itcount(), 1);
+
+ coll.drop();
+ assert.commandWorked(db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {a: 1}, u: {_id: new ObjectId(), x: maxStr}, upsert: true}]
+ }));
+ assert.eq(coll.find({}).itcount(), 1);
+
+ coll.drop();
+ const objectId = new ObjectId();
+ assert.writeOK(coll.insert({_id: objectId}));
+ assert.commandWorked(db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {_id: objectId}, u: {$set: {x: maxStr}}}]
+ }));
+ assert.eq(coll.find({}).itcount(), 1);
+
+ //
+ // Test that documents over the size limit cannot be written.
+ //
+ const largerThanMaxString = maxStr + 'a';
+
+ coll.drop();
+ let res = db.runCommand(
+ {insert: coll.getName(), documents: [{_id: new ObjectId(), x: largerThanMaxString}]});
+ assert(res.ok);
+ assert.neq(null, res.writeErrors);
+
+ coll.drop();
+ res = db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {a: 1}, u: {_id: new ObjectId(), x: largerThanMaxString}, upsert: true}]
+ });
+ assert(res.ok);
+ assert.neq(null, res.writeErrors);
+
+ coll.drop();
+ assert.writeOK(coll.insert({_id: objectId}));
+ res = db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {_id: objectId}, u: {$set: {x: largerThanMaxString}}}]
+ });
+ assert(res.ok);
+ assert.neq(null, res.writeErrors);
+})();
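Note on the over-limit assertions: the insert and update commands report per-document failures in a writeErrors array while the command itself still returns ok: 1, which is why the test asserts res.ok together with a non-null res.writeErrors rather than expecting the command to fail outright. To run the new test standalone against a local server, an invocation along these lines should work (the suite name here is an assumption; it varies by branch):

    python buildscripts/resmoke.py --suites=core jstests/core/max_doc_size.js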