author:    Mathias Stearn <mathias@10gen.com>    2014-03-27 16:15:09 -0400
committer: Mathias Stearn <mathias@10gen.com>    2014-03-27 17:35:16 -0400
commit:    d0a1e84ab2fa1b6aa699721b5cb9a4f8d0bf3692 (patch)
tree:      7a1ffc91cb6cb176c1e367ea7641ab05032c862c /jstests/noPassthroughWithMongod/bulk_api_limits.js
parent:    719134aa7985c0a697f199fc78e323d04e3a65ad (diff)
SERVER-13391 Rename slowNightly -> noPassthroughWithMongod and slowWeekly -> noPassthrough
This better represents their purpose and the difference between them.
Diffstat (limited to 'jstests/noPassthroughWithMongod/bulk_api_limits.js')
-rw-r--r--  jstests/noPassthroughWithMongod/bulk_api_limits.js  | 158
1 file changed, 158 insertions(+), 0 deletions(-)
diff --git a/jstests/noPassthroughWithMongod/bulk_api_limits.js b/jstests/noPassthroughWithMongod/bulk_api_limits.js
new file mode 100644
index 00000000000..ab3e468f373
--- /dev/null
+++ b/jstests/noPassthroughWithMongod/bulk_api_limits.js
@@ -0,0 +1,158 @@
+var collectionName = "bulk_api_limits";
+var coll = db.getCollection(collectionName);
+coll.drop();
+
+jsTest.log("Starting unordered bulk tests...");
+
+var request;
+var result;
+
+/********************************************************
+ *
+ * Unordered tests should return the same results for write
+ * commands as for the legacy operations
+ *
+ *******************************************************/
+var executeTestsUnordered = function() {
+ // Reset the collection and create a unique index on {a: 1}
+ coll.dropIndexes();
+ coll.remove({});
+ coll.ensureIndex({a : 1}, {unique : true});
+
+ /**
+ * Fail during batch construction due to single document > maxBSONSize
+ */
+ // Set up a giant string to blow through the max message size
+ var hugeString = "";
+ // Build a string larger than the 16MB BSON limit (1024 * 1100 * 16 bytes, ~17MB)
+ for(var i = 0; i < (1024 * 1100); i++) {
+     hugeString = hugeString + "1234567890123456";
+ }
+ }
+
+ // Set up the batch
+ var batch = coll.initializeUnorderedBulkOp();
+ batch.insert({b:1, a:1});
+ // Should fail on insert because the document exceeds the maximum BSON size
+ try {
+ batch.insert({string: hugeString});
+ assert(false);
+ } catch(err) { /* expected: document exceeds the maximum BSON size */ }
+
+ // Clear the collection and drop its indexes
+ coll.dropIndexes();
+ coll.remove({});
+
+ /**
+ * Check that batch is split when documents overflow the BSON size
+ */
+ // Set up a giant string to blow through the max message size
+ var hugeString = "";
+ // Create 4 MB strings to test splitting
+ for(var i = 0; i < (1024 * 256); i++) {
+     hugeString = hugeString + "1234567890123456";
+ }
+
+ // Insert the ~4MB document six times; the total exceeds a single write command, forcing a split into multiple batches
+ var batch = coll.initializeUnorderedBulkOp();
+ batch.insert({a:1, b: hugeString});
+ batch.insert({a:2, b: hugeString});
+ batch.insert({a:3, b: hugeString});
+ batch.insert({a:4, b: hugeString});
+ batch.insert({a:5, b: hugeString});
+ batch.insert({a:6, b: hugeString});
+ var result = batch.execute();
+ printjson(result);
+
+ // Basic properties check
+ assert.eq(6, result.nInserted);
+ assert.eq(false, result.hasWriteErrors());
+};
+
+/********************************************************
+ *
+ * Ordered tests should return the same results for write
+ * commands as for the legacy operations
+ *
+ *******************************************************/
+var executeTestsOrdered = function() {
+ /**
+ * Fail during batch construction due to single document > maxBSONSize
+ */
+ // Set up a giant string to blow through the max message size
+ var hugeString = "";
+ // Build a string larger than the 16MB BSON limit (1024 * 1100 * 16 bytes, ~17MB)
+ for(var i = 0; i < (1024 * 1100); i++) {
+     hugeString = hugeString + "1234567890123456";
+ }
+ }
+
+ // Set up the batch
+ var batch = coll.initializeOrderedBulkOp();
+ batch.insert({b:1, a:1});
+ // Should fail on insert because the document exceeds the maximum BSON size
+ try {
+ batch.insert({string: hugeString});
+ assert(false);
+ } catch(err) { /* expected: document exceeds the maximum BSON size */ }
+
+ // Clear the collection and drop its indexes
+ coll.dropIndexes();
+ coll.remove({});
+
+ /**
+ * Check that batch is split when documents overflow the BSON size
+ */
+ // Set up a giant string to blow through the max message size
+ var hugeString = "";
+ // Create 4 MB strings to test splitting
+ for(var i = 0; i < (1024 * 256); i++) {
+     hugeString = hugeString + "1234567890123456";
+ }
+
+ // Insert the ~4MB document six times; the total exceeds a single write command, forcing a split into multiple batches
+ var batch = coll.initializeOrderedBulkOp();
+ batch.insert({a:1, b: hugeString});
+ batch.insert({a:2, b: hugeString});
+ batch.insert({a:3, b: hugeString});
+ batch.insert({a:4, b: hugeString});
+ batch.insert({a:5, b: hugeString});
+ batch.insert({a:6, b: hugeString});
+ var result = batch.execute();
+
+ // Basic properties check
+ assert.eq(6, result.nInserted);
+ assert.eq(false, result.hasWriteErrors());
+
+ // Clear the collection and drop its indexes
+ coll.dropIndexes();
+ coll.remove({});
+};
+
+var buildVersion = parseInt(db.runCommand({buildInfo:1}).versionArray.slice(0, 3).join(""), 10);
+// Save the existing useWriteCommands function
+var _useWriteCommands = coll.getMongo().useWriteCommands;
+
+//
+// Only run the write command tests against servers >= 2.5.5; the legacy
+// (down-converted) path below is exercised on all servers.
+if(buildVersion >= 255) {
+ // Force the use of write commands
+ coll._mongo.useWriteCommands = function() {
+ return true;
+ };
+
+ // Execute tests using write commands
+ executeTestsUnordered();
+ executeTestsOrdered();
+}
+
+// Force the use of legacy commands
+coll._mongo.useWriteCommands = function() {
+ return false;
+};
+
+// Execute tests using legacy operations
+executeTestsUnordered();
+executeTestsOrdered();
+
+// Reset the function
+coll.getMongo().useWriteCommands = _useWriteCommands;
\ No newline at end of file
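
For reference, a minimal shell sketch (not part of the commit) of the two limits this test exercises: rejection of a single oversized document at batch-construction time, and transparent splitting of a batch whose total size exceeds one write command. It assumes a running mongod, the shell's legacy Bulk API, and a hypothetical scratch collection name.

// Sketch, under the assumptions above; collection name is illustrative only.
var sketchColl = db.getCollection("bulk_api_limits_sketch");
sketchColl.drop();

// 1. A single document larger than the 16MB maxBsonObjectSize is rejected when
//    it is added to the batch, before anything is sent to the server.
var oversized = new Array(1024 * 1100 + 1).join("1234567890123456"); // ~17MB
assert.throws(function() {
    sketchColl.initializeUnorderedBulkOp().insert({payload: oversized});
});

// 2. Six ~4MB documents fit individually but not in one write command, so the
//    shell splits them into multiple batches and still reports every insert.
var fourMB = new Array(1024 * 256 + 1).join("1234567890123456"); // 4MB
var bulk = sketchColl.initializeUnorderedBulkOp();
for (var i = 0; i < 6; i++) {
    bulk.insert({_id: i, payload: fourMB});
}
var res = bulk.execute();
assert.eq(6, res.nInserted);
assert.eq(false, res.hasWriteErrors());

Run with write commands forced on or off (as the harness above does), the sketch should report the same nInserted count and no write errors either way.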