summary refs log tree commit diff
diff options
context:
space:
mode:
author	Kaloian Manassiev <kaloian.manassiev@mongodb.com>	2016-03-31 16:21:29 -0400
committer	Kaloian Manassiev <kaloian.manassiev@mongodb.com>	2016-03-31 18:02:25 -0400
commit	3187f76049df6ea1e1b6b3525d19f988f59e4768 (patch)
tree	a1a04928879695f62fda17d258eb3a8b276756a6
parent	8d77d63b9261bd07e0b58f388deaf28d3e211eee (diff)
download	mongo-3187f76049df6ea1e1b6b3525d19f988f59e4768.tar.gz
SERVER-23453 Make in_memory_sort_limit.js not dependent on balancing time
-rw-r--r--	jstests/sharding/in_memory_sort_limit.js	37
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index 1c7e8c73447..cd907921489 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -2,32 +2,32 @@
// doesn't cause the in-memory sort limit to be reached, then make sure the same limit also doesn't
// cause the in-memory sort limit to be reached when running through a mongos.
(function() {
- "use strict";
+ 'use strict';
var st = new ShardingTest({shards: 2});
- var db = st.s.getDB('test');
- var mongosCol = db.getCollection('skip');
- db.adminCommand({enableSharding: 'test'});
- st.ensurePrimaryShard('test', 'shard0001');
- db.adminCommand({shardCollection: 'test.skip', key: {_id: 1}});
-
- var filler = new Array(10000).toString();
- var bulk = [];
- // create enough data to exceed 32MB in-memory sort limit.
- for (var i = 0; i < 20000; i++) {
- bulk.push({x: i, str: filler});
- }
- assert.writeOK(mongosCol.insert(bulk));
+ assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0000');
// Make sure that at least 1 chunk is on another shard so that mongos doesn't treat this as a
- // single-shard query (which doesn't exercise the bug).
- st.startBalancer();
- st.awaitBalance('skip', 'test');
+ // single-shard query (which doesn't exercise the bug)
+ assert.commandWorked(st.s.adminCommand(
+ {shardCollection: 'test.skip', key: {_id: 'hashed'}, numInitialChunks: 64}));
- var docCount = mongosCol.count();
+ var mongosCol = st.s.getDB('test').getCollection('skip');
var shardCol = st.shard0.getDB('test').getCollection('skip');
+
+ // Create enough data to exceed the 32MB in-memory sort limit (per shard)
+ var filler = new Array(10240).toString();
+ var bulkOp = mongosCol.initializeOrderedBulkOp();
+ for (var i = 0; i < 12800; i++) {
+ bulkOp.insert({x: i, str: filler});
+ }
+ assert.writeOK(bulkOp.execute());
+
var passLimit = 2000;
var failLimit = 4000;
+
+ // Test on MongoD
jsTestLog("Test no error with limit of " + passLimit + " on mongod");
assert.eq(passLimit, shardCol.find().sort({x: 1}).limit(passLimit).itcount());
@@ -36,6 +36,7 @@
shardCol.find().sort({x: 1}).limit(failLimit).itcount();
});
+ // Test on MongoS
jsTestLog("Test no error with limit of " + passLimit + " on mongos");
assert.eq(passLimit, mongosCol.find().sort({x: 1}).limit(passLimit).itcount());