author     Spencer T Brody <spencer@mongodb.com>    2014-12-17 12:50:17 -0500
committer  Spencer T Brody <spencer@mongodb.com>    2014-12-18 15:25:42 -0500
commit     4a1a4010830fdf3b2d737164ce09c669fd9ed738 (patch)
tree       fdd76c75df4ca656eb57ea4441da82b9c12d096b
parent     a8fb1429cb3099e0dbbfdbd930693810bbf291b1 (diff)
download   mongo-4a1a4010830fdf3b2d737164ce09c669fd9ed738.tar.gz
SERVER-14306 Make sure mongos never requests more results than needed from the shards
-rw-r--r--  jstests/sharding/in_memory_sort_limit.js | 50
-rw-r--r--  src/mongo/client/parallel.cpp            |  6
-rw-r--r--  src/mongo/client/parallel.h              |  7
-rw-r--r--  src/mongo/s/cursors.cpp                  |  5
4 files changed, 68 insertions(+), 0 deletions(-)
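
In outline, the fix has two halves: the cursors.cpp hunk below caps the shard cursor's batch size after every document returned to the client, and the new `ParallelSortClusteredCursor::setBatchSize` fans that cap out to each per-shard cursor. Here is a standalone sketch of the effect; `ShardCursor`, `run`, and `perBatch` are hypothetical stand-ins for illustration, not MongoDB's actual API:

```cpp
// Minimal standalone sketch of the idea behind this patch. Names here
// (ShardCursor, run, perBatch) are hypothetical, not MongoDB's real classes.
#include <algorithm>
#include <cstdio>

class ShardCursor {
public:
    void setBatchSize(int n) { _batchSize = n; }
    // Simulate one getMore round trip: log what the router asked the shard for.
    void getMore() const { std::printf("  getMore requests %d docs\n", _batchSize); }
private:
    int _batchSize = 0;
};

// Stream 'ntoreturn' docs to the client, handing over 'perBatch' docs per
// router reply (perBatch stands in for the reply-buffer size cap in the
// real batching loop in cursors.cpp).
void run(bool capped, int ntoreturn, int perBatch) {
    std::printf("%s:\n", capped ? "with the fix" : "without the fix");
    ShardCursor cursor;
    cursor.setBatchSize(ntoreturn);  // the initial query already asks for the limit
    int docCount = 0;
    while (docCount < ntoreturn) {
        cursor.getMore();
        docCount = std::min(docCount + perBatch, ntoreturn);
        if (capped && ntoreturn != 0)                   // 0 means "no limit"
            cursor.setBatchSize(ntoreturn - docCount);  // the patch's key line
    }
}

int main() {
    run(false, 4000, 1000);  // asks the shard for 4000 docs four times
    run(true,  4000, 1000);  // asks for 4000, then 3000, 2000, 1000
    return 0;
}
```

Without the cap, every follow-up request asks the shard for the full original window, so its top-k sort buffer stays as large as the original limit; with the cap, each request covers only what the client still needs.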
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
new file mode 100644
index 00000000000..427471e3a9e
--- /dev/null
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -0,0 +1,50 @@
+// Test SERVER-14306. Do a query directly against a mongod with an in-memory sort and a limit that
+// doesn't cause the in-memory sort limit to be reached, then make sure the same limit also doesn't
+// cause the in-memory sort limit to be reached when running through a mongos.
+(function() {
+    "use strict";
+
+    var st = new ShardingTest({ shards: 2, chunkSize: 1, other: {separateConfig: true}});
+    // Force write commands mode; this is a backport of a new test.
+    st.s0.forceWriteMode("commands");
+    st.shard0.forceWriteMode("commands");
+    st.shard1.forceWriteMode("commands");
+
+    var db = st.s.getDB('test');
+    var mongosCol = db.getCollection('skip');
+    db.adminCommand({ enableSharding: 'test' });
+    db.adminCommand({ shardCollection: 'test.skip', key: { _id: 1 }});
+    // Disable balancing of this collection
+    assert.writeOK(db.getSiblingDB('config').collections.update({_id: 'test.skip'},
+                                                                {$set: {noBalance: true}}));
+
+
+    var filler = new Array(10000).toString();
+    var bulk = [];
+    // create enough data to exceed 32MB in-memory sort limit.
+    for (var i = 0; i < 20000; i++) {
+        bulk.push({x: i, str: filler});
+    }
+    assert.writeOK(mongosCol.insert(bulk));
+    // Make sure that at least 1 doc is on another shard so that mongos doesn't treat this as a
+    // single-shard query (which doesn't exercise the bug)
+    assert.commandWorked(db.getSiblingDB('admin').runCommand({moveChunk: 'test.skip',
+                                                              find: {_id: 1},
+                                                              to: 'shard0001'}));
+
+    var docCount = mongosCol.count();
+    var shardCol = st.shard0.getDB('test').getCollection('skip');
+    var passLimit = 2000;
+    var failLimit = 4000;
+    jsTestLog("Test no error with limit of " + passLimit + " on mongod");
+    assert.eq(passLimit, shardCol.find().sort({x: 1}).limit(passLimit).itcount());
+
+    jsTestLog("Test error with limit of " + failLimit + " on mongod");
+    assert.throws(function() { shardCol.find().sort({x: 1}).limit(failLimit).itcount(); });
+
+    jsTestLog("Test no error with limit of " + passLimit + " on mongos");
+    assert.eq(passLimit, mongosCol.find().sort({x: 1}).limit(passLimit).itcount());
+
+    jsTestLog("Test error with limit of " + failLimit + " on mongos");
+    assert.throws(function() { mongosCol.find().sort({x: 1}).limit(failLimit).itcount(); });
+})();
\ No newline at end of file
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 491094a8033..4316c0ef29a 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -1456,6 +1456,12 @@ namespace mongo {
         _done = true;
     }

+    void ParallelSortClusteredCursor::setBatchSize(int newBatchSize) {
+        for ( int i=0; i<_numServers; i++ ) {
+            _cursors[i].setBatchSize(newBatchSize);
+        }
+    }
+
     bool ParallelSortClusteredCursor::more() {

         if ( _needToSkip > 0 ) {
diff --git a/src/mongo/client/parallel.h b/src/mongo/client/parallel.h
index c8c5af13be8..a596bddcf7e 100644
--- a/src/mongo/client/parallel.h
+++ b/src/mongo/client/parallel.h
@@ -192,6 +192,11 @@ namespace mongo {
        bool isExplain(){ return _qSpec.isExplain(); }
        bool isVersioned(){ return _qShards.size() == 0; }

+       /**
+        * Sets the batch size on all underlying cursors to 'newBatchSize'.
+        */
+       void setBatchSize(int newBatchSize);
+
        bool isSharded();
        ShardPtr getPrimary();
        void getQueryShards( set<Shard>& shards );
@@ -273,6 +278,8 @@ namespace mongo {

        BSONObj peek();

+       void setBatchSize(int newBatchSize) { _cursor->setBatchSize(newBatchSize); }
+
        DBClientCursor* raw() { return _cursor.get(); }
        ParallelConnectionMetadata* rawMData(){ return _pcmData; }
diff --git a/src/mongo/s/cursors.cpp b/src/mongo/s/cursors.cpp
index 4931c9b9459..f4a9f0e4573 100644
--- a/src/mongo/s/cursors.cpp
+++ b/src/mongo/s/cursors.cpp
@@ -136,6 +136,11 @@ namespace mongo {
            buffer.appendBuf( (void*)o.objdata() , o.objsize() );
            docCount++;
+           // Ensure that the next batch will never wind up requesting more docs from the shard
+           // than are remaining to satisfy the initial ntoreturn.
+           if (ntoreturn != 0) {
+               _cursor->setBatchSize(ntoreturn - docCount);
+           }
            if ( buffer.len() > maxSize ) {
                break;
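
Two details of the cursors.cpp hunk are easy to miss. The `ntoreturn != 0` guard exists because, in the legacy wire protocol, an ntoreturn of 0 means the client set no limit, so there is no remainder to cap against. And since `docCount` only grows inside the loop, `ntoreturn - docCount` only shrinks, so the last value set before the reply goes out is exactly the number of documents still owed to the client. `ParallelSortClusteredCursor::setBatchSize` then applies that bound to every shard's cursor rather than dividing it among them, because in the worst case a single shard could supply all of the remaining documents.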