author     Greg Studer <greg@10gen.com>           2013-08-09 15:52:05 -0400
committer  Matt Kangas <matt.kangas@10gen.com>    2013-08-09 15:32:44 -0500
commit     484fc234656308135234cfca7c184f8f8520c497 (patch)
tree       7c5e7dd5865582f4e8d3eabb049383459b05f3dd
parent     7cf37eb6d49bf8d4d19b5e608f47dbfbe50abf72 (diff)
download   mongo-484fc234656308135234cfca7c184f8f8520c497.tar.gz
SERVER-10478 fix batch limit check for _cloneLocs in migration
-rw-r--r--  jstests/slowNightly/sharding_migrate_large_docs.js | 63
-rw-r--r--  src/mongo/s/d_migrate.cpp                          |  7
2 files changed, 67 insertions, 3 deletions
```diff
diff --git a/jstests/slowNightly/sharding_migrate_large_docs.js b/jstests/slowNightly/sharding_migrate_large_docs.js
new file mode 100644
index 00000000000..6b2e7faa56b
--- /dev/null
+++ b/jstests/slowNightly/sharding_migrate_large_docs.js
@@ -0,0 +1,63 @@
+//
+// Tests migration behavior of large documents
+//
+
+var st = new ShardingTest({ shards : 2, mongos : 1,
+                            other : { separateConfig : true,
+                                      mongosOptions : { noAutoSplit : "" },
+                                      shardOptions : { /* binVersion : "latest" */ } } });
+st.stopBalancer()
+
+var mongos = st.s0;
+var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB( "admin" );
+var shards = mongos.getCollection( "config.shards" ).find().toArray();
+var shardAdmin = st.shard0.getDB( "admin" );
+
+assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
+printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
+assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+
+jsTest.log( "Preparing large insert..." );
+
+var data1MB = "x"
+while ( data1MB.length < 1024 * 1024 )
+    data1MB += data1MB;
+
+var data15MB = "";
+for ( var i = 0; i < 15; i++ ) data15MB += data1MB;
+
+var data15PlusMB = data15MB;
+for ( var i = 0; i < 1023 * 1024; i++ ) data15PlusMB += "x";
+
+print("~15MB object size is : " + Object.bsonsize({ _id : 0, d : data15PlusMB }));
+
+jsTest.log( "Inserting docs of large and small sizes..." );
+
+// Two large docs next to each other
+coll.insert({ _id : -2, d : data15PlusMB });
+coll.insert({ _id : -1, d : data15PlusMB });
+
+// Docs of assorted sizes
+coll.insert({ _id : 0, d : "x" });
+coll.insert({ _id : 1, d : data15PlusMB });
+coll.insert({ _id : 2, d : "x" });
+coll.insert({ _id : 3, d : data15MB });
+coll.insert({ _id : 4, d : "x" });
+coll.insert({ _id : 5, d : data1MB });
+coll.insert({ _id : 6, d : "x" });
+
+assert.eq( null, coll.getDB().getLastError() );
+assert.eq( 9, coll.find().itcount() );
+
+jsTest.log( "Starting migration..." );
+
+assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }).ok );
+assert( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : shards[1]._id }).ok );
+
+assert.eq( 9, coll.find().itcount() );
+
+jsTest.log( "DONE!" );
+
+st.stop();
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 69390e331b8..ec528226b27 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -589,10 +589,11 @@ namespace mongo {
             }
 
             BSONObj o = dl.obj();
-            
+
             // use the builder size instead of accumulating 'o's size so that we take into consideration
-            // the overhead of BSONArray indices
-            if ( a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
+            // the overhead of BSONArray indices, and *always* append one doc
+            if ( a.arrSize() != 0 &&
+                 a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
                 filledBuffer = true; // break out of outer while loop
                 break;
             }
```
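The substance of the C++ change is the new `a.arrSize() != 0` guard. Before the patch, a document within roughly 1KB of `BSONObjMaxUserSize` (16MB) tripped the size check even when the batch array was still empty, so the clone step could return an empty batch and the migration made no progress past such a document. With the guard, the check can only reject a document once the batch already holds one, so every batch is non-empty even if its lone document overshoots the soft limit. Below is a minimal standalone sketch of that invariant; it is not mongod code, and names such as `makeBatches`, `kMaxBatchSize`, and `kOverhead` are invented for illustration.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

namespace {

const std::size_t kMaxBatchSize = 16 * 1024 * 1024; // stands in for BSONObjMaxUserSize
const std::size_t kOverhead = 1024;                 // slack for array-index overhead, as in the patch

// Split 'docs' (document sizes in bytes) into batches. The "!current.empty()"
// guard plays the role of the patched "a.arrSize() != 0" check: a batch may
// exceed the soft limit only via its first document, so no single document
// can stall the loop. (Simplified: the real code measures the BSONArrayBuilder
// length, which already includes per-element overhead; here we sum raw sizes.)
std::vector<std::vector<std::size_t>> makeBatches(const std::vector<std::size_t>& docs) {
    std::vector<std::vector<std::size_t>> batches;
    std::vector<std::size_t> current;
    std::size_t currentLen = 0;

    for (std::size_t docSize : docs) {
        // Pre-patch, this condition lacked the emptiness guard: a near-16MB
        // document at the head of a batch tripped the limit immediately,
        // which in the real loop meant breaking out with an empty batch.
        if (!current.empty() && currentLen + docSize + kOverhead > kMaxBatchSize) {
            batches.push_back(current);
            current.clear();
            currentLen = 0;
        }
        current.push_back(docSize);
        currentLen += docSize;
    }
    if (!current.empty())
        batches.push_back(current);
    return batches;
}

} // namespace

int main() {
    // Mirror the jstest's nine documents: ~16MB ("data15PlusMB"), 15MB, 1MB,
    // and tiny ones, in the same insert order as above. The +38 approximates
    // BSON field/structure overhead on top of the raw string length.
    const std::size_t big = 15u * 1024 * 1024 + 1023 * 1024 + 38;
    std::vector<std::size_t> docs = { big, big, 1, big, 1,
                                      15u * 1024 * 1024, 1, 1u << 20, 1 };
    for (const std::vector<std::size_t>& batch : makeBatches(docs))
        std::cout << "batch of " << batch.size() << " doc(s)\n";
    return 0;
}
```

Running the sketch, each ~16MB document lands in a batch of its own (the check waves it through because the batch is empty at that point), while the smaller documents pack together up to the limit. In the real `_cloneLocs` path the overflow check breaks out and returns whatever has been built so far, so without the guard a near-16MB head document meant returning an empty batch every time: exactly the stall this commit fixes and the jstest's `moveChunk` assertions exercise.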