author     Greg Studer <greg@10gen.com>    2013-08-09 15:52:05 -0400
committer  Greg Studer <greg@10gen.com>    2013-08-12 09:45:54 -0400
commit     a8e94e832027d44a78b441f9efdc8352ce56834a (patch)
tree       e5d1c47c5e0da3ff6c1408dcd5b2aba8edfdae3b
parent     4bf8648b4196aff618c68ca2d814a1a13f48c3d2 (diff)
download   mongo-a8e94e832027d44a78b441f9efdc8352ce56834a.tar.gz
SERVER-10478 fix batch limit check for _cloneLocs in migration
-rw-r--r--  jstests/slowNightly/sharding_migrate_large_docs.js | 63
-rw-r--r--  src/mongo/s/d_migrate.cpp                           |  7
2 files changed, 67 insertions(+), 3 deletions(-)
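
Note: the change below fixes a stall in chunk migration. The donor shard fills an array
builder (`a` in the d_migrate.cpp hunk) with documents for the next transfer batch and
stops once the builder would exceed BSONObjMaxUserSize plus ~1KB of padding. Under the
old check, a single document close to the 16MB limit failed that test even while the
batch was still empty, so the batch never filled and the migration made no progress.
The patch enforces the limit only once the array already holds at least one document.
A minimal standalone sketch of the corrected rule follows; Doc, fillBatch, and
maxBatchBytes are illustrative names, not the actual d_migrate.cpp types.

    #include <cstddef>
    #include <vector>

    struct Doc { std::size_t sizeBytes; };   // stand-in for a BSONObj

    // Fill `batch` from `docs` starting at index `next`, stopping before the byte
    // limit, but always taking at least one document so a single oversized (yet
    // still legal) document cannot stall the transfer forever.
    std::size_t fillBatch(const std::vector<Doc>& docs, std::size_t next,
                          std::size_t maxBatchBytes, std::vector<Doc>& batch) {
        std::size_t used = 0;
        while (next < docs.size()) {
            const Doc& d = docs[next];
            // Old rule: reject whenever used + size + padding exceeds the cap,
            // even for an empty batch. New rule: only enforce the cap once the
            // batch already holds something.
            if (!batch.empty() && used + d.sizeBytes + 1024 > maxBatchBytes)
                break;                       // batch is full; send it
            batch.push_back(d);
            used += d.sizeBytes;
            ++next;
        }
        return next;                         // index of the first unsent doc
    }
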
diff --git a/jstests/slowNightly/sharding_migrate_large_docs.js b/jstests/slowNightly/sharding_migrate_large_docs.js
new file mode 100644
index 00000000000..6b2e7faa56b
--- /dev/null
+++ b/jstests/slowNightly/sharding_migrate_large_docs.js
@@ -0,0 +1,63 @@
+//
+// Tests migration behavior of large documents
+//
+
+var st = new ShardingTest({ shards : 2, mongos : 1,
+                            other : { separateConfig : true,
+                                      mongosOptions : { noAutoSplit : "" },
+                                      shardOptions : { /* binVersion : "latest" */ } } });
+st.stopBalancer()
+
+var mongos = st.s0;
+var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB( "admin" );
+var shards = mongos.getCollection( "config.shards" ).find().toArray();
+var shardAdmin = st.shard0.getDB( "admin" );
+
+assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
+printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
+assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+
+jsTest.log( "Preparing large insert..." );
+
+var data1MB = "x"
+while ( data1MB.length < 1024 * 1024 )
+    data1MB += data1MB;
+
+var data15MB = "";
+for ( var i = 0; i < 15; i++ ) data15MB += data1MB;
+
+var data15PlusMB = data15MB;
+for ( var i = 0; i < 1023 * 1024; i++ ) data15PlusMB += "x";
+
+print("~15MB object size is : " + Object.bsonsize({ _id : 0, d : data15PlusMB }));
+
+jsTest.log( "Inserting docs of large and small sizes..." );
+
+// Two large docs next to each other
+coll.insert({ _id : -2, d : data15PlusMB });
+coll.insert({ _id : -1, d : data15PlusMB });
+
+// Docs of assorted sizes
+coll.insert({ _id : 0, d : "x" });
+coll.insert({ _id : 1, d : data15PlusMB });
+coll.insert({ _id : 2, d : "x" });
+coll.insert({ _id : 3, d : data15MB });
+coll.insert({ _id : 4, d : "x" });
+coll.insert({ _id : 5, d : data1MB });
+coll.insert({ _id : 6, d : "x" });
+
+assert.eq( null, coll.getDB().getLastError() );
+assert.eq( 9, coll.find().itcount() );
+
+jsTest.log( "Starting migration..." );
+
+assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }).ok );
+assert( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : shards[1]._id }).ok );
+
+assert.eq( 9, coll.find().itcount() );
+
+jsTest.log( "DONE!" );
+
+st.stop();
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 5d9dd0dbbc4..a391cbcb8cb 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -561,10 +561,11 @@ namespace mongo {
                     }
 
                     BSONObj o = dl.obj();
-
+
                     // use the builder size instead of accumulating 'o's size so that we take into consideration
-                    // the overhead of BSONArray indices
-                    if ( a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
+                    // the overhead of BSONArray indices, and *always* append one doc
+                    if ( a.arrSize() != 0 &&
+                         a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
                         filledBuffer = true; // break out of outer while loop
                         break;
                     }
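
With the a.arrSize() != 0 guard, the size check is skipped only for the first document of a
batch, so a batch can exceed the soft cap by at most that one document, which is itself
bounded by BSONObjMaxUserSize. The new jstest above exercises exactly this path:
data15PlusMB makes documents just under the 16MB limit, the chunk moved with
find : { _id : -1 } contains two such documents back to back, and the final itcount of 9
verifies that every document survived both migrations.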