author     Greg Studer <greg@10gen.com>    2013-08-12 10:01:16 -0400
committer  Greg Studer <greg@10gen.com>    2013-08-12 10:34:56 -0400
commit     e5a7faaecdfa0b153db493ad15d624dc90c986b9 (patch)
tree       e6f662fce10f022f8b3db730708a3052356a982e
parent     ec60d7de77e4c23e32f80daacd5e45350c3613f7 (diff)
download   mongo-e5a7faaecdfa0b153db493ad15d624dc90c986b9.tar.gz
SERVER-10478 fix batch limit check for _cloneLocs in migration
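Before this change, the size check in the migration clone loop could mark the transfer batch as full while the batch was still empty: a document whose BSON size is within 1024 bytes of BSONObjMaxUserSize always fails the a.len() + o.objsize() + 1024 test, so it could never be added and the migration made no progress. The fix exposes the array length via a new BSONArrayBuilder::arrSize() accessor and applies the size limit only once the batch already contains at least one document, so a near-maximum-size document is sent in a batch by itself.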
-rw-r--r--  bson/bsonobjbuilder.h                                2
-rw-r--r--  jstests/slowNightly/sharding_migrate_large_docs.js  65
-rw-r--r--  s/d_migrate.cpp                                      3
3 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/bson/bsonobjbuilder.h b/bson/bsonobjbuilder.h
index f89d225c9b3..882da0407df 100644
--- a/bson/bsonobjbuilder.h
+++ b/bson/bsonobjbuilder.h
@@ -752,6 +752,8 @@ namespace mongo {
int len() const { return _b.len(); }
+ int arrSize() const { return _i; }
+
private:
void fill( const StringData& name ) {
char *r;
diff --git a/jstests/slowNightly/sharding_migrate_large_docs.js b/jstests/slowNightly/sharding_migrate_large_docs.js
new file mode 100644
index 00000000000..0e7ba804a07
--- /dev/null
+++ b/jstests/slowNightly/sharding_migrate_large_docs.js
@@ -0,0 +1,65 @@
+//
+// Tests migration behavior of large documents
+//
+
+var st = new ShardingTest({ shards : 2, mongos : 1,
+ other : { separateConfig : true,
+ mongosOptions : { noAutoSplit : "" },
+ shardOptions : { /* binVersion : "latest" */ } } });
+
+var mongos = st.s0;
+var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB( "admin" );
+var shards = mongos.getCollection( "config.shards" ).find().toArray();
+var shardAdmin = st.shard0.getDB( "admin" );
+
+mongos.getDB( "config" ).settings.update({ _id : "balancer" },
+ { $set : { stopped : true } }, true, false);
+
+assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
+printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
+assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+
+jsTestLog( "Preparing large insert..." );
+
+var data1MB = "x";
+while ( data1MB.length < 1024 * 1024 )
+ data1MB += data1MB;
+
+var data15MB = "";
+for ( var i = 0; i < 15; i++ ) data15MB += data1MB;
+
+var data15PlusMB = data15MB;
+for ( var i = 0; i < 1023 * 1024; i++ ) data15PlusMB += "x";
+
+print("~15MB object size is : " + Object.bsonsize({ _id : 0, d : data15PlusMB }));
+
+jsTestLog( "Inserting docs of large and small sizes..." );
+
+// Two large docs next to each other
+coll.insert({ _id : -2, d : data15PlusMB });
+coll.insert({ _id : -1, d : data15PlusMB });
+
+// Docs of assorted sizes
+coll.insert({ _id : 0, d : "x" });
+coll.insert({ _id : 1, d : data15PlusMB });
+coll.insert({ _id : 2, d : "x" });
+coll.insert({ _id : 3, d : data15MB });
+coll.insert({ _id : 4, d : "x" });
+coll.insert({ _id : 5, d : data1MB });
+coll.insert({ _id : 6, d : "x" });
+
+assert.eq( null, coll.getDB().getLastError() );
+assert.eq( 9, coll.find().itcount() );
+
+jsTestLog( "Starting migration..." );
+
+assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }).ok );
+assert( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : shards[1]._id }).ok );
+
+assert.eq( 9, coll.find().itcount() );
+
+jsTestLog( "DONE!" );
+
+st.stop();
\ No newline at end of file
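A note on the sizes above: data15PlusMB holds 15 * 1024 * 1024 + 1023 * 1024 = 16,776,192 bytes of string data, 1,024 bytes short of the 16MB BSONObjMaxUserSize. Each large document is therefore just small enough to insert, yet o.objsize() + 1024 already exceeds the limit, so the patched check in s/d_migrate.cpp below must admit it as the sole document in its batch; the two adjacent large documents at _id -2 and -1 exercise that case twice in a row.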
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index 918dd86a44f..a0ccdd8df4d 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -507,7 +507,8 @@ namespace mongo {
// use the builder size instead of accumulating 'o's size so that we take into consideration
// the overhead of BSONArray indices
- if ( a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
+ if ( a.arrSize() != 0 &&
+ a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
filledBuffer = true; // break out of outer while loop
break;
}
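To make the guard concrete, here is a minimal, self-contained C++ sketch of the batching rule. It is illustrative only: kMaxBatchSize, kOverhead, and makeBatches are hypothetical stand-ins for BSONObjMaxUserSize, the 1024-byte slack, and the clone loop above, which builds a BSONArrayBuilder rather than packing byte counts.

// Minimal sketch of the batching rule fixed by this patch (hypothetical
// names; kMaxBatchSize and kOverhead stand in for BSONObjMaxUserSize and
// the 1024-byte slack in the real check).
#include <cstddef>
#include <iostream>
#include <vector>

const std::size_t kMaxBatchSize = 16 * 1024 * 1024;
const std::size_t kOverhead     = 1024;

// Greedily packs document sizes into batches under kMaxBatchSize.
// Without the !batch.empty() guard (the analogue of a.arrSize() != 0),
// a document larger than kMaxBatchSize - kOverhead is never admitted
// and the loop produces nothing but empty batches.
std::vector<std::vector<std::size_t>> makeBatches(const std::vector<std::size_t>& docSizes) {
    std::vector<std::vector<std::size_t>> batches;
    std::vector<std::size_t> batch;
    std::size_t batchBytes = 0;
    for (std::size_t docSize : docSizes) {
        if (!batch.empty() && batchBytes + docSize + kOverhead > kMaxBatchSize) {
            batches.push_back(batch);  // batch is full: flush it
            batch.clear();
            batchBytes = 0;
        }
        batch.push_back(docSize);      // the first document always fits, however large
        batchBytes += docSize;
    }
    if (!batch.empty())
        batches.push_back(batch);
    return batches;
}

int main() {
    // Two near-16MB documents followed by small ones, mirroring the jstest.
    std::vector<std::size_t> sizes = {16776192, 16776192, 16, 16, 16};
    for (const auto& b : makeBatches(sizes))
        std::cout << "batch of " << b.size() << " doc(s)\n";
    // Prints "batch of 1", "batch of 1", "batch of 3": each oversized
    // document travels alone instead of blocking the whole transfer.
    return 0;
}

With the guard removed, the first 16,776,192-byte entry would fail the size test against an empty batch on every pass and never be emitted, which is exactly the situation the jstest above reproduces with its near-16MB documents.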