path: root/jstests
author     Greg Studer <greg@10gen.com>    2013-08-21 13:14:04 -0400
committer  Greg Studer <greg@10gen.com>    2013-08-21 13:14:04 -0400
commit     252f42781db4b1019433968b7293287d18b2ec40 (patch)
tree       80aa502b5cb347e594ca3abf4a52223831b3a3d1 /jstests
parent     5d8c1c9cc60a2ba7395691c237f8a0560d5312ae (diff)
download   mongo-252f42781db4b1019433968b7293287d18b2ec40.tar.gz
SERVER-8598 test cleanup of orphaned data in hashed sharded collection
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_hashed.js  |  66
1 file changed, 66 insertions, 0 deletions
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
new file mode 100644
index 00000000000..8e7a828015f
--- /dev/null
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -0,0 +1,66 @@
+//
+// Tests cleanup of orphaned data in hashed sharded coll via the orphaned data cleanup command
+//
+
+var options = { separateConfig : true, shardOptions : { verbose : 2 } };
+
+var st = new ShardingTest({ shards : 2, mongos : 1, other : options });
+st.stopBalancer();
+
+var mongos = st.s0;
+var admin = mongos.getDB( "admin" );
+var shards = mongos.getCollection( "config.shards" ).find().toArray();
+var coll = mongos.getCollection( "foo.bar" );
+
+assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
+printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
+assert( admin.runCommand({ shardCollection : coll + "", key : { _id : "hashed" } }).ok );
+
+// Create two orphaned data holes, one bounded by min or max on each shard
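+// The split points below are values in hashed-key space (NumberLong), not raw _id
+// values. After the two moveChunk calls, each shard owns only part of the hashed-key
+// range, so documents written directly to a shard (bypassing mongos) that hash
+// outside its owned ranges are orphans.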
+
+assert( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(-100) } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(-50) } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(50) } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(100) } }).ok );
+assert( admin.runCommand({ moveChunk : coll + "", bounds : [{ _id : NumberLong(-100) },
+ { _id : NumberLong(-50) }],
+ to : shards[1]._id }).ok );
+assert( admin.runCommand({ moveChunk : coll + "", bounds : [{ _id : NumberLong(50) },
+ { _id : NumberLong(100) }],
+ to : shards[0]._id }).ok );
+st.printShardingStatus();
+
+jsTest.log( "Inserting some docs on each shard, so 1/2 will be orphaned..." );
+
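+// Insert _id 0..99 directly into each shard, bypassing mongos. Every hashed _id is
+// owned by exactly one shard, so of the 200 documents stored only 100 are owned;
+// the other 100 copies are orphans, and mongos sees just the owned 100.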
+for ( var s = 0; s < 2; s++ ) {
+ var shardColl = ( s == 0 ? st.shard0 : st.shard1 ).getCollection( coll + "" );
+ for ( var i = 0; i < 100; i++ ) shardColl.insert({ _id : i });
+ assert.eq( null, shardColl.getDB().getLastError() );
+}
+
+assert.eq( 200, st.shard0.getCollection( coll + "" ).find().itcount() +
+ st.shard1.getCollection( coll + "" ).find().itcount() );
+assert.eq( 100, coll.find().itcount() );
+
+jsTest.log( "Cleaning up orphaned data..." );
+
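+// cleanupOrphaned removes orphaned documents one range at a time. While more ranges
+// remain it returns a stoppedAtKey, which is fed back as startingFromKey to resume
+// the scan; the loop ends once no stoppedAtKey is returned.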
+for ( var s = 0; s < 2; s++ ) {
+ var shardAdmin = ( s == 0 ? st.shard0 : st.shard1 ).getDB( "admin" );
+
+ var result = shardAdmin.runCommand({ cleanupOrphaned : coll + "" });
+ while ( result.ok && result.stoppedAtKey ) {
+ printjson( result );
+ result = shardAdmin.runCommand({ cleanupOrphaned : coll + "",
+ startingFromKey : result.stoppedAtKey });
+ }
+    printjson( result );
+    assert( result.ok );
+}
+
+assert.eq( 100, st.shard0.getCollection( coll + "" ).find().itcount() +
+ st.shard1.getCollection( coll + "" ).find().itcount() );
+assert.eq( 100, coll.find().itcount() );
+
+jsTest.log( "DONE!" );
+
+st.stop();