summaryrefslogtreecommitdiff
path: root/jstests/sharding/cleanup_orphaned_cmd_hashed.js
blob: 002940878851d9781551ae466efe171c428adafa (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
//
// Tests cleanup of orphaned data in hashed sharded coll via the orphaned data cleanup command
//

(function() {
"use strict";

// Two shards so each can hold orphans belonging to the other; extra shard
// verbosity helps when debugging the range-deletion behavior under test.
var st = new ShardingTest({ shards : 2, mongos : 1, other : { shardOptions : { verbose : 2 } } });

var mongos = st.s0;
var admin = mongos.getDB( "admin" );
var shards = mongos.getCollection( "config.shards" ).find().toArray();
var coll = mongos.getCollection( "foo.bar" );
var ns = coll + "";

// Shard the collection on a hashed _id key, anchored on the first shard.
assert.commandWorked( admin.runCommand({ enableSharding : coll.getDB() + "" }) );
printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
assert.commandWorked( admin.runCommand({ shardCollection : ns, key : { _id : "hashed" } }) );

// Create two orphaned data holes, one bounded by min or max on each shard

[ -100, -50, 50, 100 ].forEach(function( splitPoint ) {
    assert.commandWorked( admin.runCommand({ split : ns, middle : { _id : NumberLong(splitPoint) } }) );
});
assert.commandWorked( admin.runCommand({ moveChunk : ns,
                                         bounds : [ { _id : NumberLong(-100) },
                                                    { _id : NumberLong(-50) } ],
                                         to : shards[1]._id,
                                         _waitForDelete : true }) );
assert.commandWorked( admin.runCommand({ moveChunk : ns,
                                         bounds : [ { _id : NumberLong(50) },
                                                    { _id : NumberLong(100) } ],
                                         to : shards[0]._id,
                                         _waitForDelete : true }) );
st.printShardingStatus();

jsTest.log( "Inserting some docs on each shard, so 1/2 will be orphaned..." );

// Write the same 100 docs directly to each shard, bypassing mongos. Only one
// shard owns any given hashed _id, so half of the 200 direct writes end up
// as orphans.
[ st.shard0, st.shard1 ].forEach(function( shard ) {
    var bulk = shard.getCollection( ns ).initializeUnorderedBulkOp();
    for ( var i = 0; i < 100; i++ ) {
        bulk.insert({ _id : i });
    }
    assert.writeOK( bulk.execute() );
});

// Direct counts see everything (200); mongos filters orphans and sees 100.
assert.eq( 200, st.shard0.getCollection( ns ).find().itcount() +
                st.shard1.getCollection( ns ).find().itcount() );
assert.eq( 100, coll.find().itcount() );

jsTest.log( "Cleaning up orphaned data in hashed coll..." );

// Drive cleanupOrphaned on each shard, resuming from stoppedAtKey until the
// command stops handing back a resume key.
[ st.shard0, st.shard1 ].forEach(function( shard ) {
    var shardAdmin = shard.getDB( "admin" );

    var result = shardAdmin.runCommand({ cleanupOrphaned : ns });
    while ( result.ok && result.stoppedAtKey ) {
        printjson( result );
        result = shardAdmin.runCommand({ cleanupOrphaned : ns,
                                         startingFromKey : result.stoppedAtKey });
    }

    printjson( result );
    assert( result.ok );
});

// After cleanup the direct per-shard total matches the mongos view exactly.
assert.eq( 100, st.shard0.getCollection( ns ).find().itcount() +
                st.shard1.getCollection( ns ).find().itcount() );
assert.eq( 100, coll.find().itcount() );

jsTest.log( "DONE!" );

st.stop();

})();