path: root/jstests/noPassthroughWithMongod/ttl_sharded.js
/**
 * Simple test of sharding TTL collections.
 *  - Creates a new collection with a TTL index.
 *  - Shards it, and moves one chunk containing half the docs to another shard.
 *  - Checks that both shards have the TTL index, and that docs get deleted on both shards.
 *  - Runs the collMod command to update the expireAfterSeconds field, and checks that more
 *    docs get deleted.
 *  @tags: [requires_sharding]
 */

// start up a new sharded cluster
var s = new ShardingTest({shards: 2, mongos: 1});

var dbname = "testDB";
var coll = "ttl_sharded";
var ns = dbname + "." + coll;
var t = s.getDB(dbname).getCollection(coll);

// enable sharding on the database and shard the collection. Only 1 chunk initially
s.adminCommand({enablesharding: dbname});
s.ensurePrimaryShard(dbname, s.shard1.shardName);
s.adminCommand({shardcollection: ns, key: {_id: 1}});

// insert 24 docs, with timestamps at one-hour intervals
var now = (new Date()).getTime();
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 24; i++) {
    var past = new Date(now - (3600 * 1000 * i));
    bulk.insert({_id: i, x: past});
}
assert.commandWorked(bulk.execute());
assert.eq(t.count(), 24, "initial docs not inserted");

// create the TTL index, which deletes anything older than ~5.5 hours
assert.commandWorked(t.createIndex({x: 1}, {expireAfterSeconds: 20000}));

// split chunk in half by _id, and move one chunk to the other shard
s.adminCommand({split: ns, middle: {_id: 12}});
s.adminCommand({moveChunk: ns, find: {_id: 0}, to: s.getOther(s.getPrimaryShard(dbname)).name});
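
// Optional diagnostic (not needed for the assertions below): dump the sharding status so the
// chunk distribution after the split and moveChunk is visible in the test log.
s.printShardingStatus();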

// Check that all expired documents are deleted.
assert.soon(
    function() {
        return t.count() === 6 && t.find({x: {$lt: new Date(now - 20000000)}}).count() === 0;
    },
    "TTL index did not successfully delete expired documents, all documents: " +
        tojson(t.find().toArray()),
    70 * 1000);

// now let's check things explicitly on each shard
var shard0 = s.shard0.getDB(dbname);
var shard1 = s.shard1.getDB(dbname);

print("Shard 0 coll stats:");
printjson(shard0.getCollection(coll).stats());
print("Shard 1 coll stats:");
printjson(shard1.getCollection(coll).stats());
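
// Optional diagnostic: also print each shard's document count, which makes the per-shard TTL
// checks below easier to interpret when they fail.
print("Shard 0 doc count: " + shard0.getCollection(coll).count());
print("Shard 1 doc count: " + shard1.getCollection(coll).count());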

function getTTLTime(theCollection, theKey) {
    var indexes = theCollection.getIndexes();
    for (var i = 0; i < indexes.length; i++) {
        if (friendlyEqual(theKey, indexes[i].key))
            return indexes[i].expireAfterSeconds;
    }
    throw new Error("index with key " + tojson(theKey) + " not found on " +
                    theCollection.getFullName());
}

// Check that TTL index (with expireAfterSeconds field) appears on both shards
assert.eq(20000, getTTLTime(shard0.getCollection(coll), {x: 1}));
assert.eq(20000, getTTLTime(shard1.getCollection(coll), {x: 1}));

// Check that the collMod command successfully updates the expireAfterSeconds field
assert.commandWorked(s.getDB(dbname).runCommand(
    {collMod: coll, index: {keyPattern: {x: 1}, expireAfterSeconds: 10000}}));
assert.eq(10000, getTTLTime(shard0.getCollection(coll), {x: 1}));
assert.eq(10000, getTTLTime(shard1.getCollection(coll), {x: 1}));

// Check that all expired documents are deleted.
assert.soon(
    function() {
        return t.count() === 3 && t.find({x: {$lt: new Date(now - 10000000)}}).count() === 0;
    },
    "new expireAfterSeconds did not successfully delete expired documents, all documents: " +
        tojson(t.find().toArray()),
    70 * 1000);

s.stop();