path: root/jstests/sharding/get_stats_for_balancing.js
/*
 * Basic tests for _shardsvrGetStatsForBalancing
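 * (the internal command used by the balancer to retrieve per-collection data sizes from shards)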
 *
 * @tags: [
 *    requires_fcv_60,
 * ]
 */
(function() {
'use strict';

load("jstests/libs/fail_point_util.js");  // for 'configureFailPoint()'

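// A small range deleter batch size makes the orphan cleanup after the chunk migration below run
// in multiple batches, which the failpoints at the end of the test step through one at a time.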
const rangeDeleterBatchSize = 128;

const st = new ShardingTest({
    shards: 2,
    other: {shardOptions: {setParameter: {rangeDeleterBatchSize: rangeDeleterBatchSize}}}
});

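// Returns the data size (in bytes) reported for collection 'ns' by _shardsvrGetStatsForBalancing
// on 'node', optionally matching against the collection UUID 'optUUID'. Retries while the command
// fails with NotYetInitialized.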
function getCollSizeBytes(ns, node, optUUID) {
    let res;
    let collections = [{ns: ns}];
    if (optUUID) {
        collections[0].UUID = optUUID;
    }
    assert.soon(() => {
        res = assert.commandWorkedOrFailedWithCode(
            node.adminCommand(
                {_shardsvrGetStatsForBalancing: 1, collections: collections, scaleFactor: 1}),
            [ErrorCodes.NotYetInitialized]);
        return res.ok;
    });

    return res['stats'][0]['collSize'];
}
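
// Each {_id: <number>} document inserted below is an 18-byte BSON object: 4-byte length +
// 1-byte type + 4 bytes for the "_id\0" field name + 8-byte double + 1-byte terminator
// (shell numbers are stored as doubles).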
const kSizeSingleDocBytes = 18;

// The command must work on non-existent collections and report a data size of 0.
st.forEachConnection((shard) => {
    assert.eq(0, getCollSizeBytes("db.not_exists", shard.rs.getPrimary()));
});

const dbName = 'db';
const db = st.getDB(dbName);
assert.commandWorked(
    st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName}));

{
    // The command must report the size of an unsharded collection on the db-primary shard and 0
    // on the other shard.
    let coll = db['unsharded1'];
    assert.commandWorked(coll.insert({_id: 1}));
    assert.eq(kSizeSingleDocBytes, getCollSizeBytes(coll.getFullName(), st.shard0.rs.getPrimary()));
    assert.eq(0, getCollSizeBytes(coll.getFullName(), st.shard1.rs.getPrimary()));
}

// The command must also work on sharded collections; while the collection is empty, both shards
// report a size of 0.
let coll = db['sharded1'];
assert.commandWorked(st.s.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
st.forEachConnection((shard) => {
    assert.eq(0, getCollSizeBytes(coll.getFullName(), shard.rs.getPrimary()));
});

const numDocs = 1000;
let bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < numDocs; i++) {
    bulk.insert({_id: i});
}
assert.commandWorked(bulk.execute());

// Create two chunks
assert.commandWorked(st.s.adminCommand({split: coll.getFullName(), middle: {_id: numDocs / 2}}));

// Check the data size is correct before the chunk is moved
assert.eq(kSizeSingleDocBytes * numDocs,
          getCollSizeBytes(coll.getFullName(), st.shard0.rs.getPrimary()));
assert.eq(0, getCollSizeBytes(coll.getFullName(), st.shard1.rs.getPrimary()));

{
    // Check that the optional collection UUID is handled correctly:
    // - The correct data size is returned if the UUID matches
    // - A data size of 0 is returned if the UUID doesn't match
    let config = st.configRS.getPrimary().getDB("config");
    let collectionUUID = config.collections.findOne({_id: coll.getFullName()}).uuid;
    assert.eq(kSizeSingleDocBytes * numDocs,
              getCollSizeBytes(coll.getFullName(), st.shard0.rs.getPrimary(), collectionUUID));
    assert.eq(0, getCollSizeBytes(coll.getFullName(), st.shard0.rs.getPrimary(), UUID()));
}

// Pause the range deleter before it deletes the first batch of documents
let beforeDeletionFailpoint = configureFailPoint(st.shard0, "hangBeforeDoingDeletion");
let afterDeletionFailpoint = configureFailPoint(st.shard0, "hangAfterDoingDeletion");
assert.commandWorked(db.adminCommand(
    {moveChunk: coll.getFullName(), find: {_id: (numDocs / 2)}, to: st.shard1.shardName}));

const expectedShardSizeBytes = kSizeSingleDocBytes * (numDocs / 2);
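// Both shards are expected to report half of the collection: the recipient now owns the moved
// chunk, and the donor must not count the orphaned copies that are still pending range deletion.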
st.forEachConnection((shard) => {
    assert.eq(expectedShardSizeBytes, getCollSizeBytes(coll.getFullName(), shard.rs.getPrimary()));
});

// Check that dataSize is always correct during range deletions
const numBatches = (numDocs / 2) / rangeDeleterBatchSize;
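// With 500 orphaned documents and a batch size of 128, the cleanup runs in 4 batches; step
// through them one at a time.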
for (let i = 0; i < numBatches; i++) {
    // Wait for the failpoint to be reached and check that the reported size still excludes the
    // orphaned documents that have not been deleted yet
    beforeDeletionFailpoint.wait();
    st.forEachConnection((shard) => {
        assert.eq(expectedShardSizeBytes,
                  getCollSizeBytes(coll.getFullName(), shard.rs.getPrimary()));
    });
    // Allow exactly one batch to be deleted: re-arm the 'after' failpoint before releasing the
    // 'before' one so that no additional batches are deleted in the meantime
    afterDeletionFailpoint = configureFailPoint(st.shard0, "hangAfterDoingDeletion");
    beforeDeletionFailpoint.off();
    afterDeletionFailpoint.wait();
    beforeDeletionFailpoint = configureFailPoint(st.shard0, "hangBeforeDoingDeletion");
    afterDeletionFailpoint.off();
}
beforeDeletionFailpoint.off();

st.stop();
})();