path: root/jstests/replsets/dbhash_at_cluster_time.js
/**
 * Tests that "atClusterTime" is supported by the "dbHash" command.
 */
(function() {
    "use strict";

    const rst = new ReplSetTest({nodes: 2});
    rst.startSet();

    const replSetConfig = rst.getReplSetConfig();
    replSetConfig.members[1].priority = 0;
    rst.initiate(replSetConfig);
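    // With members[1] at priority 0, the second node can never be elected primary, so the roles
    // of 'primary' and 'secondary' remain stable for the duration of the test.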

    const primary = rst.getPrimary();
    const secondary = rst.getSecondary();

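    // We use a session without causal consistency so that the shell doesn't implicitly attach
    // "afterClusterTime" to our reads; this test instead controls the read timestamp explicitly
    // via "atClusterTime".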
    const session = primary.startSession({causalConsistency: false});
    const db = session.getDatabase("test");

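    // The test is skipped on storage engines that don't support snapshot reads, since
    // "atClusterTime" depends on them.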
    if (!db.serverStatus().storageEngine.supportsSnapshotReadConcern) {
        rst.stopSet();
        return;
    }

    // We force 'secondary' to sync from 'primary' using the "forceSyncSourceCandidate" failpoint
    // to ensure that an intermittent connectivity issue doesn't prevent the secondary from
    // advancing its view of the majority commit point. This avoids any complications that would
    // arise due to SERVER-33248.
    assert.commandWorked(secondary.adminCommand({
        configureFailPoint: "forceSyncSourceCandidate",
        mode: "alwaysOn",
        data: {hostAndPort: primary.host}
    }));
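    // Wait until 'secondary' has in fact chosen 'primary' as its sync source.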
    rst.awaitSyncSource(secondary, primary);

    // We also prevent all nodes in the replica set from advancing oldest_timestamp. This ensures
    // that the snapshot associated with 'clusterTime' is retained for the duration of this test.
    rst.nodes.forEach(conn => {
        assert.commandWorked(conn.adminCommand({
            configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
            mode: "alwaysOn",
        }));
    });

    // We insert a document with {w: "majority"} write concern and record the clusterTime of that
    // write.
    assert.commandWorked(db.mycoll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
    const clusterTime = db.getSession().getOperationTime();

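    // Running the "dbHash" command at 'clusterTime' inside a transaction with "snapshot" read
    // concern gives us the md5sum of the database as of the first insert.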
    session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
    let res = assert.commandWorked(db.runCommand({dbHash: 1}));
    session.commitTransaction();
    const hash1 = {collections: res.collections, md5: res.md5};

    // We insert another document to ensure the collection's contents have a different md5sum now.
    assert.commandWorked(db.mycoll.insert({_id: 2}));

    // However, using "atClusterTime" to read at the clusterTime of the first insert should return
    // the same md5sum as it did originally.
    session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
    res = assert.commandWorked(db.runCommand({dbHash: 1}));
    session.commitTransaction();
    const hash2 = {collections: res.collections, md5: res.md5};
    assert.eq(hash1, hash2, "primary returned different dbhash after second insert");

    {
        const secondarySession = secondary.startSession({causalConsistency: false});
        const secondaryDB = secondarySession.getDatabase("test");

        // Using "atClusterTime" to read at the clusterTime of the first insert should return the
        // same md5sum on the secondary as it did on the primary.
        secondarySession.startTransaction(
            {readConcern: {level: "snapshot", atClusterTime: clusterTime}});
        res = assert.commandWorked(secondaryDB.runCommand({dbHash: 1}));
        secondarySession.commitTransaction();
        const secondaryHash = {collections: res.collections, md5: res.md5};
        assert.eq(hash1, secondaryHash, "primary and secondary have different dbhash");

        secondarySession.endSession();
    }

    {
        const otherSession = primary.startSession({causalConsistency: false});
        const otherDB = otherSession.getDatabase("test");

        // We perform another insert inside a separate transaction to cause a MODE_IX lock to be
        // held on the collection.
        otherSession.startTransaction();
        assert.commandWorked(otherDB.mycoll.insert({_id: 3}));

        // It should nevertheless be possible to run the "dbHash" command with "atClusterTime"
        // concurrently, since the snapshot read only takes intent locks that are compatible with
        // the transaction's MODE_IX lock.
        session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
        res = assert.commandWorked(db.runCommand({dbHash: 1}));
        session.commitTransaction();
        const hash3 = {collections: res.collections, md5: res.md5};
        assert.eq(hash1, hash3, "primary returned different dbhash after third insert");

        // However, the "dbHash" command should block behind the transaction if "atClusterTime"
        // wasn't specified.
        res = assert.commandFailedWithCode(db.runCommand({dbHash: 1, maxTimeMS: 1000}),
                                           ErrorCodes.ExceededTimeLimit);

        otherSession.abortTransaction();
        otherSession.endSession();
    }

    {
        const otherSession = primary.startSession({causalConsistency: false});
        const otherDB = otherSession.getDatabase("test");

        // We create another collection inside a separate session to modify the collection catalog
        // at an opTime later than 'clusterTime'. This prevents further usage of the snapshot
        // associated with 'clusterTime' for snapshot reads.
        assert.commandWorked(otherDB.runCommand({create: "mycoll2"}));
        session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
        assert.commandFailedWithCode(db.runCommand({dbHash: 1}), ErrorCodes.SnapshotUnavailable);
        session.abortTransaction();

        otherSession.endSession();
    }

    session.endSession();
    rst.stopSet();
})();