// Verifies basic sharded transaction behavior with the supported read concern levels.
//
// @tags: [
//   requires_find_command,
//   requires_sharding,
//   uses_multi_shard_transaction,
//   uses_transactions,
// ]
(function() {
    "use strict";

    const dbName = "test";
    const collName = "foo";
    const ns = dbName + "." + collName;

    const st = new ShardingTest({shards: 2, config: 1});

    // Set up a sharded collection with 2 chunks, one on each shard.

    assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
    assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));

    assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
    st.ensurePrimaryShard(dbName, st.shard0.shardName);

    assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
    assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
    assert.commandWorked(
        st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));

    // Refresh second shard to avoid stale shard version error on the second transaction statement.
    assert.commandWorked(st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));

    function runTest(st, readConcern, sessionOptions) {
        jsTestLog("Testing readConcern: " + tojson(readConcern) + ", sessionOptions: " +
                  tojson(sessionOptions));

        const session = st.s.startSession(sessionOptions);
        const sessionDB = session.getDatabase(dbName);

        if (readConcern) {
            session.startTransaction({readConcern: readConcern});
        } else {
            session.startTransaction();
        }
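        // Note: startTransaction() only updates the session's local state; the participant
        // shards do not begin the transaction until the first statement reaches them below.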

        // Target only the first shard.
        assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: -1}}));

        // On a separate, causally consistent session, read from the first shard then write to the
        // second one. This write is guaranteed to commit at a later cluster time than that of the
        // snapshot established by the transaction on the first shard.
        const otherSessionDB = st.s.startSession().getDatabase(dbName);
        assert.commandWorked(otherSessionDB.runCommand({find: collName}));
        assert.commandWorked(otherSessionDB.runCommand({insert: collName, documents: [{_id: 5}]}));

        // Depending on the transaction's read concern, the new document will or will not be
        // visible to the next statement. A snapshot transaction reads from the global snapshot
        // chosen when the transaction began, so it cannot see the later write; local and majority
        // transactions establish their read timestamp on each shard when that shard is first
        // targeted, so the write is visible to them.
        const numExpectedDocs = readConcern && readConcern.level === "snapshot" ? 0 : 1;
        assert.eq(numExpectedDocs,
                  sessionDB[collName].find({_id: 5}).itcount(),
                  "sharded transaction with read concern " + tojson(readConcern) +
                      " did not see expected number of documents, sessionOptions: " +
                      tojson(sessionOptions));

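        // commitTransaction_forTesting() returns the command response rather than throwing on
        // error, so the result can be checked explicitly with assert.commandWorked().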
        assert.commandWorked(session.commitTransaction_forTesting());

        // Clean up for the next iteration.
        assert.writeOK(sessionDB[collName].remove({_id: 5}));
    }

    // Specifying no read concern level is allowed and should not compute a global snapshot.
    runTest(st, undefined, {causalConsistency: false});
    runTest(st, undefined, {causalConsistency: true});

    const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
    for (let readConcernLevel of kAllowedReadConcernLevels) {
        runTest(st, {level: readConcernLevel}, {causalConsistency: false});
        runTest(st, {level: readConcernLevel}, {causalConsistency: true});
    }

    st.stop();
})();