/**
 * Tests secondary reads against a config shard, both while it is acting as a
 * shard and while transitioning to and from a dedicated config server.
 *
 * @tags: [
 *   requires_fcv_70,
 *   featureFlagCatalogShard,
 * ]
 */
(function() {
"use strict";

load("jstests/libs/config_shard_util.js");
load("jstests/libs/fail_point_util.js");
load("jstests/libs/chunk_manipulation_util.js");

var staticMongod = MongoRunner.runMongod({});  // For startParallelOps.

const st = new ShardingTest({
    shards: {rs0: {nodes: 2}, rs1: {nodes: 2}},
    config: 2,
    mongos: 1,
    configShard: true,
});
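// With configShard: true, the config server replica set doubles as the first
// shard (st.shard0), so the "config shard secondary" reads below go to the
// config server's secondary node.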

assert.commandWorked(st.s0.getDB('test').user.insert({_id: 1234}));
st.ensurePrimaryShard('test', st.shard0.shardName);

assert.commandWorked(st.s0.adminCommand({enableSharding: 'sharded'}));
st.ensurePrimaryShard('sharded', st.shard0.shardName);
assert.commandWorked(st.s0.adminCommand({shardCollection: 'sharded.user', key: {_id: 1}}));

let configDbEntry = st.s.getDB('config').databases.findOne({_id: 'test'});
let dbVersion = configDbEntry.version;

let shardedUserConfigColl = st.s.getDB('config').collections.findOne({_id: 'sharded.user'});
let shardedUserConfigChunk =
    st.s.getDB('config').chunks.findOne({uuid: shardedUserConfigColl.uuid});
let shardVersion = {
    e: shardedUserConfigColl.lastmodEpoch,
    t: shardedUserConfigColl.timestamp,
    v: shardedUserConfigChunk.lastmod
};
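// These captured versions are attached to direct reads later in the test,
// after the data has moved off the config shard, to exercise the stale
// version code paths.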

assert.commandWorked(st.s0.getDB('sharded').user.insert({_id: 5678}));

// Prime the config shard secondary's catalog cache.
let s0Conn = new Mongo(st.s0.host);
s0Conn.setReadPref('secondary');
let doc = s0Conn.getDB('test').user.findOne({_id: 1234});
assert.eq({_id: 1234}, doc);

doc = s0Conn.getDB('sharded').user.findOne({_id: 5678});
assert.eq({_id: 5678}, doc);
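// Both namespaces are now cached on the config shard secondary; the
// transition below is expected to invalidate this metadata.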

let removeRes = assert.commandWorked(st.s0.adminCommand({transitionToDedicatedConfigServer: 1}));
assert.eq("started", removeRes.state, tojson(removeRes));

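// The sessions collection's chunks must be moved off the config shard before
// the transition to a dedicated config server can complete.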
assert.commandWorked(st.s0.adminCommand(
    {moveChunk: 'config.system.sessions', find: {_id: 0}, to: st.shard1.shardName}));

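// Drain the remaining sharded.user chunk in a parallel shell via staticMongod;
// the final argument indicates the migration is expected to succeed.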
var joinMoveChunk = moveChunkParallel(staticMongod,
                                      st.s0.host,
                                      {_id: 0},
                                      null,
                                      'sharded.user',
                                      st.shard1.shardName,
                                      true /* expectSuccess: the migration should succeed */);

joinMoveChunk();

assert.commandWorked(st.s0.adminCommand({movePrimary: 'test', to: st.shard1.shardName}));
assert.commandWorked(st.s0.adminCommand({movePrimary: 'sharded', to: st.shard1.shardName}));

// A config shard can't be removed until all range deletions have finished.
ConfigShardUtil.waitForRangeDeletions(st.s0);

removeRes = assert.commandWorked(st.s0.adminCommand({transitionToDedicatedConfigServer: 1}));
assert.eq("completed", removeRes.state, tojson(removeRes));

const downgradeFCV = binVersionToFCV('last-lts');
assert.commandWorked(st.s0.adminCommand({setFeatureCompatibilityVersion: downgradeFCV}));
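// The config shard feature is gated on FCV, so the stale direct reads below
// are also exercised with the FCV downgraded to last-lts.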

// Connect directly to the config server to simulate a stale mongos that still
// thinks the config server is a shard.

let configConn = new Mongo(st.configRS.getSecondary().host);
configConn.setReadPref('secondary');
configConn.setSecondaryOk();

let findCmd =
    {find: 'user', filter: {_id: 9876}, databaseVersion: dbVersion, readConcern: {level: 'local'}};
assert.commandFailedWithCode(configConn.getDB('test').runCommand(findCmd),
                             ErrorCodes.StaleDbVersion);
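// The dbVersion captured before movePrimary is now stale, so the former
// config shard refuses the read instead of returning stale data.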

// Note: the secondary's sharding metadata is cleared when the recoverable
// critical section is replicated.
let version = assert.commandWorked(
    configConn.adminCommand({getShardVersion: 'sharded.user', fullMetadata: true}));
assert.eq({}, version.metadata);

findCmd = {
    find: 'user',
    filter: {_id: 54321},
    shardVersion: shardVersion,
    readConcern: {level: 'local'}
};
assert.commandFailedWithCode(configConn.getDB('sharded').runCommand(findCmd),
                             ErrorCodes.StaleConfig);
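// The attached shardVersion predates the chunk migration, so the shard
// rejects the read with StaleConfig.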

version = assert.commandWorked(
    configConn.adminCommand({getShardVersion: 'sharded.user', fullMetadata: true}));
assert.eq(1, timestampCmp(version.metadata.collVersion, shardVersion.v), tojson(version));
assert.eq(0, timestampCmp(version.metadata.shardVersion, Timestamp(0, 0)), tojson(version));
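// The refresh triggered above advanced the cached collection version past the
// version captured earlier, and a shard version of Timestamp(0, 0) indicates
// this shard owns no chunks for the collection.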

// Should be able to do secondary reads on the config server after transitioning back.

const upgradeFCV = binVersionToFCV('latest');
assert.commandWorked(st.s0.adminCommand({setFeatureCompatibilityVersion: upgradeFCV}));
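// transitionFromDedicatedConfigServer requires the config shard feature, so
// restore the latest FCV first.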

// The leftover 'sharded' database must be dropped from the config server
// before it can become a shard again.
assert.commandWorked(st.configRS.getPrimary().getDB('sharded').dropDatabase());

assert.commandWorked(st.s0.adminCommand({transitionFromDedicatedConfigServer: 1}));
assert.commandWorked(st.s0.adminCommand({movePrimary: 'test', to: st.shard0.shardName}));
assert.commandWorked(
    st.s0.adminCommand({moveChunk: 'sharded.user', find: {_id: 0}, to: st.shard0.shardName}));
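// The data is back on the config shard, so the original secondary reads can
// be repeated against it.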

doc = s0Conn.getDB('test').user.findOne({_id: 1234});
assert.eq({_id: 1234}, doc);

doc = s0Conn.getDB('sharded').user.findOne({_id: 5678});
assert.eq({_id: 5678}, doc);

st.stop();

MongoRunner.stopMongod(staticMongod);
})();