// This test requires persistence because it assumes standalone shards will still have their data
// after restarting.
// @tags: [requires_persistence]

(function() {
'use strict';

var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2});

var dbTestName = 'WriteCommandsTestDB';
var collName = dbTestName + '.TestColl';

assert.commandWorked(st.s0.adminCommand({enablesharding: dbTestName}));
st.ensurePrimaryShard(dbTestName, st.shard0.shardName);

assert.commandWorked(st.s0.adminCommand({shardCollection: collName, key: {Key: 1}, unique: true}));

// Split at keys 10 and 20
assert.commandWorked(st.s0.adminCommand({split: collName, middle: {Key: 10}}));
assert.commandWorked(st.s0.adminCommand({split: collName, middle: {Key: 20}}));

printjson(st.config.getSiblingDB('config').chunks.find().toArray());

// Move the chunks [10, 20) and [20, MaxKey) to st.shard1.shardName
assert.commandWorked(st.s0.adminCommand(
    {moveChunk: collName, find: {Key: 19}, to: st.shard1.shardName, _waitForDelete: true}));
assert.commandWorked(st.s0.adminCommand(
    {moveChunk: collName, find: {Key: 21}, to: st.shard1.shardName, _waitForDelete: true}));

printjson(st.config.getSiblingDB('config').chunks.find().toArray());
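
// A small sanity check (a sketch, assuming the pre-5.0 config.chunks schema where chunk
// documents are keyed by the 'ns' field): after the two moves above, two of the three chunks
// should be owned by shard1.
assert.eq(2,
          st.config.getSiblingDB('config').chunks.count({ns: collName, shard: st.shard1.shardName}));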

// Insert one document in each chunk; these documents will be modified by the update below
assert.commandWorked(st.s1.getDB(dbTestName).TestColl.insert({Key: 1}));
assert.commandWorked(st.s1.getDB(dbTestName).TestColl.insert({Key: 11}));
assert.commandWorked(st.s1.getDB(dbTestName).TestColl.insert({Key: 21}));

// Make sure the documents are correctly placed
printjson(st.shard0.getDB(dbTestName).TestColl.find().toArray());
printjson(st.shard1.getDB(dbTestName).TestColl.find().toArray());

assert.eq(1, st.shard0.getDB(dbTestName).TestColl.count());
assert.eq(2, st.shard1.getDB(dbTestName).TestColl.count());

assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 11}).count());
assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());

// Move the chunk [10, 20) back to st.shard0.shardName and make sure the documents are correctly placed
assert.commandWorked(st.s0.adminCommand(
    {moveChunk: collName, find: {Key: 19}, _waitForDelete: true, to: st.shard0.shardName}));

printjson(st.config.getSiblingDB('config').chunks.find().toArray());
printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());

// Now restart all mongod instances, so they don't know yet that they are sharded
st.restartShardRS(0);
st.restartShardRS(1);
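
// Purely for illustration (a sketch, not required for the test to pass): the shards' cached view
// of the collection version can be inspected with the getShardVersion command. Right after the
// restarts it is expected to still be unset, until a mongos sends setShardVersion.
printjson(st.shard0.getDB('admin').runCommand({getShardVersion: collName}));
printjson(st.shard1.getDB('admin').runCommand({getShardVersion: collName}));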

// Now that both mongod shards have been restarted, they do not know that they are part of a
// sharded cluster until they receive a setShardVersion command. Mongos instance s1 has stale
// metadata and doesn't know that the chunk containing key 19 has moved to st.shard0.shardName,
// so it will send the update to st.shard1.shardName first.
//
// Shard1 will only send back a stale config exception if it receives a setShardVersion command.
// The bug that this test validates is that setShardVersion is indeed being sent (for more
// information, see SERVER-19395).
st.s1.getDB(dbTestName).TestColl.update({Key: 11}, {$inc: {Counter: 1}}, {upsert: true});

printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());

assert.eq(2, st.shard0.getDB(dbTestName).TestColl.count());
assert.eq(1, st.shard1.getDB(dbTestName).TestColl.count());

assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 11}).count());
assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());

st.stop();
})();