// jstests/sharding/write_commands_sharding_state.js

// This test requires persistence because it assumes standalone shards will still have their data
// after restarting.
// @tags: [requires_persistence]

(function() {
'use strict';

var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2 });

var dbTestName = 'WriteCommandsTestDB';

assert.commandWorked(st.s0.adminCommand({ enablesharding: dbTestName }));
st.ensurePrimaryShard(dbTestName, 'shard0000');

assert.commandWorked(st.s0.adminCommand({ shardCollection: dbTestName + '.TestColl',
                                          key: { Key: 1 },
                                          unique: true }));
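
// Extra sanity check (added; not part of the original flow). Assuming the pre-5.0 config
// metadata schema, where sharded collections are keyed in config.collections by their full
// namespace string, the collection should now be registered as sharded.
assert.neq(null,
           st.config.getSiblingDB('config').collections.findOne({ _id: dbTestName + '.TestColl' }));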

// Split at keys 10 and 20
assert.commandWorked(st.s0.adminCommand({ split: dbTestName + '.TestColl', middle: { Key: 10 } }));
assert.commandWorked(st.s0.adminCommand({ split: dbTestName + '.TestColl', middle: { Key: 20 } }));
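
// Extra sanity check (added). Assuming the pre-5.0 config.chunks schema, where each chunk
// document carries an 'ns' field, the two splits should leave exactly three chunks.
assert.eq(3, st.config.getSiblingDB('config').chunks.count({ ns: dbTestName + '.TestColl' }));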

printjson(st.config.getSiblingDB('config').chunks.find().toArray());

// Move the chunk below 10 to shard0000 and the chunks starting at 10 and 20 to shard0001
st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 0 }, to: 'shard0000' });
st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 19 }, to: 'shard0001' });
st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 21 }, to: 'shard0001' });
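
// Extra sanity check (added; same config.chunks schema assumption as above): shard0000 should
// now own the single low chunk and shard0001 the remaining two.
assert.eq(1, st.config.getSiblingDB('config')
                 .chunks.count({ ns: dbTestName + '.TestColl', shard: 'shard0000' }));
assert.eq(2, st.config.getSiblingDB('config')
                 .chunks.count({ ns: dbTestName + '.TestColl', shard: 'shard0001' }));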

printjson(st.config.getSiblingDB('config').chunks.find().toArray());

// Insert one document into each chunk; these are the documents the test will modify later
assert.writeOK(st.s1.getDB(dbTestName).TestColl.insert({ Key: 1 }));
assert.writeOK(st.s1.getDB(dbTestName).TestColl.insert({ Key: 11 }));
assert.writeOK(st.s1.getDB(dbTestName).TestColl.insert({ Key: 21 }));

// Make sure the documents are correctly placed
printjson(st.d0.getDB(dbTestName).TestColl.find().toArray());
printjson(st.d1.getDB(dbTestName).TestColl.find().toArray());

assert.eq(1, st.d0.getDB(dbTestName).TestColl.count());
assert.eq(2, st.d1.getDB(dbTestName).TestColl.count());

assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({ Key: 1 }).count());
assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({ Key: 11 }).count());
assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({ Key: 21 }).count());

// Move the chunk [10, 20) back to shard0000 and make sure the documents are correctly placed
assert.commandWorked(st.s0.adminCommand(
    { moveChunk: dbTestName + '.TestColl', find: { Key: 19 }, to: 'shard0000' }));

printjson(st.config.getSiblingDB('config').chunks.find().toArray());
printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());

// Restart both shard mongod instances so that they lose their in-memory sharding state
st.restartMongod(0);
st.restartMongod(1);
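
// Extra check (added): the shards restart on their original dbpaths (hence the
// requires_persistence tag above), so the documents should survive the restart. After the
// moveChunk above, shard0000 holds keys 1 and 11 and shard0001 holds key 21.
assert.eq(2, st.d0.getDB(dbTestName).TestColl.count());
assert.eq(1, st.d1.getDB(dbTestName).TestColl.count());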

// Now that both shard mongods have been restarted, they won't know that they are part of a
// sharded cluster until they receive a setShardVersion command. Mongos instance s1 has stale
// routing metadata and doesn't know that the chunk containing key 19 has moved back to
// shard0000, so it will initially send the update below to shard0001.
//
// Shard0001 can only respond with a stale config exception if it has received a
// setShardVersion command first. The bug that this test validates is that setShardVersion is
// indeed being sent (for more information, see SERVER-19395).
assert.writeOK(st.s1.getDB(dbTestName)
                   .TestColl.update({ Key: 11 }, { $inc: { Counter: 1 } }, { upsert: true }));
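
// Extra verification (added): if the stale mongos retried correctly after the stale config
// response, reading back through the same mongos should show the incremented counter on the
// document with key 11.
assert.eq(1, st.s1.getDB(dbTestName).TestColl.find({ Key: 11, Counter: 1 }).count());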

printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());

assert.eq(2, st.d0.getDB(dbTestName).TestColl.count());
assert.eq(1, st.d1.getDB(dbTestName).TestColl.count());

assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({ Key: 1 }).count());
assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({ Key: 11 }).count());
assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({ Key: 21 }).count());

st.stop();

})();