// Tests of sharded GLE enforcing write concern against operations in a cluster
// Basic sharded GLE operation is tested elsewhere.
//
// This test asserts that a journaled write to a mongod running with --nojournal should be
// rejected, so it cannot be run on the ephemeralForTest storage engine, which accepts all
// journaled writes.
// @tags: [SERVER-21420]

// Checking UUID consistency involves talking to the shard primaries, but by the end of this test,
// one shard does not have a primary.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckDBHashes = true;

(function() {
"use strict";

// Skip this test if running with the "wiredTiger" storage engine, since it requires
// using 'nojournal' in a replica set, which is not supported when using WT.
if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
    // WT is currently the default engine so it is used when 'storageEngine' is not set.
    jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
    return;
}

// Options for a cluster with two replica set shards, each with three nodes.
// This lets us try a number of GLE scenarios.
var options = {
    rs: true,
    rsOptions: {nojournal: ""},
    // Options for each replica set shard
    rs0: {nodes: 3},
    rs1: {nodes: 3}
};
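
// Every node runs with journaling disabled (rsOptions above), which is what makes the
// {j: true} getLastError checks below fail.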

var st = new ShardingTest({shards: 2, other: options});

var mongos = st.s0;
var admin = mongos.getDB("admin");
var config = mongos.getDB("config");
var coll = mongos.getCollection(jsTestName() + ".coll");
var shards = config.shards.find().toArray();

assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id}));
assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}}));
assert.commandWorked(
    admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id}));
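
// After the split and moveChunk above, documents with _id < 0 live on shards[0] (rs0) and
// documents with _id >= 0 live on shards[1] (rs1), so each single-document write below
// targets exactly one shard.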

st.printShardingStatus();

var gle = null;

//
// No journal insert, GLE fails
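// Every shard runs with --nojournal, so a {j: true} write concern cannot be satisfied and
// getLastError reports a command-level failure: ok is false and errmsg is set.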
coll.remove({});
coll.insert({_id: 1});
printjson(gle = coll.getDB().runCommand({getLastError: 1, j: true}));
assert(!gle.ok);
assert(gle.errmsg);

//
// Successful insert, write concern mode invalid
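// The insert itself succeeds, but the unknown write concern mode makes getLastError fail with
// UnknownReplWriteConcern (code 79) and no err field, so the document is still counted below.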
coll.remove({});
coll.insert({_id: -1});
printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 'invalid'}));
assert(!gle.ok);
assert(!gle.err);
assert(gle.errmsg);
assert.eq(gle.code, 79);  // UnknownReplWriteConcern - needed for backwards compatibility
assert.eq(coll.count(), 1);

//
// Error on insert (dup key), write concern error not reported
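// When the write itself fails (duplicate key), getLastError reports the write error in
// err/code and suppresses the write concern error: ok is true and errmsg is empty.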
coll.remove({});
coll.insert({_id: -1});
coll.insert({_id: -1});
printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 'invalid'}));
assert(gle.ok);
assert(gle.err);
assert(gle.code);
assert(!gle.errmsg);
assert.eq(coll.count(), 1);

//
// Successful remove on one shard, write concern timeout on the other
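// Stopping an rs0 secondary means w:3 cannot be satisfied on rs0, so the single timeout
// surfaces as err: 'timeout' with wtimeout set and per-shard details in gle.shards.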
var s0Id = st.rs0.getNodeId(st.rs0._slaves[0]);
st.rs0.stop(s0Id);
coll.remove({});
st.rs1.awaitReplication();  // Ensure rs1 is fully replicated so only rs0 can time out
printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 3, wtimeout: 5 * 1000}));
assert(gle.ok);
assert.eq(gle.err, 'timeout');
assert(gle.wtimeout);
assert(gle.shards);
assert.eq(coll.count(), 0);

//
// Successful remove on both shards, write concern timeout on both
// Two shard timeouts are not aggregated into a single timeout
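// With a secondary stopped in each shard, w:3 cannot be satisfied anywhere, so getLastError
// fails with WriteConcernFailed (code 64), leaves the top-level wtimeout flag unset, and
// reports the individual shard errors in gle.errs.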
var s1Id = st.rs1.getNodeId(st.rs1._slaves[0]);
st.rs1.stop(s1Id);
// New writes to both shards to ensure the remove does something on each of them
coll.insert({_id: -1});
coll.insert({_id: 1});

coll.remove({});
printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 3, wtimeout: 5 * 1000}));

assert(!gle.ok);
assert(gle.errmsg);
assert.eq(gle.code, 64);  // WriteConcernFailed - needed for backwards compatibility
assert(!gle.wtimeout);
assert(gle.shards);
assert(gle.errs);
assert.eq(coll.count(), 0);

//
// First replica set with no primary
//

//
// Successful bulk insert on both shards, host goes down before gle (error contacting host)
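// After rs0's primary is shut down, the follow-up getLastError has to contact that now-dead
// host and fails, even though the documents were already written (verified by the count below).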
coll.remove({});
coll.insert([{_id: 1}, {_id: -1}]);
// Wait for the writes to reach both shards before shutting down rs0's primary.
printjson(gle = coll.getDB().runCommand({getLastError: 1}));
st.rs0.stop(st.rs0.getPrimary(), true);  // wait for stop
printjson(gle = coll.getDB().runCommand({getLastError: 1}));
// Should get an error about contacting dead host.
assert(!gle.ok);
assert(gle.errmsg);
assert.eq(coll.count({_id: 1}), 1);

//
// Bulk insert across both shards; the write to the first replica set (no primary) fails
// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
// successful writes from.
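// The insert routed to rs0 fails outright (no primary), while the insert routed to rs1
// succeeds. getLastError only needs to contact shards that reported successful writes, so it
// returns ok with the write failure in err rather than a host-contact error.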
coll.remove({_id: 1});
coll.insert([{_id: 1}, {_id: -1}]);

printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert(gle.err);
assert.eq(coll.count({_id: 1}), 1);

st.stop();
})();