summaryrefslogtreecommitdiff
path: root/jstests/sharding/explain_cmd.js
blob: 6cf9b78d82a89391965446fb56ef6a8dcc579603 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
// Tests for the mongos explain command. Verifies explain output for scatter-gather
// reads, passthrough commands, and targeted vs. broadcast writes, and confirms that
// explained writes are not actually applied.
(function() {
    'use strict';

    // Create a cluster with 2 shards.
    var st = new ShardingTest({shards: 2});

    var db = st.s.getDB("test");
    var explain;

    // Setup a collection that will be sharded. The shard key will be 'a'. There's also an index on
    // 'b'.
    var collSharded = db.getCollection("mongos_explain_cmd");
    collSharded.drop();
    collSharded.ensureIndex({a: 1});
    collSharded.ensureIndex({b: 1});

    // Enable sharding.
    assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
    st.ensurePrimaryShard(db.getName(), st.shard1.shardName);
    assert.commandWorked(
        db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}}));

    // Pre-split the collection to ensure that both shards have chunks. Explicitly
    // move chunks since the balancer is disabled. The moveChunk results are printed
    // rather than asserted on, since a chunk may already live on the target shard.
    assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 1}}));
    printjson(db.adminCommand(
        {moveChunk: collSharded.getFullName(), find: {a: 1}, to: st.shard0.shardName}));

    assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 2}}));
    printjson(db.adminCommand(
        {moveChunk: collSharded.getFullName(), find: {a: 2}, to: st.shard1.shardName}));

    // Put data on each shard.
    for (var i = 0; i < 3; i++) {
        assert.writeOK(collSharded.insert({_id: i, a: i, b: 1}));
    }

    st.printShardingStatus();

    // Test a scatter-gather count command.
    assert.eq(3, collSharded.count({b: 1}));

    // Explain the scatter-gather count.
    explain = db.runCommand(
        {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});

    // Validate some basic properties of the result.
    printjson(explain);
    assert.commandWorked(explain);
    assert("queryPlanner" in explain);
    assert("executionStats" in explain);
    assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
    assert.eq(2, explain.executionStats.executionStages.shards.length);
    assert("serverInfo" in explain, explain);
    assert.hasFields(explain.serverInfo, ['host', 'port', 'version', 'gitVersion']);

    // An explain of a command that doesn't exist should fail gracefully.
    explain = db.runCommand({
        explain: {nonexistent: collSharded.getName(), query: {b: 1}},
        verbosity: "allPlansExecution"
    });
    printjson(explain);
    assert.commandFailed(explain);

    // -------

    // Setup a collection that is not sharded.
    var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded");
    collUnsharded.drop();
    collUnsharded.ensureIndex({a: 1});
    collUnsharded.ensureIndex({b: 1});

    for (var i = 0; i < 3; i++) {
        assert.writeOK(collUnsharded.insert({_id: i, a: i, b: 1}));
    }
    assert.eq(3, collUnsharded.count({b: 1}));

    explain = db.runCommand({
        explain: {
            group: {
                ns: collUnsharded.getName(),
                key: "a",
                cond: "b",
                $reduce: function(curr, result) {},
                initial: {}
            }
        },
        verbosity: "allPlansExecution"
    });

    // Basic validation: a group command can only be passed through to an unsharded collection,
    // so we should confirm that the mongos stage is always SINGLE_SHARD.
    printjson(explain);
    assert.commandWorked(explain);
    assert("queryPlanner" in explain);
    assert("executionStats" in explain);
    assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);

    // The same group should fail over the sharded collection, because group is only supported
    // if it is passed through to an unsharded collection.
    explain = db.runCommand({
        explain: {
            group: {
                ns: collSharded.getName(),
                key: "a",
                cond: "b",
                $reduce: function(curr, result) {},
                initial: {}
            }
        },
        verbosity: "allPlansExecution"
    });
    printjson(explain);
    assert.commandFailed(explain);

    // -------

    // Explain a delete operation and verify that it hits all shards without the shard key
    explain = db.runCommand({
        explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
        verbosity: "allPlansExecution"
    });
    assert.commandWorked(explain, tojson(explain));
    assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
    assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
    assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE");
    assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE");
    // Check that the deletes didn't actually happen.
    assert.eq(3, collSharded.count({b: 1}));

    // Explain a delete operation and verify that it hits only one shard with the shard key
    explain = db.runCommand({
        explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
        verbosity: "allPlansExecution"
    });
    assert.commandWorked(explain, tojson(explain));
    assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
    // Check that the deletes didn't actually happen.
    assert.eq(3, collSharded.count({b: 1}));

    // Check that we fail gracefully if we try to do an explain of a write batch that has more
    // than one operation in it.
    explain = db.runCommand({
        explain: {
            delete: collSharded.getName(),
            deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]
        },
        verbosity: "allPlansExecution"
    });
    assert.commandFailed(explain, tojson(explain));

    // Explain a multi update operation and verify that it hits all shards
    explain = db.runCommand({
        explain:
            {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
        verbosity: "allPlansExecution"
    });
    assert.commandWorked(explain, tojson(explain));
    assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
    assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
    assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE");
    assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE");
    // Check that the update didn't actually happen.
    assert.eq(0, collSharded.count({b: 10}));

    // Explain an upsert operation and verify that it hits only a single shard
    explain = db.runCommand({
        explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
        verbosity: "allPlansExecution"
    });
    assert.commandWorked(explain, tojson(explain));
    assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
    // Check that the upsert didn't actually happen.
    assert.eq(0, collSharded.count({a: 10}));

    // Explain an upsert operation which cannot be targeted, ensure an error is thrown
    explain = db.runCommand({
        explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
        verbosity: "allPlansExecution"
    });
    assert.commandFailed(explain, tojson(explain));

    st.stop();
})();