summaryrefslogtreecommitdiff
path: root/jstests/core/explain_execution_error.js
blob: 43ea960e8d320adc484500cfc67ace43e51744d6 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
// @tags: [
//   assumes_balancer_off,
//   requires_getmore,
// ]

// Test that even when the execution of a query fails, explain reports query
// planner information.

// FixtureHelpers supplies shard-aware utilities (e.g. numberOfShardsForCollection),
// used below to size the collection proportionally to the number of shards.
load("jstests/libs/fixture_helpers.js");  // For FixtureHelpers.

var t = db.explain_execution_error;
t.drop();  // Start from an empty collection so the test is re-runnable.

// Holds the raw explain command response inspected by each case below.
var result;

/**
 * Asserts that the execution stats section of 'explain' records at least one
 * execution failure, complete with an "errorMessage" and an "errorCode".
 */
function assertExecError(explain) {
    const topStats = explain.executionStats;
    const topStage = topStats.executionStages.stage;

    // On a sharded cluster the per-shard stats hang off the merging stage;
    // otherwise the top-level stats object is the only one to inspect.
    const perShardStats = (topStage === "SINGLE_SHARD" || topStage === "SHARD_MERGE_SORT")
        ? topStats.executionStages.shards
        : [topStats];

    // In a sharded environment, we only know that at least one of the shards will fail, we can't
    // expect all of them to fail, since there may be different amounts of data on each shard.
    const failures = perShardStats.filter((stats) => !stats.executionSuccess);
    for (const stats of failures) {
        assert("errorMessage" in stats,
               `Expected "errorMessage" to be present in ${tojson(stats)}`);
        assert("errorCode" in stats,
               `Expected "errorCode" to be present in ${tojson(stats)}`);
    }
    assert(failures.length > 0,
           `Expected at least one shard to have failed: ${tojson(explain)}`);
}

/**
 * Asserts that every execution stats section of 'explain' reports a successful
 * execution and carries no error fields.
 */
function assertExecSuccess(explain) {
    const topStats = explain.executionStats;
    const topStage = topStats.executionStages.stage;

    // Sharded explains nest the per-shard stats under the merging stage.
    const statsToCheck = (topStage === "SINGLE_SHARD" || topStage === "SHARD_MERGE_SORT")
        ? topStats.executionStages.shards
        : [topStats];

    statsToCheck.forEach((stats) => {
        assert.eq(true, stats.executionSuccess);
        assert(!("errorMessage" in stats),
               `Expected "errorMessage" not to be present in ${tojson(stats)}`);
        assert(!("errorCode" in stats),
               `Expected "errorCode" not to be present in ${tojson(stats)}`);
    });
}

// Build a string of at least 1 MB by repeated doubling (final length is 2^20).
let bigStr = "x";
do {
    bigStr += bigStr;
} while (bigStr.length < 1024 * 1024);

// Populate roughly 120 MB of data per shard so that a blocking sort of the whole
// collection is guaranteed to exceed the sort memory limit somewhere.
const numShards = FixtureHelpers.numberOfShardsForCollection(t);
const totalDocs = 120 * numShards;
for (let i = 0; i < totalDocs; i++) {
    assert.commandWorked(t.insert({a: bigStr, b: 1, c: i}));
}

// A query which sorts the whole collection by "b" should throw an error due to hitting the
// memory limit for sort.
assert.throws(() => t.find({a: {$exists: true}}).sort({b: 1}).itcount());

// The same find command is explained below at each verbosity level.
const failingFindCmd = {find: t.getName(), filter: {a: {$exists: true}}, sort: {b: 1}};

// "queryPlanner" verbosity does not execute the query, so explain succeeds outright.
result = db.runCommand({explain: failingFindCmd, verbosity: "queryPlanner"});
assert.commandWorked(result);
assert("queryPlanner" in result);

// "executionStats" runs the winning plan: the explain command itself succeeds, but the
// stats section must record the underlying operation's failure.
result = db.runCommand({explain: failingFindCmd, verbosity: "executionStats"});
assert.commandWorked(result);
assert("queryPlanner" in result);
assert("executionStats" in result);
assertExecError(result);

// "allPlansExecution" also runs the plan and must likewise surface the failure.
result = db.runCommand({explain: failingFindCmd, verbosity: "allPlansExecution"});
assert.commandWorked(result);
assert("queryPlanner" in result);
assert("executionStats" in result);
assert("allPlansExecution" in result.executionStats);
assertExecError(result);

// Now we introduce two indices. One ({b: 1}) provides the requested sort order, the
// other ({c: 1}) does not. Check the index builds succeeded, consistent with how every
// other command result in this test is verified.
assert.commandWorked(t.createIndex({b: 1}));
assert.commandWorked(t.createIndex({c: 1}));

// The query should no longer fail with a memory limit error because the planner can obtain
// the sort by scanning the {b: 1} index.
assert.eq(40, t.find({c: {$lt: 40}}).sort({b: 1}).itcount());

// The explain should succeed at all verbosity levels because the query itself succeeds.
const passingFindCmd = {find: t.getName(), filter: {c: {$lt: 40}}, sort: {b: 1}};

// First test "queryPlanner" verbosity.
result = db.runCommand({explain: passingFindCmd, verbosity: "queryPlanner"});
assert.commandWorked(result);
assert("queryPlanner" in result);

// "executionStats" verbosity must report a successful execution with no error fields.
result = db.runCommand({explain: passingFindCmd, verbosity: "executionStats"});
assert.commandWorked(result);
assert("queryPlanner" in result);
assert("executionStats" in result);
assertExecSuccess(result);

// We expect allPlansExecution verbosity to show execution stats for both candidate plans.
result = db.runCommand({explain: passingFindCmd, verbosity: "allPlansExecution"});
assert.commandWorked(result);
assert("queryPlanner" in result);
assert("executionStats" in result);
assert("allPlansExecution" in result.executionStats);
assertExecSuccess(result);