path: root/jstests/noPassthrough/validate_memory_limit.js
/**
 * Test that the memory usage of validate is properly limited according to the
 * maxValidateMemoryUsageMB parameter.
 *
 * @tags: [
 *   requires_persistence,
 *   requires_wiredtiger,
 * ]
 */
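// Note: maxValidateMemoryUsageMB is a server parameter; this test lowers it at runtime with,
// e.g., db.adminCommand({setParameter: 1, maxValidateMemoryUsageMB: 1}).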
(function() {
"use strict";

load("jstests/disk/libs/wt_file_helper.js");

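// 1 MB; the index keys generated below are sized relative to the 1 MB maxValidateMemoryUsageMB
// limit that checkValidate() sets.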
const kIndexKeyLength = 1024 * 1024;

const baseName = "validate_memory_limit";
const dbpath = MongoRunner.dataPath + baseName + "/";
let conn = MongoRunner.runMongod({dbpath: dbpath});
let coll = conn.getDB("test").getCollection("corrupt");

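// Corrupt the _id index by truncating its underlying WiredTiger table (via the helpers from
// wt_file_helper.js), then restart mongod so a subsequent validate will find missing index
// entries for every document.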
function corruptIndex() {
    const uri = getUriForIndex(coll, "_id_");
    conn = truncateUriAndRestartMongod(uri, conn);
    coll = conn.getDB("test").getCollection("corrupt");
}

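// Set maxValidateMemoryUsageMB to the given value, run validate, and check that validation fails
// with an error that not all inconsistencies could be reported, and that the number of reported
// missing index entries falls within [minMissingKeys, maxMissingKeys].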
function checkValidate(maxMemoryUsage, {minMissingKeys, maxMissingKeys}) {
    assert.commandWorked(
        conn.adminCommand({setParameter: 1, maxValidateMemoryUsageMB: maxMemoryUsage}));
    const res = coll.validate();
    assert.commandWorked(res);
    assert(!res.valid, tojson(res));
    const notAllReportedPrefix =
        "Not all index entry inconsistencies are reported due to memory limitations.";
    assert.containsPrefix(notAllReportedPrefix, res.errors, tojson(res));
    assert.gte(res.missingIndexEntries.length, minMissingKeys, tojson(res));
    assert.lte(res.missingIndexEntries.length, maxMissingKeys, tojson(res));
}

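// Check that the server log contains at least one entry with id 7463100 whose attributes include
// the spec of the corrupted _id index.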
function checkValidateLogs() {
    assert(checkLog.checkContainsWithAtLeastCountJson(
        conn, 7463100, {"spec": {"v": 2, "key": {"_id": 1}, "name": "_id_"}}, 1));
}

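// Run validate with {repair: true} and check that a repair was performed even though the
// collection still fails validation afterwards.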
function checkValidateRepair() {
    const res = coll.validate({repair: true});
    assert.commandWorked(res);
    assert(!res.valid, tojson(res));
    assert(res.repaired, tojson(res));
}

// Insert a document with a key larger than maxValidateMemoryUsageMB and test that we still report
// at least one inconsistency.
const indexKey = "a".repeat(kIndexKeyLength);
assert.commandWorked(coll.insert({_id: indexKey}));
corruptIndex();
checkValidate(1, {minMissingKeys: 1, maxMissingKeys: 1});
checkValidateLogs();

// Repair can't fully succeed when not all index inconsistencies are reported.
checkValidateRepair();

// Clear collection between tests.
coll.drop();

// Test that if we have keys distributed across many buckets whose total size would exceed
// maxValidateMemoryUsageMB, we report as many inconsistencies as we can.
for (let i = 0; i < 10; ++i) {
    const indexKey = i.toString().repeat(kIndexKeyLength / 5);
    assert.commandWorked(coll.insert({_id: indexKey}));
}

corruptIndex();
// If each key is maxMem/5, then we can keep 4 of them (the 5th would put us at the limit).
// However, each key is counted twice, so realistically we only expect to track 2 of them. There's
// also a small chance that hash collisions lead to us reporting only 1.
checkValidate(1, {minMissingKeys: 1, maxMissingKeys: 2});
checkValidateLogs();

// Repair proceeds, but is incomplete when only some inconsistencies are reported.
checkValidateRepair();

MongoRunner.stopMongod(conn, null, {skipValidation: true});
})();