blob: c0a48b500e5054c029d9392198428736d33bafc8 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
|
/**
 * Verifies that the $group stage removes its spill files from dbpath + '/_tmp'
 * once the aggregation completes, in both the classic and SBE engines.
 */
(function() {
"use strict";
const kMemoryLimitMb = 16;
const kMemoryLimitBytes = kMemoryLimitMb * 1024 * 1024;
// Lower both $group memory thresholds (classic and SBE) so our data set forces spilling.
const mongod = MongoRunner.runMongod({
    setParameter: {
        internalDocumentSourceGroupMaxMemoryBytes: kMemoryLimitBytes,
        internalQuerySlotBasedExecutionHashAggApproxMemoryUseInBytesBeforeSpill: kMemoryLimitBytes
    }
});
const testDb = mongod.getDB(jsTestName());
// Populate a collection whose documents collectively exceed the memory limit.
testDb.largeColl.drop();
const bigString = "A".repeat(1024 * 1024);  // 1MB string
for (let i = 0; i < kMemoryLimitMb + 1; ++i) {
    assert.commandWorked(testDb.largeColl.insert({x: i, largeStr: bigString + i}));
}
// Runs 'pipeline' and checks spill behavior: it must fail without disk use (proving it
// needs to spill), and after a successful run no temp files may remain under '_tmp'.
function assertSpillFilesCleanedUp(pipeline) {
    assert.throwsWithCode(() => testDb.largeColl.aggregate(pipeline, {allowDiskUse: false}),
                          ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
    testDb.largeColl.aggregate(pipeline).itcount();
    assert.eq(listFiles(mongod.dbpath + "/_tmp").length, 0);
}
const groupStage = {$group: {_id: '$largeStr', minId: {$min: '$_id'}}};
// With $_internalInhibitOptimization present, $group runs in the classic engine.
assertSpillFilesCleanedUp([{$_internalInhibitOptimization: {}}, groupStage]);
// Without it, the $group is eligible to run in the SBE engine.
assertSpillFilesCleanedUp([groupStage]);
MongoRunner.stopMongod(mongod);
})();
|