summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlberto Massari <alberto.massari@mongodb.com>2022-08-19 08:30:53 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-08-19 08:55:14 +0000
commit0bc559a38b06ca9b408481c33e39d3187aa636be (patch)
tree37636807730cee4f87b1b1f3d1cf3c6db8669cc5
parent22fa5394f6fcaebfcc8fc90fbe13764bbf043745 (diff)
downloadmongo-0bc559a38b06ca9b408481c33e39d3187aa636be.tar.gz
SERVER-59834 Backport test to v5.0
-rw-r--r--jstests/noPassthrough/group_tmp_file_cleanup.js46
1 file changed, 46 insertions, 0 deletions
diff --git a/jstests/noPassthrough/group_tmp_file_cleanup.js b/jstests/noPassthrough/group_tmp_file_cleanup.js
new file mode 100644
index 00000000000..d456e73ede4
--- /dev/null
+++ b/jstests/noPassthrough/group_tmp_file_cleanup.js
@@ -0,0 +1,46 @@
/**
 * Test that $group cleans up temporary files under dbpath + '/_tmp'.
 *
 * Runs a memory-constrained $group twice — once pinned to the classic engine
 * via $_internalInhibitOptimization, once eligible for SBE — and verifies that
 * (a) the aggregation fails without allowDiskUse, proving it must spill, and
 * (b) after a successful spilling run, no temp files remain in dbpath/_tmp.
 */

(function() {
"use strict";

const memoryLimitMb = 16;
const memoryLimitBytes = memoryLimitMb * 1024 * 1024;

// Start mongod with a reduced memory limit for the $group stage so the
// aggregations below are forced to spill to disk.
const conn = MongoRunner.runMongod({
    setParameter: {
        internalDocumentSourceGroupMaxMemoryBytes: memoryLimitBytes,
    }
});
// Fail fast with a clear message if the server did not start; otherwise the
// getDB() call below would die on a confusing null dereference.
assert.neq(null, conn, "mongod failed to start");
const testDb = conn.getDB(jsTestName());

// Create a collection exceeding the memory limit: one more ~1MB document than
// the limit allows, each with a distinct _id group key so nothing collapses.
testDb.largeColl.drop();
const largeStr = "A".repeat(1024 * 1024);  // 1MB string
for (let i = 0; i < memoryLimitMb + 1; ++i) {
    assert.commandWorked(testDb.largeColl.insert({x: i, largeStr: largeStr + i}));
}

// Inhibit optimization so that $group runs in the classic engine.
let pipeline =
    [{$_internalInhibitOptimization: {}}, {$group: {_id: '$largeStr', minId: {$min: '$_id'}}}];

// Make sure that the pipeline needs to spill to disk.
assert.throwsWithCode(() => testDb.largeColl.aggregate(pipeline, {allowDiskUse: false}),
                      ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);

// With disk use allowed, the aggregation succeeds and must leave no temporary
// files behind in dbpath + '/_tmp'.
testDb.largeColl.aggregate(pipeline, {allowDiskUse: true}).itcount();
assert.eq(listFiles(conn.dbpath + "/_tmp").length, 0);

// Run the pipeline without $_internalInhibitOptimization so that $group runs in the sbe engine.
pipeline = [{$group: {_id: '$largeStr', minId: {$min: '$_id'}}}];

// Make sure that the pipeline needs to spill to disk.
assert.throwsWithCode(() => testDb.largeColl.aggregate(pipeline, {allowDiskUse: false}),
                      ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
testDb.largeColl.aggregate(pipeline, {allowDiskUse: true}).itcount();
assert.eq(listFiles(conn.dbpath + "/_tmp").length, 0);

MongoRunner.stopMongod(conn);
})();