author     Adityavardhan Agrawal <adi.agrawal@mongodb.com>    2023-01-30 17:06:02 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2023-01-30 19:31:39 +0000
commit     5def05b950e7de8609be92d634ccd285b95bfbb9 (patch)
tree       590d3e0c34ff98d9d4ca56ddb86feb4ff695fdcd /jstests
parent     59e890e0aa8db10826e6d1931e4a68559541c225 (diff)
download   mongo-5def05b950e7de8609be92d634ccd285b95bfbb9.tar.gz
SERVER-71832 Report group spill metrics in serverStatus
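
The new counters appear under metrics.query.group in serverStatus. A minimal mongo-shell sketch for reading them (the field names are the ones asserted in the test below):

    const groupMetrics = db.serverStatus().metrics.query.group;
    printjson({
        spills: groupMetrics.spills,                                    // times $group spilled to disk
        spillFileSizeBytes: groupMetrics.spillFileSizeBytes,            // on-disk size of the spill files
        numBytesSpilledEstimate: groupMetrics.numBytesSpilledEstimate,  // estimated size of the spilled data
    });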
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/noPassthrough/group_spill_metrics.js | 60
1 file changed, 60 insertions(+), 0 deletions(-)
diff --git a/jstests/noPassthrough/group_spill_metrics.js b/jstests/noPassthrough/group_spill_metrics.js
new file mode 100644
index 00000000000..6a946ee25fc
--- /dev/null
+++ b/jstests/noPassthrough/group_spill_metrics.js
@@ -0,0 +1,60 @@
+/**
+ * Tests that the $group stage reports spill statistics in serverStatus.
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/analyze_plan.js"); // For getAggPlanStage().
+
+const conn = MongoRunner.runMongod();
+const db = conn.getDB('test');
+const coll = db.explain_group_stage_exec_stats;
+coll.drop();
+
+const bigStr = Array(1025).toString(); // 1KB of ','
+const maxMemoryLimitForGroupStage = 1024 * 300;
+const nDocs = 1000;
+const nGroups = 50;
+
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 1; i <= nDocs; i++) {
+ bulk.insert({_id: i, a: i, b: i % nGroups, bigStr: bigStr});
+}
+assert.commandWorked(bulk.execute());
+
+const pipeline = [
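+    // $_internalInhibitOptimization is an internal stage used in tests to keep the
+    // optimizer from reordering or pushing down the stages that follow it, so the
+    // $group below executes (and spills) exactly as written.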
+ {$_internalInhibitOptimization: {}},
+ {$match: {a: {$gt: 0}}},
+ {$sort: {b: 1}},
+ {$group: {_id: "$b", count: {$sum: 1}, push: {$push: "$bigStr"}, set: {$addToSet: "$bigStr"}}},
+];
+
+const metricsBefore = db.serverStatus().metrics.query.group;
+
+// Set the $group memory limit low to force spilling to disk.
+assert.commandWorked(db.adminCommand(
+ {setParameter: 1, internalDocumentSourceGroupMaxMemoryBytes: maxMemoryLimitForGroupStage}));
+
+const result = getAggPlanStage(coll.explain("executionStats").aggregate(pipeline), "$group");
+
+const metricsAfter = db.serverStatus().metrics.query.group;
+
+const expectedSpills = result.spills + metricsBefore.spills;
+const expectedSpillFileSizeBytes = result.spillFileSizeBytes + metricsBefore.spillFileSizeBytes;
+const expectedNumBytesSpilledEstimate =
+ result.numBytesSpilledEstimate + metricsBefore.numBytesSpilledEstimate;
+
+assert.gt(metricsAfter.spills, metricsBefore.spills, pipeline);
+
+assert.eq(metricsAfter.spills, expectedSpills, pipeline);
+
+assert.gt(metricsAfter.spillFileSizeBytes, metricsBefore.spillFileSizeBytes, pipeline);
+
+assert.eq(metricsAfter.spillFileSizeBytes, expectedSpillFileSizeBytes, pipeline);
+
+assert.gt(metricsAfter.numBytesSpilledEstimate, metricsBefore.numBytesSpilledEstimate, pipeline);
+
+assert.eq(metricsAfter.numBytesSpilledEstimate, expectedNumBytesSpilledEstimate, pipeline);
+
+MongoRunner.stopMongod(conn);
+}());
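
A usage note: as a noPassthrough test, this would typically be run through resmoke; a sketch, assuming a MongoDB source checkout and the standard no_passthrough suite name:

    python buildscripts/resmoke.py run --suites=no_passthrough jstests/noPassthrough/group_spill_metrics.js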