| field | value | date |
|---|---|---|
| author | David Storch <david.storch@mongodb.com> | 2023-01-30 22:54:10 +0000 |
| committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2023-01-31 01:27:54 +0000 |
| commit | 355aadd8ff9d5599da2983990f87c7ec300a972d | |
| tree | b75d91aea6c48c0e47edcff1c3a9ededde8c3354 /jstests/noPassthrough | |
| parent | 40c93f028e36f78c06756f4bfd358d240bdd9b34 | |
SERVER-70395 Change spilling for SBE HashAggStage to use a more efficient algorithm
The new algorithm spills the entire hash table to a
RecordStore whenever the memory budget is exceeded. Once all
the input is consumed, it switches to a streaming approach,
merging the partial aggregates recovered from disk.
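To make the described spill-then-merge pattern concrete, here is a minimal, self-contained C++ sketch. It is not the SBE HashAggStage implementation: the names (HashAgg, addInput, finalize, Run) are hypothetical, an in-memory map stands in for the RecordStore-backed spill run, and the merge step materializes the result instead of streaming over sorted runs as the real algorithm does.

    // Illustrative sketch only, not MongoDB code. It shows the general pattern:
    // aggregate into an in-memory table, spill the whole table as a "run" when the
    // memory budget is exceeded, then merge the partial aggregates from all runs.
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    using Key = std::string;
    using PartialAgg = long long;        // e.g. a running count or sum
    using Run = std::map<Key, PartialAgg>;  // stands in for a spilled RecordStore

    struct HashAgg {
        std::size_t memBudgetBytes;
        std::map<Key, PartialAgg> table;  // in-memory "hash table" (a map keeps the sketch simple)
        std::vector<Run> spilledRuns;     // written to disk in a real implementation

        static std::size_t approxEntryBytes(const Key& k) {
            return k.size() + sizeof(PartialAgg);
        }

        std::size_t approxTableBytes() const {
            std::size_t total = 0;
            for (const auto& kv : table) total += approxEntryBytes(kv.first);
            return total;
        }

        // Consume one input record: aggregate into the table, and if the budget is
        // exceeded, spill the *entire* table as one run and start over empty.
        void addInput(const Key& key, PartialAgg value) {
            table[key] += value;
            if (approxTableBytes() > memBudgetBytes) {
                spilledRuns.push_back(std::move(table));
                table.clear();
            }
        }

        // After all input is consumed, merge the partial aggregates recovered from
        // the spilled runs (plus whatever is still in memory) into the final result.
        std::map<Key, PartialAgg> finalize() {
            spilledRuns.push_back(std::move(table));
            table.clear();
            std::map<Key, PartialAgg> result;
            for (const auto& run : spilledRuns)
                for (const auto& kv : run) result[kv.first] += kv.second;
            return result;
        }
    };

    int main() {
        HashAgg agg{/*memBudgetBytes=*/32};  // tiny budget to force spilling
        for (const char* k : {"a", "b", "a", "c", "b", "a"}) agg.addInput(k, 1);
        for (const auto& [k, total] : agg.finalize())
            std::cout << k << " -> " << total << "\n";
        return 0;
    }

One consequence visible in the test change below: when the budget is small enough that every input forces a spill, each spill event writes a single record, so the number of spill events equals the number of spilled records.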
Diffstat (limited to 'jstests/noPassthrough')
-rw-r--r-- | jstests/noPassthrough/spill_to_disk_secondary_read.js | 6 |
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/jstests/noPassthrough/spill_to_disk_secondary_read.js b/jstests/noPassthrough/spill_to_disk_secondary_read.js
index 217ce510eef..8ab9178b4c5 100644
--- a/jstests/noPassthrough/spill_to_disk_secondary_read.js
+++ b/jstests/noPassthrough/spill_to_disk_secondary_read.js
@@ -91,8 +91,10 @@ const readColl = secondary.getDB("test").foo;
     assert(hashAggGroup.hasOwnProperty("usedDisk"), hashAggGroup);
     assert(hashAggGroup.usedDisk, hashAggGroup);
     assert.eq(hashAggGroup.spilledRecords, expectedSpilledRecords, hashAggGroup);
-    assert.gte(hashAggGroup.spilledBytesApprox, expectedSpilledBytesAtLeast, hashAggGroup);
-    assert.gt(hashAggGroup.spilledRecordEstimatedStorageSize, 0, hashAggGroup);
+    // We expect each record to be individually spilled, so the number of spill events and the
+    // number of spilled records should be equal.
+    assert.eq(hashAggGroup.numSpills, hashAggGroup.spilledRecords, hashAggGroup);
+    assert.gt(hashAggGroup.spilledDataStorageSize, expectedSpilledBytesAtLeast, hashAggGroup);
 } finally {
     assert.commandWorked(secondary.adminCommand({
         setParameter: 1,