author     Faustoleyva54 <fausto.leyva@mongodb.com>          2023-02-16 03:05:04 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-04-13 21:26:19 +0000
commit     9e5c48ae34d786849ef64df35e3ed31b57358b09
tree       7f737d181f7657dd3eca8bcb719bdb18175a83cc
parent     f000834e8834248a3f43b0c0198216ec0d15064c
SERVER-73936 Clean up timeseries_direct_update.js and add logs
(cherry picked from commit 795b10df8afc75c3f6e01d0d029a967556efc536)
-rw-r--r--  jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js        | 98
-rw-r--r--  jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js  | 73
2 files changed, 86 insertions(+), 85 deletions(-)
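In outline, the change replaces the hand-rolled insertMany() batching with unordered bulk writes, pins the time field to a fixed timestamp, scopes the database name to jsTestName(), and threads a formatStatsLog() helper through each assertion so a failure logs the collection's timeseries stats. A minimal sketch of that pattern follows; the collection and field names are illustrative, not the tests' own setup.

    // Illustrative sketch only: the bulk-insert + stats-logging pattern adopted in the diff below.
    const coll = db.getCollection('t');
    coll.drop();
    assert.commandWorked(
        db.createCollection(coll.getName(), {timeseries: {timeField: 'time', metaField: 'meta'}}));

    // Attach the full timeseries stats to an assertion message so failures explain themselves.
    const formatStatsLog = (stats) => "Timeseries stats: " + tojson(stats);

    // One unordered bulk operation instead of manually batched insertMany() calls.
    const bulk = coll.initializeUnorderedBulkOp();
    for (let i = 0; i < 10; i++) {
        bulk.insert({_id: i, time: ISODate('2023-02-13T01:00:00Z'), meta: i, value: "a"});
    }
    assert.commandWorked(bulk.execute());

    // Check bucket counters, logging the stats object if the expectation does not hold.
    const stats = assert.commandWorked(coll.stats()).timeseries;
    assert.eq(stats.numBucketsClosedDueToSize, 0, formatStatsLog(stats));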
diff --git a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js
index dd799c3bd94..2baf8294674 100644
--- a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js
+++ b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js
@@ -20,6 +20,9 @@ const minWiredTigerCacheSizeGB = 0.256;
 const cacheSize = minWiredTigerCacheSizeGB * 1000 * 1000 * 1000;  // 256 MB
 const defaultBucketMaxSize = 128000;                              // 125 KB
 const minBucketCount = 10;
+const timeFieldName = 'time';
+const metaFieldName = 'meta';
+const timestamp = ISODate('2023-02-13T01:00:00Z');

 // A cardinality higher than this calculated value will call for smaller bucket size limit caused by
 // cache pressure.
@@ -32,9 +35,11 @@ const replSet = new ReplSetTest({
 replSet.startSet({setParameter: {timeseriesBucketMaxSize: defaultBucketMaxSize}});
 replSet.initiate();

-const db = replSet.getPrimary().getDB("test");
+const db = replSet.getPrimary().getDB(jsTestName());
 const coll = db.getCollection('t');
 coll.drop();
+assert.commandWorked(db.createCollection(
+    coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));

 if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) {
     replSet.stopSet();
@@ -43,43 +48,30 @@ if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) {
     return;
 }

-const timeFieldName = 'time';
-const metaFieldName = 'meta';
-
-const resetCollection = (() => {
-    coll.drop();
-    assert.commandWorked(db.createCollection(
-        coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
+// Helper to log timeseries stats.
+const formatStatsLog = ((stats) => {
+    return "Timeseries stats: " + tojson(stats);
 });

 // Inserts documents into the collection with increasing meta fields to generate N buckets. We make
 // sure to exceed the bucket min count per bucket to bypass large measurement checks.
 const initializeBucketsPastMinCount = function(numOfBuckets = 1) {
-    jsTestLog("Inserting and generating buckets.");
-    let batch = [];
+    jsTestLog("Inserting and generating buckets. Targeting '" + numOfBuckets + "' buckets.");
+    let bulk = coll.initializeUnorderedBulkOp();
     for (let i = 0; i < numOfBuckets; i++) {
         for (let j = 0; j < minBucketCount; ++j) {
             const doc = {
                 _id: '' + i + j,
-                [timeFieldName]: ISODate(),
+                [timeFieldName]: timestamp,
                 [metaFieldName]: i,
                 value: "a".repeat(1000)
             };
-            batch.push(doc);
-
-            if (batch.length >= 100) {
-                assert.commandWorked(coll.insertMany(batch));
-                batch = [];
-            }
+            bulk.insert(doc);
         }
     }
-    if (batch.length > 0) {
-        assert.commandWorked(coll.insertMany(batch));
-    }
+    assert.commandWorked(bulk.execute());
 };

-resetCollection();
-
 const belowCardinalityThreshold = cardinalityForCachePressure;
 initializeBucketsPastMinCount(belowCardinalityThreshold);

@@ -88,34 +80,30 @@ let bucketsClosedDueToSize = timeseriesStats.numBucketsClosedDueToSize;
 let bucketsClosedDueToCachePressure = timeseriesStats.numBucketsClosedDueToCachePressure;
 let compressedBuckets = timeseriesStats.numCompressedBuckets;

+// Ensure we have not closed any buckets due to size or cache pressure.
+assert.eq(bucketsClosedDueToSize, 0, formatStatsLog(timeseriesStats));
+assert.eq(bucketsClosedDueToCachePressure, 0, formatStatsLog(timeseriesStats));
+assert.eq(timeseriesStats.bucketCount, belowCardinalityThreshold, formatStatsLog(timeseriesStats));
+
 // We only end up doing two passes before we start to close buckets due to size limits.
 while (bucketsClosedDueToSize == 0) {
-    let batch = [];
+    jsTestLog("Inserting 50000 bytes of data into buckets.");
+    let bulk = coll.initializeUnorderedBulkOp();
     for (let i = 0; i < belowCardinalityThreshold; i++) {
-        const doc1 = {
+        bulk.insert({
             _id: '00' + i,
-            [timeFieldName]: ISODate(),
+            [timeFieldName]: timestamp,
             [metaFieldName]: i,
             value: "a".repeat(30000)
-        };
-        const doc2 = {
+        });
+        bulk.insert({
             _id: '00' + i,
-            [timeFieldName]: ISODate(),
+            [timeFieldName]: timestamp,
             [metaFieldName]: i,
             value: "a".repeat(20000)
-        };
-        batch.push(doc1);
-        batch.push(doc2);
-
-        if (batch.length >= 100) {
-            assert.commandWorked(coll.insertMany(batch));
-            batch = [];
-        }
-    }
-
-    if (batch.length != 0) {
-        assert.commandWorked(coll.insertMany(batch));
+        });
     }
+    assert.commandWorked(bulk.execute());

     timeseriesStats = assert.commandWorked(coll.stats()).timeseries;
     bucketsClosedDueToSize = timeseriesStats.numBucketsClosedDueToSize;
@@ -125,30 +113,21 @@ while (bucketsClosedDueToSize == 0) {

 // On the second pass of inserts, we will close buckets due to the default size constraints. No
 // buckets should be closed due to cache pressure.
-assert.eq(bucketsClosedDueToSize, cardinalityForCachePressure);
-assert.eq(bucketsClosedDueToCachePressure, 0);
-assert.eq(compressedBuckets, cardinalityForCachePressure);
+assert.eq(bucketsClosedDueToSize, cardinalityForCachePressure, formatStatsLog(timeseriesStats));
+assert.eq(bucketsClosedDueToCachePressure, 0, formatStatsLog(timeseriesStats));
+assert.eq(compressedBuckets, cardinalityForCachePressure, formatStatsLog(timeseriesStats));

 // If we pass the cardinality point to simulate cache pressure, we will begin to see buckets closed
 // due to 'CachePressure' and not 'DueToSize'.
 const aboveCardinalityThreshold = cardinalityForCachePressure * 3 / 2;
 initializeBucketsPastMinCount(aboveCardinalityThreshold);

-let batch = [];
+let bulk = coll.initializeUnorderedBulkOp();
 for (let i = 0; i < aboveCardinalityThreshold; i++) {
-    const doc =
-        {_id: '00' + i, [timeFieldName]: ISODate(), [metaFieldName]: i, value: "a".repeat(20000)};
-    batch.push(doc);
-
-    if (batch.length >= 100) {
-        assert.commandWorked(coll.insertMany(batch));
-        batch = [];
-    }
-}
-if (batch.length != 0) {
-    print(batch.length);
-    assert.commandWorked(coll.insertMany(batch));
+    bulk.insert(
+        {_id: '00' + i, [timeFieldName]: timestamp, [metaFieldName]: i, value: "a".repeat(20000)});
 }
+assert.commandWorked(bulk.execute());

 timeseriesStats = assert.commandWorked(coll.stats()).timeseries;
 bucketsClosedDueToSize = timeseriesStats.numBucketsClosedDueToSize;
@@ -157,17 +136,18 @@ compressedBuckets = timeseriesStats.numCompressedBuckets;

 // We expect 'bucketsClosedDueToSize' to remain the same but 'bucketsClosedDueToCachePressure' to
 // increase.
-assert.eq(bucketsClosedDueToSize, cardinalityForCachePressure);
+assert.eq(bucketsClosedDueToSize, cardinalityForCachePressure, formatStatsLog(timeseriesStats));

 // Previously, the bucket max size was 128000 bytes, but under cache pressure using
 // 'aboveCardinalityThreshold', the max size drops to roughly ~85334. This means the old
 // measurements (up to 'cardinalityForCachePressure') will need to be closed since they are sized at
 // ~120000 bytes. The newly inserted measurements are only sized at ~(20000 * 3) bytes so stay open.
-assert.eq(bucketsClosedDueToCachePressure, cardinalityForCachePressure);
+assert.eq(
+    bucketsClosedDueToCachePressure, cardinalityForCachePressure, formatStatsLog(timeseriesStats));

 // We expect the number of compressed buckets to double (independent to whether the buckets were
 // closed due to size or cache pressure).
-assert.eq(compressedBuckets, 2 * cardinalityForCachePressure);
+assert.eq(compressedBuckets, 2 * cardinalityForCachePressure, formatStatsLog(timeseriesStats));

 replSet.stopSet();
 })();
diff --git a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js
index 3c644219837..f8e4d77fd29 100644
--- a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js
+++ b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js
@@ -21,6 +21,8 @@ const minWiredTigerCacheSizeGB = 0.256;
 const minWiredTigerCacheSize = minWiredTigerCacheSizeGB * 1024 * 1024 * 1024;  // 256 MB
 const measurementValueLength = 1 * 1024 * 1024;                                // 1 MB
 const defaultBucketMinCount = 10;
+const timeFieldName = 'time';
+const metaFieldName = 'meta';

 const replSet = new ReplSetTest({
     nodes: 1,
@@ -29,7 +31,7 @@ const replSet = new ReplSetTest({
 replSet.startSet({setParameter: {timeseriesBucketMaxSize: defaultBucketMaxSize}});
 replSet.initiate();

-const db = replSet.getPrimary().getDB("test");
+const db = replSet.getPrimary().getDB(jsTestName());
 let coll = db.getCollection('t');
 coll.drop();

@@ -40,8 +42,10 @@ if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) {
     return;
 }

-const timeFieldName = 'time';
-const metaFieldName = 'meta';
+// Helper to log timeseries stats.
+const formatStatsLog = ((stats) => {
+    return "Timeseries stats: " + tojson(stats);
+});

 const resetCollection = (() => {
     coll.drop();
@@ -52,12 +56,12 @@ const resetCollection = (() => {
 // Inserts small documents into the collection with increasing meta fields to generate N buckets.
 const initializeBuckets = function(numOfBuckets = 1) {
     jsTestLog("Inserting and generating buckets.");
-    let batch = [];
+    let bulk = coll.initializeUnorderedBulkOp();
     for (let i = 0; i < numOfBuckets; i++) {
         const doc = {_id: i, [timeFieldName]: ISODate(), [metaFieldName]: i, value: "a"};
-        batch.push(doc);
+        bulk.insert(doc);
     }
-    assert.commandWorked(coll.insertMany(batch));
+    assert.commandWorked(bulk.execute());
 };

 (function largeMeasurementsNoCachePressure() {
@@ -87,11 +91,16 @@ const initializeBuckets = function(numOfBuckets = 1) {
     expectedBucketCount++;

     let timeseriesStats = assert.commandWorked(coll.stats()).timeseries;
-    assert.eq(timeseriesStats.bucketCount, expectedBucketCount);
-    assert.eq(timeseriesStats.numBucketsClosedDueToSize, numBucketsClosedDueToSize);
+    assert.eq(timeseriesStats.bucketCount, expectedBucketCount, formatStatsLog(timeseriesStats));
+    assert.eq(timeseriesStats.numBucketsClosedDueToSize,
+              numBucketsClosedDueToSize,
+              formatStatsLog(timeseriesStats));
     assert.eq(timeseriesStats.numBucketsClosedDueToCachePressure,
-              numBucketsClosedDueToCachePressure);
-    assert.eq(timeseriesStats.numCompressedBuckets, numCompressedBuckets);
+              numBucketsClosedDueToCachePressure,
+              formatStatsLog(timeseriesStats));
+    assert.eq(timeseriesStats.numCompressedBuckets,
+              numCompressedBuckets,
+              formatStatsLog(timeseriesStats));

     // If we exceed the min bucket count of 10, we should close the bucket since it exceeds our
     // default bucket size of 125 KB. (This requires two additional insertions).
@@ -104,11 +113,16 @@ const initializeBuckets = function(numOfBuckets = 1) {
     numCompressedBuckets++;

     timeseriesStats = assert.commandWorked(coll.stats()).timeseries;
-    assert.eq(timeseriesStats.bucketCount, expectedBucketCount);
-    assert.eq(timeseriesStats.numBucketsClosedDueToSize, numBucketsClosedDueToSize);
+    assert.eq(timeseriesStats.bucketCount, expectedBucketCount, formatStatsLog(timeseriesStats));
+    assert.eq(timeseriesStats.numBucketsClosedDueToSize,
+              numBucketsClosedDueToSize,
+              formatStatsLog(timeseriesStats));
     assert.eq(timeseriesStats.numBucketsClosedDueToCachePressure,
-              numBucketsClosedDueToCachePressure);
-    assert.eq(timeseriesStats.numCompressedBuckets, numCompressedBuckets);
+              numBucketsClosedDueToCachePressure,
+              formatStatsLog(timeseriesStats));
+    assert.eq(timeseriesStats.numCompressedBuckets,
+              numCompressedBuckets,
+              formatStatsLog(timeseriesStats));

     // Since the maximum size for buckets is capped at 12 MB, we should hit the size limit before
     // closing the bucket due to the minimum count, so we expect to close the oversized bucket and
@@ -129,11 +143,16 @@ const initializeBuckets = function(numOfBuckets = 1) {
     numCompressedBuckets++;

     timeseriesStats = assert.commandWorked(coll.stats()).timeseries;
-    assert.eq(timeseriesStats.bucketCount, expectedBucketCount);
-    assert.eq(timeseriesStats.numBucketsClosedDueToSize, numBucketsClosedDueToSize);
+    assert.eq(timeseriesStats.bucketCount, expectedBucketCount, formatStatsLog(timeseriesStats));
+    assert.eq(timeseriesStats.numBucketsClosedDueToSize,
+              numBucketsClosedDueToSize,
+              formatStatsLog(timeseriesStats));
     assert.eq(timeseriesStats.numBucketsClosedDueToCachePressure,
-              numBucketsClosedDueToCachePressure);
-    assert.eq(timeseriesStats.numCompressedBuckets, numCompressedBuckets);
+              numBucketsClosedDueToCachePressure,
+              formatStatsLog(timeseriesStats));
+    assert.eq(timeseriesStats.numCompressedBuckets,
+              numCompressedBuckets,
+              formatStatsLog(timeseriesStats));
 })();

 (function largeMeasurementsWithCachePressure() {
@@ -161,10 +180,11 @@ const initializeBuckets = function(numOfBuckets = 1) {
     }

     let timeseriesStats = assert.commandWorked(coll.stats()).timeseries;
-    assert.eq(timeseriesStats.bucketCount, bucketCount);
-    assert.eq(timeseriesStats.numBucketsClosedDueToSize, 0);
-    assert.eq(timeseriesStats.numBucketsClosedDueToCachePressure, 0);
-    assert.eq(timeseriesStats.numCompressedBuckets, 0);
+    assert.eq(timeseriesStats.bucketCount, bucketCount, formatStatsLog(timeseriesStats));
+    assert.eq(timeseriesStats.numBucketsClosedDueToSize, 0, formatStatsLog(timeseriesStats));
+    assert.eq(
+        timeseriesStats.numBucketsClosedDueToCachePressure, 0, formatStatsLog(timeseriesStats));
+    assert.eq(timeseriesStats.numCompressedBuckets, 0, formatStatsLog(timeseriesStats));

     // We expect this insert to cause the bucket to close due to cache pressure since it will exceed
     // the rough cacheDerivedMaxSize of 5.5 MB and create a new bucket for this measurement.
@@ -177,10 +197,11 @@ const initializeBuckets = function(numOfBuckets = 1) {
     assert.commandWorked(coll.insert(doc));

     timeseriesStats = assert.commandWorked(coll.stats()).timeseries;
-    assert.eq(timeseriesStats.bucketCount, bucketCount + 1);
-    assert.eq(timeseriesStats.numBucketsClosedDueToSize, 0);
-    assert.eq(timeseriesStats.numBucketsClosedDueToCachePressure, 1);
-    assert.eq(timeseriesStats.numCompressedBuckets, 1);
+    assert.eq(timeseriesStats.bucketCount, bucketCount + 1, formatStatsLog(timeseriesStats));
+    assert.eq(timeseriesStats.numBucketsClosedDueToSize, 0, formatStatsLog(timeseriesStats));
+    assert.eq(
+        timeseriesStats.numBucketsClosedDueToCachePressure, 1, formatStatsLog(timeseriesStats));
+    assert.eq(timeseriesStats.numCompressedBuckets, 1, formatStatsLog(timeseriesStats));
 })();

 replSet.stopSet();
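A side note on the ~85334-byte figure cited in the first file's comments: it is consistent with a cache-derived bucket size cap of cacheSize / (2 * cardinality). The arithmetic below is a rough check only; it assumes the cardinality threshold is derived as cacheSize / (2 * defaultBucketMaxSize), a line that sits outside the hunks shown, so treat the formula as an assumption rather than the test's exact code.

    // Rough check of the ~85334 figure (assumed formulas; not copied from the test).
    const cacheSize = 0.256 * 1000 * 1000 * 1000;                            // 256 MB
    const cardinalityForCachePressure = cacheSize / (2 * 128000);            // 1000 buckets
    const aboveCardinalityThreshold = cardinalityForCachePressure * 3 / 2;   // 1500 buckets
    const cacheDerivedMaxSize = cacheSize / (2 * aboveCardinalityThreshold);
    print(cacheDerivedMaxSize);  // ~85333 bytes, i.e. the ~85334 cap cited in the comments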