summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Dan Larkin-York <dan.larkin-york@mongodb.com> 2023-01-06 22:19:07 +0000
committer: Evergreen Agent <no-reply@evergreen.mongodb.com> 2023-01-06 22:54:35 +0000
commit 4d30c6111ae6cdc7f6f60656af0e4b632ac6c0a4 (patch)
tree ea73fa406d505ff89cfa26779ace9e7ecccfcc80
parent 55efeeb9d858a5c93deaa7ca5f943ed97958a3ed (diff)
download mongo-4d30c6111ae6cdc7f6f60656af0e4b632ac6c0a4.tar.gz
SERVER-72595 Prevent query-based reopening from considering compressed buckets
-rw-r--r--jstests/core/timeseries/timeseries_reopened_bucket_insert.js83
-rw-r--r--src/mongo/db/timeseries/bucket_catalog_helpers.cpp6
-rw-r--r--src/mongo/db/timeseries/bucket_catalog_helpers_test.cpp27
3 files changed, 73 insertions(+), 43 deletions(-)
diff --git a/jstests/core/timeseries/timeseries_reopened_bucket_insert.js b/jstests/core/timeseries/timeseries_reopened_bucket_insert.js
index 4a36277cc31..9bfcd73e8c5 100644
--- a/jstests/core/timeseries/timeseries_reopened_bucket_insert.js
+++ b/jstests/core/timeseries/timeseries_reopened_bucket_insert.js
@@ -215,40 +215,6 @@ const expectToReopenArchivedBuckets = function() {
jsTestLog("Exiting expectToReopenArchivedBuckets.");
}();
-const expectToReopenCompressedBuckets = function() {
- jsTestLog("Entering expectToReopenCompressedBuckets...");
- resetCollection();
-
- let initialMeasurements = [];
- for (let i = 0; i < 5; ++i) {
- initialMeasurements.push({
- [timeField]: ISODate("2022-08-26T19:19:00Z"),
- [metaField]: "ReopenedBucket1",
- });
- }
- const forward = {
- [timeField]: ISODate("2022-08-27T19:19:00Z"),
- [metaField]: "ReopenedBucket1",
- };
- const backward = {
- [timeField]: ISODate("2022-08-26T19:19:00Z"),
- [metaField]: "ReopenedBucket1",
- };
-
- for (let i = 0; i < initialMeasurements.length; ++i) {
- checkIfBucketReopened(
- initialMeasurements[i], /* willCreateBucket */ i == 0, /* willReopenBucket */ false);
- }
- // Time forwards will open a new bucket, and close and compress the old one.
- checkIfBucketReopened(forward, /* willCreateBucket */ true, /* willReopenBucket */ false);
- assert.eq(1, bucketsColl.find({"control.version": 2}).toArray().length);
-
- // We expect to reopen the compressed bucket with time backwards.
- checkIfBucketReopened(backward, /* willCreateBucket */ false, /* willReopenBucket */ true);
-
- jsTestLog("Exiting expectToReopenCompressedBuckets.");
-}();
-
const failToReopenNonSuitableBuckets = function() {
jsTestLog("Entering failToReopenNonSuitableBuckets...");
resetCollection();
@@ -294,7 +260,46 @@ const failToReopenNonSuitableBuckets = function() {
"time": {"0": ISODate("2022-08-26T19:19:30Z")}
}
};
-
+ const compressedBucketDoc = {
+ "_id": ObjectId("05091c2c050b7495eaef4583"),
+ "control": {
+ "version": 2,
+ "min": {
+ "_id": ObjectId("63091c30138e9261fd70a903"),
+ "time": ISODate("2022-08-26T19:19:00Z")
+ },
+ "max": {
+ "_id": ObjectId("63091c30138e9261fd70a903"),
+ "time": ISODate("2022-08-26T19:19:30Z")
+ },
+ "closed": false
+ },
+ "meta": "NonSuitableBucket2",
+ "data": {
+ "_id": {"0": ObjectId("63091c30138e9261fd70a903")},
+ "time": {"0": ISODate("2022-08-26T19:19:30Z")}
+ }
+ };
+ const closedAndCompressedBucketDoc = {
+ "_id": ObjectId("06091c2c050b7495eaef4584"),
+ "control": {
+ "version": 2,
+ "min": {
+ "_id": ObjectId("63091c30138e9261fd70a903"),
+ "time": ISODate("2022-08-26T19:19:00Z")
+ },
+ "max": {
+ "_id": ObjectId("63091c30138e9261fd70a903"),
+ "time": ISODate("2022-08-26T19:19:30Z")
+ },
+ "closed": true
+ },
+ "meta": "NonSuitableBucket3",
+ "data": {
+ "_id": {"0": ObjectId("63091c30138e9261fd70a903")},
+ "time": {"0": ISODate("2022-08-26T19:19:30Z")}
+ }
+ };
const year2000BucketDoc = {
"_id": ObjectId("07091c2c050b7495eaef4585"),
"control": {
@@ -340,6 +345,14 @@ const failToReopenNonSuitableBuckets = function() {
// If an otherwise suitable bucket has the closed flag set, we expect to open a new bucket.
checkIfBucketReopened(measurement1, /* willCreateBucket */ true, /* willReopenBucket */ false);
+ assert.commandWorked(bucketsColl.insert(compressedBucketDoc));
+ // If an otherwise suitable bucket is compressed, we expect to open a new bucket.
+ checkIfBucketReopened(measurement2, /* willCreateBucket */ true, /* willReopenBucket */ false);
+
+ assert.commandWorked(bucketsColl.insert(closedAndCompressedBucketDoc));
+ // If an otherwise suitable bucket is compressed and closed, we expect to open a new bucket.
+ checkIfBucketReopened(measurement3, /* willCreateBucket */ true, /* willReopenBucket */ false);
+
assert.commandWorked(bucketsColl.insert(year2000BucketDoc));
// If an otherwise suitable bucket has an incompatible time range with the measurement, we
// expect to open a new bucket.
diff --git a/src/mongo/db/timeseries/bucket_catalog_helpers.cpp b/src/mongo/db/timeseries/bucket_catalog_helpers.cpp
index 6d3f0265a0f..13e174cd1bf 100644
--- a/src/mongo/db/timeseries/bucket_catalog_helpers.cpp
+++ b/src/mongo/db/timeseries/bucket_catalog_helpers.cpp
@@ -135,6 +135,9 @@ BSONObj generateReopeningFilters(const Date_t& time,
boost::optional<BSONElement> metadata,
const std::string& controlMinTimePath,
int64_t bucketMaxSpanSeconds) {
+ // The bucket must be uncompressed.
+ auto versionFilter = BSON(kControlVersionPath << kTimeseriesControlDefaultVersion);
+
// The bucket cannot be closed (aka open for new measurements).
auto closedFlagFilter =
BSON("$or" << BSON_ARRAY(BSON(kControlClosedPath << BSON("$exists" << false))
@@ -158,7 +161,8 @@ BSONObj generateReopeningFilters(const Date_t& time,
auto upperBound = BSON(controlMinTimePath << BSON("$gt" << measurementMaxDifference));
auto timeRangeFilter = BSON("$and" << BSON_ARRAY(lowerBound << upperBound));
- return BSON("$and" << BSON_ARRAY(closedFlagFilter << timeRangeFilter << metaFieldFilter));
+ return BSON("$and" << BSON_ARRAY(versionFilter << closedFlagFilter << timeRangeFilter
+ << metaFieldFilter));
}
StatusWith<MinMax> generateMinMaxFromBucketDoc(const BSONObj& bucketDoc,
diff --git a/src/mongo/db/timeseries/bucket_catalog_helpers_test.cpp b/src/mongo/db/timeseries/bucket_catalog_helpers_test.cpp
index c056c07964a..0a5442d4080 100644
--- a/src/mongo/db/timeseries/bucket_catalog_helpers_test.cpp
+++ b/src/mongo/db/timeseries/bucket_catalog_helpers_test.cpp
@@ -379,7 +379,7 @@ TEST_F(BucketCatalogHelpersTest, FindSuitableBucketForMeasurements) {
mongo::fromjson(
R"({
"_id":{"$oid":"629e1e680958e279dc29a517"},
- "control":{"version":2,"min":{"_id":7,"time":{"$date":"2023-08-01T13:00:00Z"},"a":1},
+ "control":{"version":1,"min":{"_id":7,"time":{"$date":"2023-08-01T13:00:00Z"},"a":1},
"max":{"_id":10,"time":{"$date":"2023-08-01T14:00:00Z"},"a":3},
"closed":false},
"meta":3,
@@ -475,14 +475,25 @@ TEST_F(BucketCatalogHelpersTest, IncompatibleBucketsForNewMeasurements) {
ASSERT(autoColl->getTimeseriesOptions() && autoColl->getTimeseriesOptions()->getMetaField());
auto tsOptions = *autoColl->getTimeseriesOptions();
- std::vector<BSONObj> bucketDocs = {// control.closed flag is true.
+ std::vector<BSONObj> bucketDocs = {// control.version indicates bucket is compressed.
+ mongo::fromjson(
+ R"({
+ "_id":{"$oid":"62e7e6ec27c28d338ab29200"},
+ "control":{"version":2,"min":{"_id":1,"time":{"$date":"2021-08-01T11:00:00Z"},"a":1},
+ "max":{"_id":3,"time":{"$date":"2021-08-01T12:00:00Z"},"a":3}},
+ "meta":1,
+ "data":{"time":{"0":{"$date":"2021-08-01T11:00:00Z"},
+ "1":{"$date":"2021-08-01T11:00:00Z"},
+ "2":{"$date":"2021-08-01T11:00:00Z"}},
+ "a":{"0":1,"1":2,"2":3}}})"),
+ // control.closed flag is true.
mongo::fromjson(
R"(
{"_id":{"$oid":"62e7eee4f33f295800073138"},
"control":{"version":1,"min":{"_id":7,"time":{"$date":"2022-08-01T12:00:00Z"},"a":1},
"max":{"_id":10,"time":{"$date":"2022-08-01T13:00:00Z"},"a":3},
"closed":true},
- "meta":1,
+ "meta":2,
"data":{"time":{"0":{"$date":"2022-08-01T12:00:00Z"},
"1":{"$date":"2022-08-01T12:00:00Z"},
"2":{"$date":"2022-08-01T12:00:00Z"}},
@@ -494,7 +505,7 @@ TEST_F(BucketCatalogHelpersTest, IncompatibleBucketsForNewMeasurements) {
"control":{"version":2,"min":{"_id":7,"time":{"$date":"2023-08-01T13:00:00Z"},"a":1},
"max":{"_id":10,"time":{"$date":"2023-08-01T14:00:00Z"},"a":3},
"closed":true},
- "meta":2,
+ "meta":3,
"data":{"time":{"0":{"$date":"2023-08-01T13:00:00Z"},
"1":{"$date":"2023-08-01T13:00:00Z"},
"2":{"$date":"2023-08-01T13:00:00Z"}},
@@ -505,11 +516,13 @@ TEST_F(BucketCatalogHelpersTest, IncompatibleBucketsForNewMeasurements) {
_insertIntoBucketColl(doc);
}
- auto time1 = dateFromISOString("2022-08-01T12:30:00Z");
- auto time2 = dateFromISOString("2023-08-01T13:30:00Z");
+ auto time1 = dateFromISOString("2021-08-01T11:30:00Z");
+ auto time2 = dateFromISOString("2022-08-01T12:30:00Z");
+ auto time3 = dateFromISOString("2023-08-01T13:30:00Z");
std::vector<BSONObj> validMeasurementDocs = {
BSON("_id" << 1 << _timeField << time1.getValue() << _metaField << 1),
- BSON("_id" << 2 << _timeField << time2.getValue() << _metaField << 2)};
+ BSON("_id" << 2 << _timeField << time2.getValue() << _metaField << 2),
+ BSON("_id" << 3 << _timeField << time3.getValue() << _metaField << 3)};
// Verify that even with matching meta fields and buckets with acceptable time ranges, if the
bucket is compressed and/or closed, we should not see it as a candidate bucket for future