-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js  26
-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js  26
2 files changed, 40 insertions(+), 12 deletions(-)
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js
index 3dd6588ef68..16a0d6777a0 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js
@@ -44,6 +44,9 @@ var $config = extendWorkload($config, function($config, $super) {
jsTestLog(
"Skipping executing this test as the requisite feature flags are not enabled.");
}
+
+ this.arbitraryDeletesEnabled =
+ FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesDeletesSupport");
};
$config.states.doDelete = function doDelete(db, collName, connCache) {
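The new arbitraryDeletesEnabled flag is computed once up front and then reused by doDelete below, so no state re-probes the server. A minimal sketch of the probe on its own, assuming the legacy-shell load() path for the jstests helper this commit uses:

    load("jstests/libs/feature_flag_util.js");

    // isPresentAndEnabled() reports false when the flag does not exist in this
    // build (rather than failing outright), so older binaries simply fall back
    // to meta-only deletes.
    const arbitraryDeletesEnabled =
        FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesDeletesSupport");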
@@ -51,17 +54,32 @@ var $config = extendWorkload($config, function($config, $super) {
return;
}
+ // Alternate between filtering on the meta field and filtering on a data field. This will
+ // cover both the timeseries batch delete and arbitrary delete paths.
+ const filterFieldName = !this.arbitraryDeletesEnabled || Random.randInt(2) == 0
+ ? "m.tid" + this.tid
+ : "f.tid" + this.tid;
const filter = {
- m: {
- ["tid" + this.tid]: {
- $gte: Random.randInt($config.data.numMetaCount),
- },
+ [filterFieldName]: {
+ $gte: Random.randInt($config.data.numMetaCount),
},
};
assertAlways.commandWorked(db[collName].deleteMany(filter));
assertAlways.commandWorked(db[this.nonShardCollName].deleteMany(filter));
};
+ $config.data.validateCollection = function validate(db, collName) {
+ // Since we can't use a 'snapshot' read concern for timeseries deletes, deletes on the
+ // sharded collection may not see the exact same records as the non-sharded, so the
+ // validation needs to be more lenient.
+ const count = db[collName].find().itcount();
+ const countNonSharded = db[this.nonShardCollName].find().itcount();
+ assertAlways.gte(
+ count,
+ countNonSharded,
+ "Expected sharded collection to have the same or more records than unsharded");
+ };
+
$config.transitions = {
init: {insert: 1},
insert: {insert: 3, doDelete: 3, moveChunk: 1},
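The coin flip in doDelete above is the heart of the change: "m.tid<N>" keeps exercising the meta-only batch-delete path, while "f.tid<N>" pushes the same predicate into measurement data. A standalone sketch of the filter construction, with illustrative stand-ins for the workload's state (tid, numMetaCount, and the collection name are assumptions here):

    const tid = 0;
    const numMetaCount = 10;
    const arbitraryDeletesEnabled = true;  // from FeatureFlagUtil, as above
    Random.setRandomSeed();                // Random.randInt() needs a seeded RNG

    // Without the feature flag, always target the meta subfield; with it,
    // alternate so both delete paths get coverage.
    const filterFieldName = !arbitraryDeletesEnabled || Random.randInt(2) == 0
        ? "m.tid" + tid    // meta subfield, e.g. "m.tid0" => batch delete
        : "f.tid" + tid;   // measurement field, e.g. "f.tid0" => arbitrary delete

    // The computed property name yields e.g. {"f.tid0": {$gte: 4}}. A predicate
    // on measurement data cannot be answered from bucket metadata alone, which
    // is what forces the arbitrary-delete code path.
    const filter = {[filterFieldName]: {$gte: Random.randInt(numMetaCount)}};
    assert.commandWorked(db.ts_coll.deleteMany(filter));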
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js
index 4f95d508df0..7491ff8d71e 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js
@@ -60,10 +60,12 @@ var $config = extendWorkload($config, function($config, $super) {
// Generate a random timestamp between 'startTime' and largest timestamp we inserted.
const timer =
this.startTime + Math.floor(Random.rand() * this.numInitialDocs * this.increment);
+ const metaVal = this.generateMetaFieldValueForInsertStage(this.tid);
const doc = {
_id: new ObjectId(),
[this.timeField]: new Date(timer),
- [this.metaField]: this.generateMetaFieldValueForInsertStage(this.tid),
+ [this.metaField]: metaVal,
+ f: metaVal,
};
assertAlways.commandWorked(db[collName].insert(doc));
assertAlways.commandWorked(db[this.nonShardCollName].insert(doc));
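Mirroring the meta value into the plain data field f is what gives the deletes workload a non-meta target for its "f.tid<N>" filters. The return value of generateMetaFieldValueForInsertStage is not shown in this diff; the sketch below assumes a {tid<N>: <int>} object, consistent with the "m.tid<N>" filters in the first file:

    const metaVal = {tid0: 7};  // assumed shape, not shown in this diff

    const doc = {
        _id: new ObjectId(),
        t: new Date(),  // timeField: determines which bucket the doc lands in
        m: metaVal,     // metaField: groups buckets and drives chunk routing
        f: metaVal,     // same value as ordinary data; reachable only through
                        // an arbitrary delete, never the meta-only batch path
    };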
@@ -112,6 +114,15 @@ var $config = extendWorkload($config, function($config, $super) {
moveChunk: {insert: 1, moveChunk: 0}
};
+ $config.data.validateCollection = function validate(db, collName) {
+ const pipeline =
+ [{$project: {_id: "$_id", m: "$m", t: "$t"}}, {$sort: {m: 1, t: 1, _id: 1}}];
+ const diff = DataConsistencyChecker.getDiff(db[collName].aggregate(pipeline),
+ db[this.nonShardCollName].aggregate(pipeline));
+ assertAlways.eq(
+ diff, {docsWithDifferentContents: [], docsMissingOnFirst: [], docsMissingOnSecond: []});
+ };
+
$config.teardown = function teardown(db, collName, cluster) {
if (this.featureFlagDisabled) {
return;
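The pipeline in the new validateCollection is not incidental: DataConsistencyChecker.getDiff consumes the two cursors in order, so both sides have to be projected to the same field set and sorted by a total order (hence _id as the final tie-breaker). A toy check under those assumptions, using throwaway collections:

    assert.commandWorked(db.a.insert({_id: 1, m: 1, t: ISODate("2023-01-01")}));
    assert.commandWorked(db.b.insert({_id: 1, m: 1, t: ISODate("2023-01-01")}));

    const pipeline =
        [{$project: {_id: "$_id", m: "$m", t: "$t"}}, {$sort: {m: 1, t: 1, _id: 1}}];
    const diff = DataConsistencyChecker.getDiff(db.a.aggregate(pipeline),
                                                db.b.aggregate(pipeline));

    // Empty on all three axes means the two collections matched exactly.
    assert.eq(diff, {docsWithDifferentContents: [], docsMissingOnFirst: [],
                     docsMissingOnSecond: []});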
@@ -123,12 +134,9 @@ var $config = extendWorkload($config, function($config, $super) {
jsTestLog("NumBuckets " + numBuckets + ", numDocs on sharded cluster" +
db[collName].find().itcount() + "numDocs on unsharded collection " +
db[this.nonShardCollName].find({}).itcount());
- const pipeline =
- [{$project: {_id: "$_id", m: "$m", t: "$t"}}, {$sort: {m: 1, t: 1, _id: 1}}];
- const diff = DataConsistencyChecker.getDiff(db[collName].aggregate(pipeline),
- db[this.nonShardCollName].aggregate(pipeline));
- assertAlways.eq(
- diff, {docsWithDifferentContents: [], docsMissingOnFirst: [], docsMissingOnSecond: []});
+
+ // Validate the contents of the collection.
+ this.validateCollection(db, collName);
// Make sure that queries using various indexes on time-series buckets collection return
// buckets with all documents.
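Extracting the strict check into $config.data.validateCollection, instead of leaving it inline in teardown, is what lets the deletes workload substitute its lenient version: extendWorkload layers the child's $config over the parent's, and this.validateCollection in the inherited teardown resolves to whichever definition the running workload installed. A condensed sketch of that dispatch, with harness details elided:

    // Parent (inserts workload): strict equality is safe because identical
    // inserts are applied to both collections.
    $config.data.validateCollection = function validate(db, collName) {
        // ... DataConsistencyChecker.getDiff(...) must come back empty ...
    };

    // The shared teardown dispatches through this.*, not a captured closure.
    $config.teardown = function teardown(db, collName, cluster) {
        this.validateCollection(db, collName);
    };

    // Child (deletes workload) reassigns the same slot; without a 'snapshot'
    // read concern for deletes, only a one-sided count bound is guaranteed,
    // and that is what the inherited teardown now runs.
    $config.data.validateCollection = function validate(db, collName) {
        assertAlways.gte(db[collName].find().itcount(),
                         db[this.nonShardCollName].find().itcount());
    };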
@@ -185,10 +193,12 @@ var $config = extendWorkload($config, function($config, $super) {
for (let i = 0; i < this.numInitialDocs; ++i) {
currentTimeStamp += this.increment;
+ const metaVal = this.generateMetaFieldValueForInitialInserts(i);
const doc = {
_id: new ObjectId(),
[this.timeField]: new Date(currentTimeStamp),
- [this.metaField]: this.generateMetaFieldValueForInitialInserts(i),
+ [this.metaField]: metaVal,
+ f: metaVal,
};
bulk.insert(doc);
bulkUnsharded.insert(doc);