author     Rui Liu <rui.liu@mongodb.com>    2021-09-08 16:59:28 +0100
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2021-09-15 17:02:07 +0000
commit     f8b20f1c3cbde42c92b0f4fcf8274586afd71e8c (patch)
tree       8350323b2dc3c86f1aa9e45c84bd6123fe2b1f76 /jstests/concurrency
parent     ecc452fcbb203d47ea43f525ac9de8d238312d30 (diff)
download   mongo-f8b20f1c3cbde42c92b0f4fcf8274586afd71e8c.tar.gz
SERVER-59184 Verify index integrity through concurrent inserts and chunk migration
Diffstat (limited to 'jstests/concurrency')
-rw-r--r--   jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js | 48
1 file changed, 42 insertions, 6 deletions
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js
index fbe472c6857..fb1ca2aed35 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js
@@ -1,7 +1,8 @@
/**
* Tests insertions into a sharded time-series collection during a chunk migration. To ensure
* correctness, the test does the same inserts into an unsharded collection and verifies that the
- * number of documents remain the same at the end.
+ * number of documents remains the same at the end. This test also checks that indexes on the
+ * time-series buckets collection remain consistent after the test run.
* @tags: [
* requires_sharding,
* assumes_balancer_off,
@@ -12,6 +13,7 @@
load('jstests/concurrency/fsm_workload_helpers/chunks.js'); // for chunk helpers
load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers.
+load("jstests/libs/analyze_plan.js"); // for 'getPlanStages'
load('jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js');
var $config = extendWorkload($config, function($config, $super) {
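(For reference, a minimal sketch of how 'getPlanStages' from analyze_plan.js is typically used once loaded; the collection name and sort key below are placeholders, not taken from this test.)

// Run explain on an aggregation, then pull out every IXSCAN stage from the plan
// and inspect its key pattern ('example' and {a: 1} are hypothetical).
const explainOutput = db.example.explain().aggregate([{$sort: {a: 1}}]);
const ixScans = getPlanStages(explainOutput, 'IXSCAN');
ixScans.forEach(stage => printjson(stage.keyPattern));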
@@ -26,6 +28,7 @@ var $config = extendWorkload($config, function($config, $super) {
// This should generate documents for a span of one month.
$config.data.numInitialDocs = 60 * 24 * 30;
+ $config.data.numMetaCount = 30;
$config.data.featureFlagDisabled = true;
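(Worked out, these constants amount to roughly one document per minute over a month; a small illustrative sketch, not part of the diff.)

// 60 minutes * 24 hours * 30 days = 43,200 initial documents.
const numInitialDocs = 60 * 24 * 30;  // 43200
// 30 distinct meta values; the setup loop assigns m = i % numMetaCount, so each
// meta value starts with 43200 / 30 = 1440 documents.
const numMetaCount = 30;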
@@ -43,7 +46,11 @@ var $config = extendWorkload($config, function($config, $super) {
for (let i = 0; i < 10; i++) {
// Generate a random timestamp between 'startTime' and largest timestamp we inserted.
const timer = this.startTime + Random.rand() * this.numInitialDocs * this.increment;
- const doc = {_id: new ObjectId(), t: new Date(timer)};
+ const doc = {
+ _id: new ObjectId(),
+ t: new Date(timer),
+ m: Math.floor(Random.rand() * this.numMetaCount)
+ };
assertAlways.commandWorked(db[collName].insert(doc));
assertAlways.commandWorked(db[this.nonShardCollName].insert(doc));
}
@@ -103,13 +110,32 @@ var $config = extendWorkload($config, function($config, $super) {
jsTestLog("NumBuckets " + numBuckets + " and numDocs " + numInitialDocs);
assert.eq(numInitialDocs, db[this.nonShardCollName].find({}).itcount());
- const pipeline = [{$project: {_id: "$_id", t: "$t"}}, {$sort: {t: 1}}];
+ const pipeline = [{$project: {_id: "$_id", m: "$m", t: "$t"}}, {$sort: {m: 1, t: 1}}];
const diff = DataConsistencyChecker.getDiff(db[collName].aggregate(pipeline),
db[this.nonShardCollName].aggregate(pipeline));
assertAlways.eq(
diff,
{docsWithDifferentContents: [], docsMissingOnFirst: [], docsMissingOnSecond: []},
diff);
+
+ // Make sure that queries using various indexes on the time-series buckets collection return
+ // buckets with all documents.
+ const verifyBucketIndex = (bucketIndex) => {
+ const bucketColl = db.getCollection(`system.buckets.${collName}`);
+ const buckets = bucketColl.aggregate([{$sort: bucketIndex}]).toArray();
+ const numDocsInBuckets =
+ buckets.map(b => Object.keys(b.data._id).length).reduce((x, y) => x + y, 0);
+ assert.eq(numInitialDocs, numDocsInBuckets);
+ const plan = bucketColl.explain().aggregate([{$sort: bucketIndex}]);
+ const stages = getPlanStages(plan, 'IXSCAN');
+ assert(stages.length > 0);
+ for (let ixScan of stages) {
+ assert.eq(bucketIndex, ixScan.keyPattern, ixScan);
+ }
+ };
+ verifyBucketIndex({"control.min.t": 1});
+ verifyBucketIndex({meta: 1});
+ verifyBucketIndex({meta: 1, "control.min.t": 1, "control.max.t": 1});
};
$config.setup = function setup(db, collName, cluster) {
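(The document count in verifyBucketIndex relies on the column-wise layout of time-series bucket documents; a minimal sketch of that shape follows, with illustrative values that are not taken from a test run.)

// Hypothetical bucket document from db.getCollection(`system.buckets.${collName}`):
const exampleBucket = {
    _id: ObjectId(),
    control: {
        version: 1,
        min: {t: ISODate("2021-09-01T00:00:00Z")},
        max: {t: ISODate("2021-09-01T00:01:00Z")}
    },
    meta: 7,                                  // the measurement's 'm' value
    data: {
        _id: {0: ObjectId(), 1: ObjectId()},  // one key per measurement in the bucket
        t: {0: ISODate("2021-09-01T00:00:00Z"), 1: ISODate("2021-09-01T00:01:00Z")}
    }
};
// Object.keys(exampleBucket.data._id).length counts the measurements in one bucket;
// summing that across all buckets should equal numInitialDocs.
assert.eq(2, Object.keys(exampleBucket.data._id).length);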
@@ -122,9 +148,15 @@ var $config = extendWorkload($config, function($config, $super) {
db[collName].drop();
db[this.nonShardCollName].drop();
- assertAlways.commandWorked(db.createCollection(collName, {timeseries: {timeField: "t"}}));
+ assertAlways.commandWorked(
+ db.createCollection(collName, {timeseries: {timeField: "t", metaField: "m"}}));
+ // Create indexes to verify index integrity during the teardown state.
cluster.shardCollection(db[collName], {t: 1}, false);
- db[this.nonShardCollName].createIndex({t: 1});
+ assert.commandWorked(db[this.nonShardCollName].createIndex({t: 1}));
+ assert.commandWorked(db[collName].createIndex({m: 1}));
+ assert.commandWorked(db[this.nonShardCollName].createIndex({m: 1}));
+ assert.commandWorked(db[collName].createIndex({m: 1, t: 1}));
+ assert.commandWorked(db[this.nonShardCollName].createIndex({m: 1, t: 1}));
const bulk = db[collName].initializeUnorderedBulkOp();
const bulkUnsharded = db[this.nonShardCollName].initializeUnorderedBulkOp();
@@ -133,7 +165,11 @@ var $config = extendWorkload($config, function($config, $super) {
for (let i = 0; i < this.numInitialDocs; ++i) {
currentTimeStamp += this.increment;
- const doc = {_id: new ObjectId(), t: new Date(currentTimeStamp)};
+ const doc = {
+ _id: new ObjectId(),
+ t: new Date(currentTimeStamp),
+ m: i % this.numMetaCount
+ };
bulk.insert(doc);
bulkUnsharded.insert(doc);
}
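(As a side note on the key patterns verified in teardown, here is a standalone sketch, not part of the workload, of how the view-level indexes created in setup surface on the buckets collection; the database name 'tsIndexCheck' is an assumption for illustration.)

// 'm' (the metaField) maps to 'meta', and an ascending 't' maps to
// control.min.t / control.max.t, which is what verifyBucketIndex() checks.
const sketchDB = db.getSiblingDB("tsIndexCheck");
sketchDB.ts.drop();
assert.commandWorked(
    sketchDB.createCollection("ts", {timeseries: {timeField: "t", metaField: "m"}}));
assert.commandWorked(sketchDB.ts.createIndex({m: 1}));
assert.commandWorked(sketchDB.ts.createIndex({m: 1, t: 1}));
sketchDB.getCollection("system.buckets.ts").getIndexes().forEach(ix => printjson(ix.key));
// Expected key patterns include {meta: 1} and {meta: 1, "control.min.t": 1, "control.max.t": 1}.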