author     Arun Banala <arun.banala@mongodb.com>             2021-05-06 11:13:52 +0100
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-10-19 10:00:24 +0000
commit     c54ece46d916ff97fd0b7631df09b2e2cb568b7e (patch)
tree       4d987ef4bad92be5f81ebf4591692e371b2627cc /jstests
parent     d86a36630a4166c9ad01de9404da075f634d02b3 (diff)
SERVER-60231 Unordered batched time-series inserts should abort upon stale config exceptions
(cherry picked from commit a6810aa4a19bc5c0330494778e73e6c7cbcc21f8)

SERVER-60231 Add 'requires_sharding' tag to fix test failure
(cherry picked from commit 5291cb9e3cf942de61ac8008d484dbe39273dec9)
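
For context, the new test drives both failure paths through the 'failUnorderedTimeseriesInsert' fail point, toggled with the configureFailPoint helper from jstests/libs/fail_point_util.js. A minimal sketch of that pattern, assuming a mongod connection `conn`, an illustrative collection `db.t`, and an array `docs` of time-series documents (none of which are part of this commit):

    load('jstests/libs/fail_point_util.js');

    // Fail unordered time-series inserts whose bucket metadata equals 0.
    // canContinue: true lets later writes in the batch proceed;
    // canContinue: false abandons the remainder of the batch.
    const fp = configureFailPoint(
        conn, 'failUnorderedTimeseriesInsert', {metadata: 0, canContinue: true});

    // Writes hitting the fail point surface as write errors on the result.
    const res = assert.commandFailed(db.t.insert(docs, {ordered: false}));

    // Disable the fail point to restore normal insert behavior.
    fp.off();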
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/noPassthrough/timeseries_insert_ordered_false.js | 160
1 file changed, 109 insertions(+), 51 deletions(-)
diff --git a/jstests/noPassthrough/timeseries_insert_ordered_false.js b/jstests/noPassthrough/timeseries_insert_ordered_false.js
index ad6def1ac5a..089e8619fee 100644
--- a/jstests/noPassthrough/timeseries_insert_ordered_false.js
+++ b/jstests/noPassthrough/timeseries_insert_ordered_false.js
@@ -1,5 +1,9 @@
/**
* Tests that time-series inserts respect {ordered: false}.
+ *
+ * @tags: [
+ * requires_sharding,
+ * ]
*/
(function() {
'use strict';
@@ -9,62 +13,116 @@ load('jstests/libs/fail_point_util.js');
const conn = MongoRunner.runMongod();
-if (!TimeseriesTest.timeseriesCollectionsEnabled(conn)) {
- jsTestLog('Skipping test because the time-series collection feature flag is disabled');
- MongoRunner.stopMongod(conn);
- return;
+function runTest(conn, failPointConn, shardColl) {
+ if (!TimeseriesTest.timeseriesCollectionsEnabled(conn)) {
+ jsTestLog('Skipping test because the time-series collection feature flag is disabled');
+ MongoRunner.stopMongod(conn);
+ return;
+ }
+
+ const testDB = conn.getDB(jsTestName());
+
+ const coll = testDB.getCollection('t');
+ const bucketsColl = testDB.getCollection('system.buckets.' + coll.getName());
+
+ const timeFieldName = 'time';
+ const metaFieldName = 'meta';
+
+ coll.drop();
+ assert.commandWorked(testDB.createCollection(
+ coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
+ if (shardColl) {
+ assert.commandWorked(conn.adminCommand({
+ shardCollection: coll.getFullName(),
+ key: {[metaFieldName]: 1},
+ }));
+ }
+ assert.contains(bucketsColl.getName(), testDB.getCollectionNames());
+
+ const docs = [
+ {_id: 0, [timeFieldName]: ISODate(), [metaFieldName]: 0},
+ {_id: 1, [timeFieldName]: ISODate(), [metaFieldName]: 0},
+ {_id: 2, [timeFieldName]: ISODate(), [metaFieldName]: 0},
+ {_id: 3, [timeFieldName]: ISODate(), [metaFieldName]: 1},
+ {_id: 4, [timeFieldName]: ISODate(), [metaFieldName]: 1},
+ ];
+
+ //
+ // Test with failPoint which aborts all subsequent write operations of the batch.
+ //
+ let fp = configureFailPoint(failPointConn ? failPointConn : conn,
+ 'failUnorderedTimeseriesInsert',
+ {metadata: 0, canContinue: false});
+
+ const resWithCannotContinue =
+ assert.commandFailed(coll.insert(docs.slice(1), {ordered: false}));
+
+ jsTestLog('Checking insert result: ' + tojson(resWithCannotContinue));
+ assert.eq(resWithCannotContinue.nInserted, 0);
+ assert.eq(resWithCannotContinue.getWriteErrors().length,
+ docs.length - resWithCannotContinue.nInserted - 1);
+ for (let i = 0; i < resWithCannotContinue.getWriteErrors().length; i++) {
+ assert.eq(resWithCannotContinue.getWriteErrors()[i].index, i);
+ assert.docEq(resWithCannotContinue.getWriteErrors()[i].getOperation(), docs[i + 1]);
+ }
+
+ //
+ // Test with failPoint which can allow subsequent write operations of the batch.
+ //
+ assert.docEq(coll.find().sort({_id: 1}).toArray(), []);
+ assert.eq(bucketsColl.count(),
+ 0,
+ 'Expected zero buckets but found: ' + tojson(bucketsColl.find().toArray()));
+
+ assert.commandWorked(coll.insert(docs[0]));
+ fp = configureFailPoint(failPointConn ? failPointConn : conn,
+ 'failUnorderedTimeseriesInsert',
+ {metadata: 0, canContinue: true});
+
+ // Insert two documents that would go into the existing bucket and two documents that go into a
+ // new bucket.
+ const res = assert.commandFailed(coll.insert(docs.slice(1), {ordered: false}));
+
+ jsTestLog('Checking insert result: ' + tojson(res));
+ assert.eq(res.nInserted, 2);
+ assert.eq(res.getWriteErrors().length, docs.length - res.nInserted - 1);
+ for (let i = 0; i < res.getWriteErrors().length; i++) {
+ assert.eq(res.getWriteErrors()[i].index, i);
+ assert.docEq(res.getWriteErrors()[i].getOperation(), docs[i + 1]);
+ }
+
+ assert.docEq(coll.find().sort({_id: 1}).toArray(), [docs[0], docs[3], docs[4]]);
+ assert.eq(bucketsColl.count(),
+ 2,
+ 'Expected two buckets but found: ' + tojson(bucketsColl.find().toArray()));
+
+ fp.off();
+
+ // The documents should go into two new buckets due to the failed insert on the existing bucket.
+ assert.commandWorked(coll.insert(docs.slice(1, 3), {ordered: false}));
+ assert.docEq(coll.find().sort({_id: 1}).toArray(), docs);
+ assert.eq(bucketsColl.count(),
+ 3,
+ 'Expected three buckets but found: ' + tojson(bucketsColl.find().toArray()));
}
-const testDB = conn.getDB(jsTestName());
-
-const coll = testDB.getCollection('t');
-const bucketsColl = testDB.getCollection('system.buckets.' + coll.getName());
-
-const timeFieldName = 'time';
-const metaFieldName = 'meta';
-
-coll.drop();
-assert.commandWorked(testDB.createCollection(
- coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
-assert.contains(bucketsColl.getName(), testDB.getCollectionNames());
-
-const docs = [
- {_id: 0, [timeFieldName]: ISODate(), [metaFieldName]: 0},
- {_id: 1, [timeFieldName]: ISODate(), [metaFieldName]: 0},
- {_id: 2, [timeFieldName]: ISODate(), [metaFieldName]: 0},
- {_id: 3, [timeFieldName]: ISODate(), [metaFieldName]: 1},
- {_id: 4, [timeFieldName]: ISODate(), [metaFieldName]: 1},
-];
-
-assert.commandWorked(coll.insert(docs[0]));
+runTest(conn);
+MongoRunner.stopMongod(conn);
-const fp = configureFailPoint(conn, 'failUnorderedTimeseriesInsert', {metadata: 0});
+const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+const mongos = st.s0;
+assert.commandWorked(mongos.adminCommand({enableSharding: jsTestName()}));
-// Insert two documents that would go into the existing bucket and two documents that go into a new
-// bucket.
-const res = assert.commandFailed(coll.insert(docs.slice(1), {ordered: false}));
+// Run test on sharded cluster before sharding the collection.
+runTest(mongos, st.getPrimaryShard(jsTestName()), false);
-jsTestLog('Checking insert result: ' + tojson(res));
-assert.eq(res.nInserted, 2);
-assert.eq(res.getWriteErrors().length, docs.length - res.nInserted - 1);
-for (let i = 0; i < res.getWriteErrors().length; i++) {
- assert.eq(res.getWriteErrors()[i].index, i);
- assert.docEq(res.getWriteErrors()[i].getOperation(), docs[i + 1]);
+if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
+ jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled");
+ st.stop();
+ return;
}
-assert.docEq(coll.find().sort({_id: 1}).toArray(), [docs[0], docs[3], docs[4]]);
-assert.eq(bucketsColl.count(),
- 2,
- 'Expected two buckets but found: ' + tojson(bucketsColl.find().toArray()));
-
-fp.off();
-
-// The documents should go into two new buckets due to the failed insert on the existing bucket.
-assert.commandWorked(coll.insert(docs.slice(1, 3), {ordered: false}));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs);
-assert.eq(bucketsColl.count(),
- 3,
- 'Expected three buckets but found: ' + tojson(bucketsColl.find().toArray()));
-
-MongoRunner.stopMongod(conn);
+// Run test on sharded cluster after sharding the collection.
+runTest(mongos, st.getPrimaryShard(jsTestName()), true);
+st.stop();
})();
\ No newline at end of file
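
As a usage note, the test's assertions rely on the shell's BulkWriteResult API (nInserted, getWriteErrors(), and getOperation() on each error), all of which appear in the diff above. A minimal standalone sketch of inspecting an unordered insert result, with an illustrative collection and documents:

    const res = db.t.insert(
        [{_id: 0, time: ISODate(), meta: 0}, {_id: 1, time: ISODate(), meta: 1}],
        {ordered: false});
    if (res.hasWriteErrors()) {
        // Each write error records the batch index that failed and the original document.
        res.getWriteErrors().forEach(
            err => print('failed at index ' + err.index + ': ' + tojson(err.getOperation())));
    }
    print('documents inserted: ' + res.nInserted);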