Diffstat (limited to 'jstests')
-rw-r--r--  jstests/aggregation/bugs/server66418.js | 38
-rw-r--r--  jstests/aggregation/sources/densify/libs/parse_util.js | 14
-rw-r--r--  jstests/core/capped_resize.js | 8
-rw-r--r--  jstests/core/timeseries/bucket_unpacking_with_sort.js | 7
-rw-r--r--  jstests/libs/parallelTester.js | 1
-rw-r--r--  jstests/multiVersion/internal_sessions_setfcv_wait_for_transaction_coordinator_cleanup.js | 8
-rw-r--r--  jstests/multiVersion/internal_transactions_index_setFCV.js | 76
-rw-r--r--  jstests/multiVersion/targetedTestsLastLtsFeatures/internal_transactions_transient_error_after_setFCV.js | 5
-rw-r--r--  jstests/noPassthrough/list_local_sessions.js (renamed from jstests/core/list_local_sessions.js) | 14
-rw-r--r--  jstests/sharding/internal_txns/partial_index.js | 556
10 files changed, 448 insertions, 279 deletions
diff --git a/jstests/aggregation/bugs/server66418.js b/jstests/aggregation/bugs/server66418.js
new file mode 100644
index 00000000000..9b8c960282a
--- /dev/null
+++ b/jstests/aggregation/bugs/server66418.js
@@ -0,0 +1,38 @@
+// SERVER-66418
+// Bad projection created during dependency analysis due to string order assumption
+(function() {
+"use strict";
+
+const coll = db[jsTest.name()];
+coll.drop();
+
+coll.save({
+ _id: 1,
+ type: 'PRODUCT',
+ status: 'VALID',
+ locale: {
+ en: 'INSTRUMENT PANEL',
+ es: 'INSTRUMENTOS DEL CUADRO',
+ fr: 'INSTRUMENT TABLEAU DE BORD',
+ }
+});
+
+// Before SERVER-66418, this aggregation incorrectly threw a PathCollision error.
+coll.aggregate([
+ {"$match": {"_id": 1}},
+ {"$sort": {"_id": 1}},
+ {
+ "$project": {
+ "designation": {
+ "$switch": {
+ "branches": [{
+ "case": {"$eq": ["$type", "PRODUCT"]},
+ "then": {"$ifNull": ["$locale.en-GB.name", "$locale.en.name"]}
+ }],
+ "default": {"$ifNull": ["$locale.en-GB", "$locale.en"]}
+ }
+ }
+ }
+ }
+]);
+})();
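
The regression test above exercises the bug; the following sketch (illustrative only, not part of the commit, and assuming a mongo shell) shows the string-order pitfall it guards against: when dotted paths are sorted as plain strings, '-' (0x2d) sorts before '.' (0x2e), so "locale.en-GB" lands between "locale.en" and "locale.en.name", breaking any assumption that a path prefix stays adjacent to its extensions.

// Illustrative sketch only; demonstrates the ordering, not the server's internal dependency analysis.
const deps = ["locale.en", "locale.en.name", "locale.en-GB", "locale.en-GB.name", "type"];
deps.sort();
// => ["locale.en", "locale.en-GB", "locale.en-GB.name", "locale.en.name", "type"]
// The "locale.en-GB*" entries separate "locale.en" from "locale.en.name" because '-' < '.'.
printjson(deps);
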
diff --git a/jstests/aggregation/sources/densify/libs/parse_util.js b/jstests/aggregation/sources/densify/libs/parse_util.js
index 7ccedefd81d..d2e6ba4e489 100644
--- a/jstests/aggregation/sources/densify/libs/parse_util.js
+++ b/jstests/aggregation/sources/densify/libs/parse_util.js
@@ -139,6 +139,20 @@ let parseUtil = (function(db, coll, stageName, options = {}) {
}),
5733402,
"a bounding array must be an ascending array of either two dates or two numbers");
+ // Non-whole number step with date bounds
+ assert.commandFailedWithCode(
+ run({
+ [stageName]: {
+ field: "a",
+ range: {
+ step: 1.1,
+ bounds: [new ISODate("2020-01-01"), new ISODate("2020-01-03")],
+ unit: "second"
+ }
+ }
+ }),
+ 6586400,
+ "The step parameter in a range statement must be a whole number when densifying a date range");
// Positive test cases
assert.commandWorked(run({[stageName]: {field: "a", range: {step: 1.0, bounds: [1, 2]}}}));
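
For contrast with the new negative case, here is a sketch of the accepted shape (not part of this change; it assumes the user-facing $densify stage on a collection named coll rather than the test harness's run() helper): with date bounds and a unit, the spec parses once the step is a whole number.

// Sketch only: a whole-number step with date bounds and a unit is the accepted shape.
db.coll.aggregate([{
    $densify: {
        field: "a",
        range: {step: 1, bounds: [ISODate("2020-01-01"), ISODate("2020-01-03")], unit: "second"}
    }
}]);
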
diff --git a/jstests/core/capped_resize.js b/jstests/core/capped_resize.js
index 38cc7abd9a0..06baab6b21e 100644
--- a/jstests/core/capped_resize.js
+++ b/jstests/core/capped_resize.js
@@ -83,6 +83,14 @@ let verifyLimitUpdate = function(updates) {
assert.eq(stats.count, initialDocSize);
assert.lte(stats.size, maxSize);
+ // We used to disallow resizing a capped collection to a size below 4096 bytes. This
+ // restriction was lifted in SERVER-67036.
+ // We should see a reduction in collection size and count relative to the previous test case.
+ verifyLimitUpdate({cappedSize: 256});
+ stats = assert.commandWorked(cappedColl.stats());
+ assert.lt(stats.count, initialDocSize);
+ assert.lt(stats.size, maxSize);
+
// We expect the resizing of a capped collection to fail when maxSize <= 0 and maxSize >
// maxSizeCeiling.
const negativeSize = -1 * maxSize;
diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort.js b/jstests/core/timeseries/bucket_unpacking_with_sort.js
index 3e92a9e2461..029697be74a 100644
--- a/jstests/core/timeseries/bucket_unpacking_with_sort.js
+++ b/jstests/core/timeseries/bucket_unpacking_with_sort.js
@@ -236,7 +236,12 @@ const runRewritesTest = (sortSpec,
// changing out from under us.
const bucketSpanMatch = {
$match: {
- $expr: {$lte: [{$subtract: ["$control.max.t", "$control.min.t"]}, {$const: 3600000}]},
+ $expr: {
+ $lte: [
+ {$subtract: ["$control.max.t", "$control.min.t"]},
+ {$const: NumberLong(3600000)}
+ ]
+ },
}
};
let foundMatch = findFirstMatch(optExplain);
diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js
index 8974cf569dd..c0a31e54b86 100644
--- a/jstests/libs/parallelTester.js
+++ b/jstests/libs/parallelTester.js
@@ -311,7 +311,6 @@ if (typeof _threadInject != "undefined") {
// run in parallel, they could interfere with the cache and cause failures.
parallelFilesDir + "/list_all_local_sessions.js",
parallelFilesDir + "/list_all_sessions.js",
- parallelFilesDir + "/list_local_sessions.js",
parallelFilesDir + "/list_sessions.js",
];
var serialTests = makeKeys(serialTestsArr);
diff --git a/jstests/multiVersion/internal_sessions_setfcv_wait_for_transaction_coordinator_cleanup.js b/jstests/multiVersion/internal_sessions_setfcv_wait_for_transaction_coordinator_cleanup.js
index bd56ea89f96..e9b0aea9005 100644
--- a/jstests/multiVersion/internal_sessions_setfcv_wait_for_transaction_coordinator_cleanup.js
+++ b/jstests/multiVersion/internal_sessions_setfcv_wait_for_transaction_coordinator_cleanup.js
@@ -35,6 +35,8 @@ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+st.refreshCatalogCacheForNs(st.s, ns);
+
function runTestBasic(lsid) {
jsTest.log("Test transaction coordinator documents are deleted before downgrade finishes " +
"with lsid: " + tojson(lsid));
@@ -42,9 +44,6 @@ function runTestBasic(lsid) {
// Upgrade fcv to make sure cluster is on the latestFCV before starting any transactions.
assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- // Upgrading to lastestFCV clears the filtering metadata of all collections.
- st.refreshCatalogCacheForNs(st.s, ns);
-
let commitTxnFp = configureFailPoint(coordinator, "hangBeforeCommitingTxn");
let deleteCoordinatorDocFp =
configureFailPoint(coordinator, "hangBeforeDeletingCoordinatorDoc");
@@ -129,9 +128,6 @@ function runTestWithFailoverBeforeDocumentRemoval(lsid) {
// Upgrade fcv to make sure cluster is on the latestFCV before starting any transactions.
assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- // Upgrading to lastestFCV clears the filtering metadata of all collections.
- st.refreshCatalogCacheForNs(st.s, ns);
-
let commitTxnFp = configureFailPoint(coordinator, "hangBeforeCommitingTxn");
let deleteCoordinatorDocFp =
configureFailPoint(coordinator, "hangBeforeDeletingCoordinatorDoc");
diff --git a/jstests/multiVersion/internal_transactions_index_setFCV.js b/jstests/multiVersion/internal_transactions_index_setFCV.js
index 712e8249a54..a3ce2ead44a 100644
--- a/jstests/multiVersion/internal_transactions_index_setFCV.js
+++ b/jstests/multiVersion/internal_transactions_index_setFCV.js
@@ -41,7 +41,8 @@ function assertPartialIndexDoesNotExist(node) {
* Verifies the partial index is dropped/created on FCV transitions and retryable writes work in all
* FCVs.
*/
-function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
+function runTest(
+ setFCVConn, modifyIndexConns, verifyIndexConns, rst, alwaysCreateFeatureFlagEnabled) {
// Start at latest FCV which should have the index.
assert.commandWorked(setFCVConn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
verifyIndexConns.forEach(conn => {
@@ -54,8 +55,12 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
assertPartialIndexDoesNotExist(conn);
});
+ assert.commandWorked(setFCVConn.getDB("foo").runCommand(
+ {insert: "bar", documents: [{x: 1}], lsid: {id: UUID()}, txnNumber: NumberLong(11)}));
+
if (rst) {
- // On step up to primary the index should not be created.
+ // On step up to primary the index should not be created. Note this tests the empty
+ // collection case when alwaysCreateFeatureFlagEnabled is true.
let primary = rst.getPrimary();
// Clear the collection so we'd try to create the index.
@@ -69,8 +74,21 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
rst.awaitReplication();
verifyIndexConns.forEach(conn => {
reconnect(conn);
- assertPartialIndexDoesNotExist(conn);
+ if (alwaysCreateFeatureFlagEnabled) {
+ assertPartialIndexExists(conn);
+ } else {
+ assertPartialIndexDoesNotExist(conn);
+ }
});
+
+ if (alwaysCreateFeatureFlagEnabled) {
+ // The test expects no index after this block, so remove it.
+ modifyIndexConns.forEach(conn => {
+ assert.commandWorked(
+ conn.getCollection("config.transactions").dropIndex("parent_lsid"));
+ });
+ }
+ rst.awaitReplication();
}
assert.commandWorked(setFCVConn.getDB("foo").runCommand(
@@ -93,11 +111,15 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
});
if (rst) {
- // On step up to primary the index should not be created.
+ // On step up to primary the index should not be created. Note this tests the non-empty
+ // collection case when alwaysCreateFeatureFlagEnabled is true.
let primary = rst.getPrimary();
- // Clear the collection so we'd try to create the index.
- assert.commandWorked(primary.getDB("config").transactions.remove({}));
+ // Clear the collection so we'd try to create the index. Skip if the always create index
+ // feature flag is on because we'd try to create the index anyway.
+ if (!alwaysCreateFeatureFlagEnabled) {
+ assert.commandWorked(primary.getDB("config").transactions.remove({}));
+ }
assert.commandWorked(
primary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true}));
assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
@@ -107,8 +129,21 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
rst.awaitReplication();
verifyIndexConns.forEach(conn => {
reconnect(conn);
- assertPartialIndexDoesNotExist(conn);
+ if (alwaysCreateFeatureFlagEnabled) {
+ assertPartialIndexExists(conn);
+ } else {
+ assertPartialIndexDoesNotExist(conn);
+ }
});
+
+ if (alwaysCreateFeatureFlagEnabled) {
+ // The test expects no index after this block, so remove it.
+ modifyIndexConns.forEach(conn => {
+ assert.commandWorked(
+ conn.getCollection("config.transactions").dropIndex("parent_lsid"));
+ });
+ }
+ rst.awaitReplication();
}
assert.commandWorked(setFCVConn.getDB("foo").runCommand(
@@ -124,8 +159,11 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
// On step up to primary the index should be created.
let primary = rst.getPrimary();
- // Clear the collection so we'll try to create the index.
- assert.commandWorked(primary.getDB("config").transactions.remove({}));
+ // Clear the collection so we'd try to create the index. Skip if the always create index
+ // feature flag is on because we'd try to create the index anyway.
+ if (!alwaysCreateFeatureFlagEnabled) {
+ assert.commandWorked(primary.getDB("config").transactions.remove({}));
+ }
assert.commandWorked(
primary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true}));
assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
@@ -193,6 +231,26 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
}
{
+ // Enabling featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp should not lead to
+ // creating the index if the internal transactions feature flag is not enabled.
+ const featureFlagRst = new ReplSetTest({
+ nodes: 2,
+ nodeOptions:
+ {setParameter: "featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp=true"}
+ });
+ featureFlagRst.startSet();
+ featureFlagRst.initiate();
+ // Note setFCV always waits for majority write concern so in a two node cluster secondaries will
+ // always have replicated the setFCV writes.
+ runTest(featureFlagRst.getPrimary(),
+ [featureFlagRst.getPrimary()],
+ [featureFlagRst.getPrimary(), featureFlagRst.getSecondary()],
+ featureFlagRst,
+ true /* alwaysCreateFeatureFlagEnabled */);
+ featureFlagRst.stopSet();
+}
+
+{
const conn = MongoRunner.runMongod();
const configTxnsCollection = conn.getCollection("config.transactions");
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/internal_transactions_transient_error_after_setFCV.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/internal_transactions_transient_error_after_setFCV.js
index eccf64190d6..344c4df64b4 100644
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/internal_transactions_transient_error_after_setFCV.js
+++ b/jstests/multiVersion/targetedTestsLastLtsFeatures/internal_transactions_transient_error_after_setFCV.js
@@ -31,6 +31,8 @@ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+st.refreshCatalogCacheForNs(st.s, ns);
+
function runTest(lsid) {
jsTest.log("Test that the correct error response is propagated upon losing in memory " +
"transaction metadata and durable metadata in the config.transactions collection " +
@@ -39,9 +41,6 @@ function runTest(lsid) {
// Upgrade fcv to make sure cluster is on the latestFCV before starting any transactions.
assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- // Upgrading to lastestFCV clears the filtering metadata of all collections.
- st.refreshCatalogCacheForNs(st.s, ns);
-
// Inserts are split to guarantee that shard0 will be chosen as the coordinator.
assert.commandWorked(st.s.getDB(kDbName).runCommand({
insert: kCollName,
diff --git a/jstests/core/list_local_sessions.js b/jstests/noPassthrough/list_local_sessions.js
index bee5c084ca7..60b73f7f6ad 100644
--- a/jstests/core/list_local_sessions.js
+++ b/jstests/noPassthrough/list_local_sessions.js
@@ -8,12 +8,20 @@
// # Sessions are asynchronously flushed to disk, so a stepdown immediately after calling
// # startSession may cause this test to fail to find the returned sessionId.
// does_not_support_stepdowns,
+// requires_sharding,
// ]
(function() {
'use strict';
-const admin = db.getSiblingDB('admin');
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ other: {mongosOptions: {setParameter: {disableLogicalSessionCacheRefresh: true}}}
+});
+
+const admin = st.s.getDB("admin");
+
function listLocalSessions() {
return admin.aggregate([{'$listLocalSessions': {allUsers: false}}]);
}
@@ -23,7 +31,7 @@ let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity;
try {
// Start a new session and capture its sessionId.
- const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id;
+ const myid = assert.commandWorked(st.s.adminCommand({startSession: 1})).id.id;
assert(myid !== undefined);
// Ensure that the cache now contains the session and is visible.
@@ -80,4 +88,6 @@ try {
} finally {
admin.setLogLevel(originalLogLevel);
}
+
+st.stop();
})();
diff --git a/jstests/sharding/internal_txns/partial_index.js b/jstests/sharding/internal_txns/partial_index.js
index c6fbb997b85..b9c5462aaa4 100644
--- a/jstests/sharding/internal_txns/partial_index.js
+++ b/jstests/sharding/internal_txns/partial_index.js
@@ -9,285 +9,327 @@
load("jstests/libs/analyze_plan.js");
-const st = new ShardingTest({shards: {rs0: {nodes: 2}}});
-
const kDbName = "testDb";
const kCollName = "testColl";
const kConfigTxnNs = "config.transactions";
const kPartialIndexName = "parent_lsid";
-const mongosTestDB = st.s.getDB(kDbName);
-const shard0PrimaryConfigTxnColl = st.rs0.getPrimary().getCollection(kConfigTxnNs);
-
-function assertPartialIndexExists(node) {
- const configDB = node.getDB("config");
- const indexSpecs = assert.commandWorked(configDB.runCommand({"listIndexes": "transactions"}))
- .cursor.firstBatch;
- indexSpecs.sort((index0, index1) => index0.name > index1.name);
- assert.eq(indexSpecs.length, 2);
- const idIndexSpec = indexSpecs[0];
- assert.eq(idIndexSpec.key, {"_id": 1});
- const partialIndexSpec = indexSpecs[1];
- assert.eq(partialIndexSpec.key, {"parentLsid": 1, "_id.txnNumber": 1, "_id": 1});
- assert.eq(partialIndexSpec.partialFilterExpression, {"parentLsid": {"$exists": true}});
-}
-
-function assertFindUsesCoveredQuery(node) {
- const configTxnColl = node.getCollection(kConfigTxnNs);
- const childSessionDoc = configTxnColl.findOne({
- "_id.id": sessionUUID,
- "_id.txnNumber": childLsid.txnNumber,
- "_id.txnUUID": childLsid.txnUUID
- });
+function runTest(st, alwaysCreateFeatureFlagEnabled) {
+ const mongosTestDB = st.s.getDB(kDbName);
+ const shard0PrimaryConfigTxnColl = st.rs0.getPrimary().getCollection(kConfigTxnNs);
+
+ function assertPartialIndexExists(node) {
+ const configDB = node.getDB("config");
+ const indexSpecs =
+ assert.commandWorked(configDB.runCommand({"listIndexes": "transactions"}))
+ .cursor.firstBatch;
+ indexSpecs.sort((index0, index1) => index0.name > index1.name);
+ assert.eq(indexSpecs.length, 2);
+ const idIndexSpec = indexSpecs[0];
+ assert.eq(idIndexSpec.key, {"_id": 1});
+ const partialIndexSpec = indexSpecs[1];
+ assert.eq(partialIndexSpec.key, {"parentLsid": 1, "_id.txnNumber": 1, "_id": 1});
+ assert.eq(partialIndexSpec.partialFilterExpression, {"parentLsid": {"$exists": true}});
+ }
+
+ function assertFindUsesCoveredQuery(node) {
+ const configTxnColl = node.getCollection(kConfigTxnNs);
+ const childSessionDoc = configTxnColl.findOne({
+ "_id.id": sessionUUID,
+ "_id.txnNumber": childLsid.txnNumber,
+ "_id.txnUUID": childLsid.txnUUID
+ });
+
+ const explainRes = assert.commandWorked(
+ configTxnColl.explain()
+ .find({"parentLsid": parentSessionDoc._id, "_id.txnNumber": childLsid.txnNumber},
+ {_id: 1})
+ .finish());
+ const winningPlan = getWinningPlan(explainRes.queryPlanner);
+ assert.eq(winningPlan.stage, "PROJECTION_COVERED");
+ assert.eq(winningPlan.inputStage.stage, "IXSCAN");
+
+ const findRes =
+ configTxnColl
+ .find({"parentLsid": parentSessionDoc._id, "_id.txnNumber": childLsid.txnNumber},
+ {_id: 1})
+ .toArray();
+ assert.eq(findRes.length, 1);
+ assert.eq(findRes[0]._id, childSessionDoc._id);
+ }
+
+ function assertPartialIndexDoesNotExist(node) {
+ const configDB = node.getDB("config");
+ const indexSpecs =
+ assert.commandWorked(configDB.runCommand({"listIndexes": "transactions"}))
+ .cursor.firstBatch;
+ assert.eq(indexSpecs.length, 1);
+ const idIndexSpec = indexSpecs[0];
+ assert.eq(idIndexSpec.key, {"_id": 1});
+ }
+
+ function indexRecreationTest(expectRecreateAfterDrop) {
+ st.rs0.getPrimary().getCollection(kConfigTxnNs).dropIndex(kPartialIndexName);
+ st.rs0.awaitReplication();
+
+ st.rs0.nodes.forEach(node => {
+ assertPartialIndexDoesNotExist(node);
+ });
+
+ let primary = st.rs0.getPrimary();
+ assert.commandWorked(
+ primary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true}));
+ assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+
+ st.rs0.awaitNodesAgreeOnPrimary();
+ st.rs0.awaitReplication();
+
+ st.rs0.nodes.forEach(node => {
+ if (expectRecreateAfterDrop) {
+ assertPartialIndexExists(node);
+ } else {
+ assertPartialIndexDoesNotExist(node);
+ }
+ });
+ }
+
+ // If the collection is empty and the index does not exist, we should always create the partial
+ // index on stepup.
+ indexRecreationTest(true /* expectRecreateAfterDrop */);
+
+ const sessionUUID = UUID();
+ const parentLsid = {id: sessionUUID};
+ const parentTxnNumber = 35;
+ let stmtId = 0;
- const explainRes = assert.commandWorked(
- configTxnColl.explain()
- .find({"parentLsid": parentSessionDoc._id, "_id.txnNumber": childLsid.txnNumber},
- {_id: 1})
- .finish());
- const winningPlan = getWinningPlan(explainRes.queryPlanner);
- assert.eq(winningPlan.stage, "PROJECTION_COVERED");
- assert.eq(winningPlan.inputStage.stage, "IXSCAN");
-
- const findRes =
- configTxnColl
- .find({"parentLsid": parentSessionDoc._id, "_id.txnNumber": childLsid.txnNumber},
- {_id: 1})
- .toArray();
- assert.eq(findRes.length, 1);
- assert.eq(findRes[0]._id, childSessionDoc._id);
-}
+ assert.commandWorked(mongosTestDB.runCommand({
+ insert: kCollName,
+ documents: [{_id: 0}],
+ lsid: parentLsid,
+ txnNumber: NumberLong(parentTxnNumber),
+ stmtId: NumberInt(stmtId++)
+ }));
+ const parentSessionDoc = shard0PrimaryConfigTxnColl.findOne({"_id.id": sessionUUID});
+
+ const childLsid = {id: sessionUUID, txnNumber: NumberLong(parentTxnNumber), txnUUID: UUID()};
+ let childTxnNumber = 0;
+
+ function runRetryableInternalTransaction(txnNumber) {
+ assert.commandWorked(mongosTestDB.runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: childLsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ startTransaction: true
+ }));
+ assert.commandWorked(mongosTestDB.adminCommand({
+ commitTransaction: 1,
+ lsid: childLsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+ }));
+ }
+
+ runRetryableInternalTransaction(childTxnNumber);
+ assert.eq(shard0PrimaryConfigTxnColl.count({"_id.id": sessionUUID}), 2);
-function assertPartialIndexDoesNotExist(node) {
- const configDB = node.getDB("config");
- const indexSpecs = assert.commandWorked(configDB.runCommand({"listIndexes": "transactions"}))
- .cursor.firstBatch;
- assert.eq(indexSpecs.length, 1);
- const idIndexSpec = indexSpecs[0];
- assert.eq(idIndexSpec.key, {"_id": 1});
-}
+ st.rs0.nodes.forEach(node => {
+ assertPartialIndexExists(node);
+ assertFindUsesCoveredQuery(node);
+ });
-function indexRecreationTest(recreateAfterDrop) {
- st.rs0.getPrimary().getCollection(kConfigTxnNs).dropIndex(kPartialIndexName);
- st.rs0.awaitReplication();
+ childTxnNumber++;
+ runRetryableInternalTransaction(childTxnNumber);
+ assert.eq(shard0PrimaryConfigTxnColl.count({"_id.id": sessionUUID}), 2);
st.rs0.nodes.forEach(node => {
- assertPartialIndexDoesNotExist(node);
+ assertPartialIndexExists(node);
+ assertFindUsesCoveredQuery(node);
});
- let primary = st.rs0.getPrimary();
+ //
+ // Verify clients can create the index only if they provide the exact specification and that
+ // operations requiring the index fail if it does not exist.
+ //
+
+ const indexConn = st.rs0.getPrimary();
assert.commandWorked(
- primary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true}));
- assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+ indexConn.getCollection("config.transactions").dropIndex(kPartialIndexName));
- st.rs0.awaitNodesAgreeOnPrimary();
- st.rs0.awaitReplication();
+ // Normal writes don't involve config.transactions, so they succeed.
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand(
+ {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}}));
- st.rs0.nodes.forEach(node => {
- if (recreateAfterDrop) {
- assertPartialIndexExists(node);
- } else {
- assertPartialIndexDoesNotExist(node);
- }
- });
-}
+ // Retryable writes read from the partial index, so they fail.
+ let res = assert.commandFailedWithCode(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(11)
+ }),
+ ErrorCodes.BadValue);
+ assert(res.errmsg.includes("Please create an index directly "), tojson(res));
+
+ // User transactions read from the partial index, so they fail.
+ assert.commandFailedWithCode(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(11),
+ startTransaction: true,
+ autocommit: false
+ }),
+ ErrorCodes.BadValue);
-// If the collection is empty and the index does not exist, we should create the partial index on
-// stepup.
-indexRecreationTest(true /*Recreate after drop*/);
-
-const sessionUUID = UUID();
-const parentLsid = {
- id: sessionUUID
-};
-const parentTxnNumber = 35;
-let stmtId = 0;
-
-assert.commandWorked(mongosTestDB.runCommand({
- insert: kCollName,
- documents: [{_id: 0}],
- lsid: parentLsid,
- txnNumber: NumberLong(parentTxnNumber),
- stmtId: NumberInt(stmtId++)
-}));
-const parentSessionDoc = shard0PrimaryConfigTxnColl.findOne({"_id.id": sessionUUID});
-
-const childLsid = {
- id: sessionUUID,
- txnNumber: NumberLong(parentTxnNumber),
- txnUUID: UUID()
-};
-let childTxnNumber = 0;
-
-function runRetryableInternalTransaction(txnNumber) {
- assert.commandWorked(mongosTestDB.runCommand({
+ // Non-retryable internal transactions do not read from or update the partial index, so they can
+ // succeed without the index existing.
+ let nonRetryableTxnSession = {id: UUID(), txnUUID: UUID()};
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: nonRetryableTxnSession,
+ txnNumber: NumberLong(11),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+ }));
+ assert.commandWorked(indexConn.adminCommand({
+ commitTransaction: 1,
+ lsid: nonRetryableTxnSession,
+ txnNumber: NumberLong(11),
+ autocommit: false
+ }));
+
+ // Retryable transactions read from the partial index, so they fail.
+ assert.commandFailedWithCode(indexConn.getDB(kDbName).runCommand({
insert: kCollName,
documents: [{x: 1}],
- lsid: childLsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false,
- startTransaction: true
+ lsid: {id: UUID(), txnUUID: UUID(), txnNumber: NumberLong(2)},
+ txnNumber: NumberLong(11),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+ }),
+ ErrorCodes.BadValue);
+
+ // Recreating the partial index requires the exact options used internally, but in any order.
+ assert.commandFailedWithCode(indexConn.getDB("config").runCommand({
+ createIndexes: "transactions",
+ indexes: [{v: 2, name: "parent_lsid", key: {parentLsid: 1, "_id.txnNumber": 1, _id: 1}}],
+ }),
+ ErrorCodes.IllegalOperation);
+ assert.commandWorked(indexConn.getDB("config").runCommand({
+ createIndexes: "transactions",
+ indexes: [{
+ name: "parent_lsid",
+ key: {parentLsid: 1, "_id.txnNumber": 1, _id: 1},
+ partialFilterExpression: {parentLsid: {$exists: true}},
+ v: 2,
+ }],
+ }));
+
+ // Operations involving the index should succeed now.
+
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand(
+ {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}}));
+
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand(
+ {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}, txnNumber: NumberLong(11)}));
+
+ let userSessionAfter = {id: UUID()};
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: userSessionAfter,
+ txnNumber: NumberLong(11),
+ startTransaction: true,
+ autocommit: false
+ }));
+ assert.commandWorked(indexConn.adminCommand({
+ commitTransaction: 1,
+ lsid: userSessionAfter,
+ txnNumber: NumberLong(11),
+ autocommit: false
+ }));
+
+ let nonRetryableTxnSessionAfter = {id: UUID(), txnUUID: UUID()};
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: nonRetryableTxnSessionAfter,
+ txnNumber: NumberLong(11),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+ }));
+ assert.commandWorked(indexConn.adminCommand({
+ commitTransaction: 1,
+ lsid: nonRetryableTxnSessionAfter,
+ txnNumber: NumberLong(11),
+ autocommit: false
+ }));
+
+ let retryableTxnSessionAfter = {id: UUID(), txnUUID: UUID(), txnNumber: NumberLong(2)};
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: retryableTxnSessionAfter,
+ txnNumber: NumberLong(11),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
}));
- assert.commandWorked(mongosTestDB.adminCommand({
+ assert.commandWorked(indexConn.adminCommand({
commitTransaction: 1,
- lsid: childLsid,
- txnNumber: NumberLong(txnNumber),
+ lsid: retryableTxnSessionAfter,
+ txnNumber: NumberLong(11),
autocommit: false
}));
+
+ if (!alwaysCreateFeatureFlagEnabled) {
+ // We expect that if the partial index is dropped when the collection isn't empty, then on
+ // stepup we should not recreate the index.
+ indexRecreationTest(false /* expectRecreateAfterDrop */);
+ } else {
+ // Creating the partial index when the collection isn't empty can be enabled by a feature
+ // flag.
+ indexRecreationTest(true /* expectRecreateAfterDrop */);
+ }
+}
+
+{
+ const st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+ runTest(st, false /* alwaysCreateFeatureFlagEnabled */);
+ st.stop();
}
-runRetryableInternalTransaction(childTxnNumber);
-assert.eq(shard0PrimaryConfigTxnColl.count({"_id.id": sessionUUID}), 2);
-
-st.rs0.nodes.forEach(node => {
- assertPartialIndexExists(node);
- assertFindUsesCoveredQuery(node);
-});
-
-childTxnNumber++;
-runRetryableInternalTransaction(childTxnNumber);
-assert.eq(shard0PrimaryConfigTxnColl.count({"_id.id": sessionUUID}), 2);
-
-st.rs0.nodes.forEach(node => {
- assertPartialIndexExists(node);
- assertFindUsesCoveredQuery(node);
-});
-
-//
-// Verify clients can create the index only if they provide the exact specification and that
-// operations requiring the index fails if it does not exist.
-//
-
-const indexConn = st.rs0.getPrimary();
-assert.commandWorked(indexConn.getCollection("config.transactions").dropIndex(kPartialIndexName));
-
-// Normal writes don't involve config.transactions, so they succeed.
-assert.commandWorked(indexConn.getDB(kDbName).runCommand(
- {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}}));
-
-// Retryable writes read from the partial index, so they fail.
-let res = assert.commandFailedWithCode(
- indexConn.getDB(kDbName).runCommand(
- {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}, txnNumber: NumberLong(11)}),
- ErrorCodes.BadValue);
-assert(res.errmsg.includes("Please create an index directly "), tojson(res));
-
-// User transactions read from the partial index, so they fail.
-assert.commandFailedWithCode(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: {id: UUID()},
- txnNumber: NumberLong(11),
- startTransaction: true,
- autocommit: false
-}),
- ErrorCodes.BadValue);
-
-// Non retryable internal transactions do not read from or update the partial index, so they can
-// succeed without the index existing.
-let nonRetryableTxnSession = {id: UUID(), txnUUID: UUID()};
-assert.commandWorked(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: nonRetryableTxnSession,
- txnNumber: NumberLong(11),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
-}));
-assert.commandWorked(indexConn.adminCommand({
- commitTransaction: 1,
- lsid: nonRetryableTxnSession,
- txnNumber: NumberLong(11),
- autocommit: false
-}));
-
-// Retryable transactions read from the partial index, so they fail.
-assert.commandFailedWithCode(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: {id: UUID(), txnUUID: UUID(), txnNumber: NumberLong(2)},
- txnNumber: NumberLong(11),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
-}),
- ErrorCodes.BadValue);
-
-// Recreating the partial index requires the exact options used internally, but in any order.
-assert.commandFailedWithCode(indexConn.getDB("config").runCommand({
- createIndexes: "transactions",
- indexes: [{v: 2, name: "parent_lsid", key: {parentLsid: 1, "_id.txnNumber": 1, _id: 1}}],
-}),
- ErrorCodes.IllegalOperation);
-assert.commandWorked(indexConn.getDB("config").runCommand({
- createIndexes: "transactions",
- indexes: [{
- name: "parent_lsid",
- key: {parentLsid: 1, "_id.txnNumber": 1, _id: 1},
- partialFilterExpression: {parentLsid: {$exists: true}},
- v: 2,
- }],
-}));
-
-// Operations involving the index should succeed now.
-
-assert.commandWorked(indexConn.getDB(kDbName).runCommand(
- {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}}));
-
-assert.commandWorked(indexConn.getDB(kDbName).runCommand(
- {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}, txnNumber: NumberLong(11)}));
-
-let userSessionAfter = {id: UUID()};
-assert.commandWorked(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: userSessionAfter,
- txnNumber: NumberLong(11),
- startTransaction: true,
- autocommit: false
-}));
-assert.commandWorked(indexConn.adminCommand(
- {commitTransaction: 1, lsid: userSessionAfter, txnNumber: NumberLong(11), autocommit: false}));
-
-let nonRetryableTxnSessionAfter = {id: UUID(), txnUUID: UUID()};
-assert.commandWorked(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: nonRetryableTxnSessionAfter,
- txnNumber: NumberLong(11),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
-}));
-assert.commandWorked(indexConn.adminCommand({
- commitTransaction: 1,
- lsid: nonRetryableTxnSessionAfter,
- txnNumber: NumberLong(11),
- autocommit: false
-}));
-
-let retryableTxnSessionAfter = {id: UUID(), txnUUID: UUID(), txnNumber: NumberLong(2)};
-assert.commandWorked(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: retryableTxnSessionAfter,
- txnNumber: NumberLong(11),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
-}));
-assert.commandWorked(indexConn.adminCommand({
- commitTransaction: 1,
- lsid: retryableTxnSessionAfter,
- txnNumber: NumberLong(11),
- autocommit: false
-}));
-
-// We expect that if the partial index is dropped when the collection isn't empty, then on stepup we
-// should not recreate the collection.
-indexRecreationTest(false /*Don't recreate after drop*/);
-
-st.stop();
+{
+ const featureFlagSt = new ShardingTest({
+ shards: 1,
+ other: {
+ rs: {nodes: 2},
+ rsOptions:
+ {setParameter: "featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp=true"}
+ }
+ });
+
+ // Sanity check the feature flag was enabled.
+ assert(assert
+ .commandWorked(featureFlagSt.rs0.getPrimary().adminCommand({
+ getParameter: 1,
+ featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp: 1
+ }))
+ .featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp.value);
+ assert(assert
+ .commandWorked(featureFlagSt.rs0.getSecondary().adminCommand({
+ getParameter: 1,
+ featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp: 1
+ }))
+ .featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp.value);
+
+ runTest(featureFlagSt, true /* alwaysCreateFeatureFlagEnabled */);
+ featureFlagSt.stop();
+}
})();