author    Jack Mulrow <jack.mulrow@mongodb.com>    2022-06-14 22:27:33 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>    2022-06-16 21:11:37 +0000
commit    c48357dadc2df9d65dfe4da6ca9cf285f095b967 (patch)
tree      704a60499c967b8ea2009d1a5fa71871b936b2c7
parent    342552401af6b49d37bdc2a507c5524642bb4e95 (diff)
download  mongo-c48357dadc2df9d65dfe4da6ca9cf285f095b967.tar.gz
SERVER-67273 Add feature flag to always create config.transactions index on step up
(cherry picked from commit 389d799f96c23cd534aa5c8b3a52e181d7f99ebc)
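The flag is meant to be flipped on ad hoc with setParameter rather than tied to FCV. As a minimal sketch (the parameter name comes from this patch; the startup invocation is an assumption about how an operator would apply it):

    // Start each node with the flag enabled:
    //   mongod --setParameter featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp=true
    // Then confirm from the shell that it took effect:
    const res = assert.commandWorked(db.adminCommand({
        getParameter: 1,
        featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp: 1,
    }));
    assert(res.featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp.value, tojson(res));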
-rw-r--r--  buildscripts/resmokeconfig/fully_disabled_feature_flags.yml    4
-rw-r--r--  jstests/multiVersion/internal_transactions_index_setFCV.js    76
-rw-r--r--  jstests/sharding/internal_txns/partial_index.js               556
-rw-r--r--  src/mongo/db/internal_transactions_feature_flag.idl             5
-rw-r--r--  src/mongo/db/session_catalog_mongod.cpp                        55
5 files changed, 418 insertions(+), 278 deletions(-)
diff --git a/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml b/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml
index e05f4ed1a0f..22bf09a9f5c 100644
--- a/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml
+++ b/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml
@@ -9,3 +9,7 @@
# and TenantDatabase constructors.
- featureFlagRequireTenantID
- featureFlagSbePlanCache
+# This flag exists to help users in managed environments that upgraded to 6.0 before 6.0.0-rc8
+# create the config.transactions partial index. It is only meant to be enabled ad hoc, so only
+# its targeted tests should enable it.
+- featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp
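Because the flag lives in fully_disabled_feature_flags.yml, the all-feature-flags variants will not turn it on; a targeted test has to opt in explicitly. A sketch, mirroring the ReplSetTest options used by the multiversion test below:

    // Hypothetical targeted test setup; the setParameter string is copied from this patch.
    const rst = new ReplSetTest({
        nodes: 2,
        nodeOptions:
            {setParameter: "featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp=true"}
    });
    rst.startSet();
    rst.initiate();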
diff --git a/jstests/multiVersion/internal_transactions_index_setFCV.js b/jstests/multiVersion/internal_transactions_index_setFCV.js
index 712e8249a54..a3ce2ead44a 100644
--- a/jstests/multiVersion/internal_transactions_index_setFCV.js
+++ b/jstests/multiVersion/internal_transactions_index_setFCV.js
@@ -41,7 +41,8 @@ function assertPartialIndexDoesNotExist(node) {
* Verifies the partial index is dropped/created on FCV transitions and retryable writes work in all
* FCVs.
*/
-function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
+function runTest(
+ setFCVConn, modifyIndexConns, verifyIndexConns, rst, alwaysCreateFeatureFlagEnabled) {
// Start at latest FCV which should have the index.
assert.commandWorked(setFCVConn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
verifyIndexConns.forEach(conn => {
@@ -54,8 +55,12 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
assertPartialIndexDoesNotExist(conn);
});
+ assert.commandWorked(setFCVConn.getDB("foo").runCommand(
+ {insert: "bar", documents: [{x: 1}], lsid: {id: UUID()}, txnNumber: NumberLong(11)}));
+
if (rst) {
- // On step up to primary the index should not be created.
+ // On step up to primary the index should not be created. Note this tests the empty
+ // collection case when alwaysCreateFeatureFlagEnabled is true.
let primary = rst.getPrimary();
// Clear the collection so we'd try to create the index.
@@ -69,8 +74,21 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
rst.awaitReplication();
verifyIndexConns.forEach(conn => {
reconnect(conn);
- assertPartialIndexDoesNotExist(conn);
+ if (alwaysCreateFeatureFlagEnabled) {
+ assertPartialIndexExists(conn);
+ } else {
+ assertPartialIndexDoesNotExist(conn);
+ }
});
+
+ if (alwaysCreateFeatureFlagEnabled) {
+ // The test expects no index after this block, so remove it.
+ modifyIndexConns.forEach(conn => {
+ assert.commandWorked(
+ conn.getCollection("config.transactions").dropIndex("parent_lsid"));
+ });
+ }
+ rst.awaitReplication();
}
assert.commandWorked(setFCVConn.getDB("foo").runCommand(
@@ -93,11 +111,15 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
});
if (rst) {
- // On step up to primary the index should not be created.
+ // On step up to primary the index should not be created. Note this tests the non-empty
+ // collection case when alwaysCreateFeatureFlagEnabled is true.
let primary = rst.getPrimary();
- // Clear the collection so we'd try to create the index.
- assert.commandWorked(primary.getDB("config").transactions.remove({}));
+ // Clear the collection so we'd try to create the index. Skip this when the always-create-index
+ // feature flag is on, because then we'd create the index anyway.
+ if (!alwaysCreateFeatureFlagEnabled) {
+ assert.commandWorked(primary.getDB("config").transactions.remove({}));
+ }
assert.commandWorked(
primary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true}));
assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
@@ -107,8 +129,21 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
rst.awaitReplication();
verifyIndexConns.forEach(conn => {
reconnect(conn);
- assertPartialIndexDoesNotExist(conn);
+ if (alwaysCreateFeatureFlagEnabled) {
+ assertPartialIndexExists(conn);
+ } else {
+ assertPartialIndexDoesNotExist(conn);
+ }
});
+
+ if (alwaysCreateFeatureFlagEnabled) {
+ // The test expects no index after this block, so remove it.
+ modifyIndexConns.forEach(conn => {
+ assert.commandWorked(
+ conn.getCollection("config.transactions").dropIndex("parent_lsid"));
+ });
+ }
+ rst.awaitReplication();
}
assert.commandWorked(setFCVConn.getDB("foo").runCommand(
@@ -124,8 +159,11 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
// On step up to primary the index should be created.
let primary = rst.getPrimary();
- // Clear the collection so we'll try to create the index.
- assert.commandWorked(primary.getDB("config").transactions.remove({}));
+ // Clear the collection so we'd try to create the index. Skip this when the always-create-index
+ // feature flag is on, because then we'd create the index anyway.
+ if (!alwaysCreateFeatureFlagEnabled) {
+ assert.commandWorked(primary.getDB("config").transactions.remove({}));
+ }
assert.commandWorked(
primary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true}));
assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
@@ -193,6 +231,26 @@ function runTest(setFCVConn, modifyIndexConns, verifyIndexConns, rst) {
}
{
+ // Enabling featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp should lead to
+ // creating the index on step up even when the internal transactions feature flag is disabled.
+ const featureFlagRst = new ReplSetTest({
+ nodes: 2,
+ nodeOptions:
+ {setParameter: "featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp=true"}
+ });
+ featureFlagRst.startSet();
+ featureFlagRst.initiate();
+ // Note setFCV always waits for majority write concern, so in a two-node cluster secondaries
+ // will always have replicated the setFCV writes.
+ runTest(featureFlagRst.getPrimary(),
+ [featureFlagRst.getPrimary()],
+ [featureFlagRst.getPrimary(), featureFlagRst.getSecondary()],
+ featureFlagRst,
+ true /* alwaysCreateFeatureFlagEnabled */);
+ featureFlagRst.stopSet();
+}
+
+{
const conn = MongoRunner.runMongod();
const configTxnsCollection = conn.getCollection("config.transactions");
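For reference, the exact parent_lsid partial index these tests drop and assert on can be recreated by hand. The specification below is copied from the createIndexes call exercised in partial_index.js; as that test shows, the options must match exactly, though they may be given in any order:

    assert.commandWorked(db.getSiblingDB("config").runCommand({
        createIndexes: "transactions",
        indexes: [{
            name: "parent_lsid",
            key: {parentLsid: 1, "_id.txnNumber": 1, _id: 1},
            partialFilterExpression: {parentLsid: {$exists: true}},
            v: 2,
        }],
    }));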
diff --git a/jstests/sharding/internal_txns/partial_index.js b/jstests/sharding/internal_txns/partial_index.js
index c6fbb997b85..b9c5462aaa4 100644
--- a/jstests/sharding/internal_txns/partial_index.js
+++ b/jstests/sharding/internal_txns/partial_index.js
@@ -9,285 +9,327 @@
load("jstests/libs/analyze_plan.js");
-const st = new ShardingTest({shards: {rs0: {nodes: 2}}});
-
const kDbName = "testDb";
const kCollName = "testColl";
const kConfigTxnNs = "config.transactions";
const kPartialIndexName = "parent_lsid";
-const mongosTestDB = st.s.getDB(kDbName);
-const shard0PrimaryConfigTxnColl = st.rs0.getPrimary().getCollection(kConfigTxnNs);
-
-function assertPartialIndexExists(node) {
- const configDB = node.getDB("config");
- const indexSpecs = assert.commandWorked(configDB.runCommand({"listIndexes": "transactions"}))
- .cursor.firstBatch;
- indexSpecs.sort((index0, index1) => index0.name > index1.name);
- assert.eq(indexSpecs.length, 2);
- const idIndexSpec = indexSpecs[0];
- assert.eq(idIndexSpec.key, {"_id": 1});
- const partialIndexSpec = indexSpecs[1];
- assert.eq(partialIndexSpec.key, {"parentLsid": 1, "_id.txnNumber": 1, "_id": 1});
- assert.eq(partialIndexSpec.partialFilterExpression, {"parentLsid": {"$exists": true}});
-}
-
-function assertFindUsesCoveredQuery(node) {
- const configTxnColl = node.getCollection(kConfigTxnNs);
- const childSessionDoc = configTxnColl.findOne({
- "_id.id": sessionUUID,
- "_id.txnNumber": childLsid.txnNumber,
- "_id.txnUUID": childLsid.txnUUID
- });
+function runTest(st, alwaysCreateFeatureFlagEnabled) {
+ const mongosTestDB = st.s.getDB(kDbName);
+ const shard0PrimaryConfigTxnColl = st.rs0.getPrimary().getCollection(kConfigTxnNs);
+
+ function assertPartialIndexExists(node) {
+ const configDB = node.getDB("config");
+ const indexSpecs =
+ assert.commandWorked(configDB.runCommand({"listIndexes": "transactions"}))
+ .cursor.firstBatch;
+ indexSpecs.sort((index0, index1) => index0.name.localeCompare(index1.name));
+ assert.eq(indexSpecs.length, 2);
+ const idIndexSpec = indexSpecs[0];
+ assert.eq(idIndexSpec.key, {"_id": 1});
+ const partialIndexSpec = indexSpecs[1];
+ assert.eq(partialIndexSpec.key, {"parentLsid": 1, "_id.txnNumber": 1, "_id": 1});
+ assert.eq(partialIndexSpec.partialFilterExpression, {"parentLsid": {"$exists": true}});
+ }
+
+ function assertFindUsesCoveredQuery(node) {
+ const configTxnColl = node.getCollection(kConfigTxnNs);
+ const childSessionDoc = configTxnColl.findOne({
+ "_id.id": sessionUUID,
+ "_id.txnNumber": childLsid.txnNumber,
+ "_id.txnUUID": childLsid.txnUUID
+ });
+
+ const explainRes = assert.commandWorked(
+ configTxnColl.explain()
+ .find({"parentLsid": parentSessionDoc._id, "_id.txnNumber": childLsid.txnNumber},
+ {_id: 1})
+ .finish());
+ const winningPlan = getWinningPlan(explainRes.queryPlanner);
+ assert.eq(winningPlan.stage, "PROJECTION_COVERED");
+ assert.eq(winningPlan.inputStage.stage, "IXSCAN");
+
+ const findRes =
+ configTxnColl
+ .find({"parentLsid": parentSessionDoc._id, "_id.txnNumber": childLsid.txnNumber},
+ {_id: 1})
+ .toArray();
+ assert.eq(findRes.length, 1);
+ assert.eq(findRes[0]._id, childSessionDoc._id);
+ }
+
+ function assertPartialIndexDoesNotExist(node) {
+ const configDB = node.getDB("config");
+ const indexSpecs =
+ assert.commandWorked(configDB.runCommand({"listIndexes": "transactions"}))
+ .cursor.firstBatch;
+ assert.eq(indexSpecs.length, 1);
+ const idIndexSpec = indexSpecs[0];
+ assert.eq(idIndexSpec.key, {"_id": 1});
+ }
+
+ function indexRecreationTest(expectRecreateAfterDrop) {
+ st.rs0.getPrimary().getCollection(kConfigTxnNs).dropIndex(kPartialIndexName);
+ st.rs0.awaitReplication();
+
+ st.rs0.nodes.forEach(node => {
+ assertPartialIndexDoesNotExist(node);
+ });
+
+ let primary = st.rs0.getPrimary();
+ assert.commandWorked(
+ primary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true}));
+ assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+
+ st.rs0.awaitNodesAgreeOnPrimary();
+ st.rs0.awaitReplication();
+
+ st.rs0.nodes.forEach(node => {
+ if (expectRecreateAfterDrop) {
+ assertPartialIndexExists(node);
+ } else {
+ assertPartialIndexDoesNotExist(node);
+ }
+ });
+ }
+
+ // If the collection is empty and the index does not exist, we should always create the partial
+ // index on stepup.
+ indexRecreationTest(true /* expectRecreateAfterDrop */);
+
+ const sessionUUID = UUID();
+ const parentLsid = {id: sessionUUID};
+ const parentTxnNumber = 35;
+ let stmtId = 0;
- const explainRes = assert.commandWorked(
- configTxnColl.explain()
- .find({"parentLsid": parentSessionDoc._id, "_id.txnNumber": childLsid.txnNumber},
- {_id: 1})
- .finish());
- const winningPlan = getWinningPlan(explainRes.queryPlanner);
- assert.eq(winningPlan.stage, "PROJECTION_COVERED");
- assert.eq(winningPlan.inputStage.stage, "IXSCAN");
-
- const findRes =
- configTxnColl
- .find({"parentLsid": parentSessionDoc._id, "_id.txnNumber": childLsid.txnNumber},
- {_id: 1})
- .toArray();
- assert.eq(findRes.length, 1);
- assert.eq(findRes[0]._id, childSessionDoc._id);
-}
+ assert.commandWorked(mongosTestDB.runCommand({
+ insert: kCollName,
+ documents: [{_id: 0}],
+ lsid: parentLsid,
+ txnNumber: NumberLong(parentTxnNumber),
+ stmtId: NumberInt(stmtId++)
+ }));
+ const parentSessionDoc = shard0PrimaryConfigTxnColl.findOne({"_id.id": sessionUUID});
+
+ const childLsid = {id: sessionUUID, txnNumber: NumberLong(parentTxnNumber), txnUUID: UUID()};
+ let childTxnNumber = 0;
+
+ function runRetryableInternalTransaction(txnNumber) {
+ assert.commandWorked(mongosTestDB.runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: childLsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ startTransaction: true
+ }));
+ assert.commandWorked(mongosTestDB.adminCommand({
+ commitTransaction: 1,
+ lsid: childLsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+ }));
+ }
+
+ runRetryableInternalTransaction(childTxnNumber);
+ assert.eq(shard0PrimaryConfigTxnColl.count({"_id.id": sessionUUID}), 2);
-function assertPartialIndexDoesNotExist(node) {
- const configDB = node.getDB("config");
- const indexSpecs = assert.commandWorked(configDB.runCommand({"listIndexes": "transactions"}))
- .cursor.firstBatch;
- assert.eq(indexSpecs.length, 1);
- const idIndexSpec = indexSpecs[0];
- assert.eq(idIndexSpec.key, {"_id": 1});
-}
+ st.rs0.nodes.forEach(node => {
+ assertPartialIndexExists(node);
+ assertFindUsesCoveredQuery(node);
+ });
-function indexRecreationTest(recreateAfterDrop) {
- st.rs0.getPrimary().getCollection(kConfigTxnNs).dropIndex(kPartialIndexName);
- st.rs0.awaitReplication();
+ childTxnNumber++;
+ runRetryableInternalTransaction(childTxnNumber);
+ assert.eq(shard0PrimaryConfigTxnColl.count({"_id.id": sessionUUID}), 2);
st.rs0.nodes.forEach(node => {
- assertPartialIndexDoesNotExist(node);
+ assertPartialIndexExists(node);
+ assertFindUsesCoveredQuery(node);
});
- let primary = st.rs0.getPrimary();
+ //
+ // Verify clients can create the index only if they provide the exact specification and that
+ // operations requiring the index fail if it does not exist.
+ //
+
+ const indexConn = st.rs0.getPrimary();
assert.commandWorked(
- primary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true}));
- assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+ indexConn.getCollection("config.transactions").dropIndex(kPartialIndexName));
- st.rs0.awaitNodesAgreeOnPrimary();
- st.rs0.awaitReplication();
+ // Normal writes don't involve config.transactions, so they succeed.
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand(
+ {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}}));
- st.rs0.nodes.forEach(node => {
- if (recreateAfterDrop) {
- assertPartialIndexExists(node);
- } else {
- assertPartialIndexDoesNotExist(node);
- }
- });
-}
+ // Retryable writes read from the partial index, so they fail.
+ let res = assert.commandFailedWithCode(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(11)
+ }),
+ ErrorCodes.BadValue);
+ assert(res.errmsg.includes("Please create an index directly "), tojson(res));
+
+ // User transactions read from the partial index, so they fail.
+ assert.commandFailedWithCode(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(11),
+ startTransaction: true,
+ autocommit: false
+ }),
+ ErrorCodes.BadValue);
-// If the collection is empty and the index does not exist, we should create the partial index on
-// stepup.
-indexRecreationTest(true /*Recreate after drop*/);
-
-const sessionUUID = UUID();
-const parentLsid = {
- id: sessionUUID
-};
-const parentTxnNumber = 35;
-let stmtId = 0;
-
-assert.commandWorked(mongosTestDB.runCommand({
- insert: kCollName,
- documents: [{_id: 0}],
- lsid: parentLsid,
- txnNumber: NumberLong(parentTxnNumber),
- stmtId: NumberInt(stmtId++)
-}));
-const parentSessionDoc = shard0PrimaryConfigTxnColl.findOne({"_id.id": sessionUUID});
-
-const childLsid = {
- id: sessionUUID,
- txnNumber: NumberLong(parentTxnNumber),
- txnUUID: UUID()
-};
-let childTxnNumber = 0;
-
-function runRetryableInternalTransaction(txnNumber) {
- assert.commandWorked(mongosTestDB.runCommand({
+ // Non-retryable internal transactions do not read from or update the partial index, so they can
+ // succeed without the index existing.
+ let nonRetryableTxnSession = {id: UUID(), txnUUID: UUID()};
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: nonRetryableTxnSession,
+ txnNumber: NumberLong(11),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+ }));
+ assert.commandWorked(indexConn.adminCommand({
+ commitTransaction: 1,
+ lsid: nonRetryableTxnSession,
+ txnNumber: NumberLong(11),
+ autocommit: false
+ }));
+
+ // Retryable transactions read from the partial index, so they fail.
+ assert.commandFailedWithCode(indexConn.getDB(kDbName).runCommand({
insert: kCollName,
documents: [{x: 1}],
- lsid: childLsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false,
- startTransaction: true
+ lsid: {id: UUID(), txnUUID: UUID(), txnNumber: NumberLong(2)},
+ txnNumber: NumberLong(11),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+ }),
+ ErrorCodes.BadValue);
+
+ // Recreating the partial index requires the exact options used internally, but in any order.
+ assert.commandFailedWithCode(indexConn.getDB("config").runCommand({
+ createIndexes: "transactions",
+ indexes: [{v: 2, name: "parent_lsid", key: {parentLsid: 1, "_id.txnNumber": 1, _id: 1}}],
+ }),
+ ErrorCodes.IllegalOperation);
+ assert.commandWorked(indexConn.getDB("config").runCommand({
+ createIndexes: "transactions",
+ indexes: [{
+ name: "parent_lsid",
+ key: {parentLsid: 1, "_id.txnNumber": 1, _id: 1},
+ partialFilterExpression: {parentLsid: {$exists: true}},
+ v: 2,
+ }],
+ }));
+
+ // Operations involving the index should succeed now.
+
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand(
+ {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}}));
+
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand(
+ {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}, txnNumber: NumberLong(11)}));
+
+ let userSessionAfter = {id: UUID()};
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: userSessionAfter,
+ txnNumber: NumberLong(11),
+ startTransaction: true,
+ autocommit: false
+ }));
+ assert.commandWorked(indexConn.adminCommand({
+ commitTransaction: 1,
+ lsid: userSessionAfter,
+ txnNumber: NumberLong(11),
+ autocommit: false
+ }));
+
+ let nonRetryableTxnSessionAfter = {id: UUID(), txnUUID: UUID()};
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: nonRetryableTxnSessionAfter,
+ txnNumber: NumberLong(11),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+ }));
+ assert.commandWorked(indexConn.adminCommand({
+ commitTransaction: 1,
+ lsid: nonRetryableTxnSessionAfter,
+ txnNumber: NumberLong(11),
+ autocommit: false
+ }));
+
+ let retryableTxnSessionAfter = {id: UUID(), txnUUID: UUID(), txnNumber: NumberLong(2)};
+ assert.commandWorked(indexConn.getDB(kDbName).runCommand({
+ insert: kCollName,
+ documents: [{x: 1}],
+ lsid: retryableTxnSessionAfter,
+ txnNumber: NumberLong(11),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
}));
- assert.commandWorked(mongosTestDB.adminCommand({
+ assert.commandWorked(indexConn.adminCommand({
commitTransaction: 1,
- lsid: childLsid,
- txnNumber: NumberLong(txnNumber),
+ lsid: retryableTxnSessionAfter,
+ txnNumber: NumberLong(11),
autocommit: false
}));
+
+ if (!alwaysCreateFeatureFlagEnabled) {
+ // We expect that if the partial index is dropped when the collection isn't empty, then on
+ // stepup we should not recreate the index.
+ indexRecreationTest(false /* expectRecreateAfterDrop */);
+ } else {
+ // Creating the partial index when the collection isn't empty can be enabled by a feature
+ // flag.
+ indexRecreationTest(true /* expectRecreateAfterDrop */);
+ }
+}
+
+{
+ const st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+ runTest(st, false /* alwaysCreateFeatureFlagEnabled */);
+ st.stop();
}
-runRetryableInternalTransaction(childTxnNumber);
-assert.eq(shard0PrimaryConfigTxnColl.count({"_id.id": sessionUUID}), 2);
-
-st.rs0.nodes.forEach(node => {
- assertPartialIndexExists(node);
- assertFindUsesCoveredQuery(node);
-});
-
-childTxnNumber++;
-runRetryableInternalTransaction(childTxnNumber);
-assert.eq(shard0PrimaryConfigTxnColl.count({"_id.id": sessionUUID}), 2);
-
-st.rs0.nodes.forEach(node => {
- assertPartialIndexExists(node);
- assertFindUsesCoveredQuery(node);
-});
-
-//
-// Verify clients can create the index only if they provide the exact specification and that
-// operations requiring the index fails if it does not exist.
-//
-
-const indexConn = st.rs0.getPrimary();
-assert.commandWorked(indexConn.getCollection("config.transactions").dropIndex(kPartialIndexName));
-
-// Normal writes don't involve config.transactions, so they succeed.
-assert.commandWorked(indexConn.getDB(kDbName).runCommand(
- {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}}));
-
-// Retryable writes read from the partial index, so they fail.
-let res = assert.commandFailedWithCode(
- indexConn.getDB(kDbName).runCommand(
- {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}, txnNumber: NumberLong(11)}),
- ErrorCodes.BadValue);
-assert(res.errmsg.includes("Please create an index directly "), tojson(res));
-
-// User transactions read from the partial index, so they fail.
-assert.commandFailedWithCode(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: {id: UUID()},
- txnNumber: NumberLong(11),
- startTransaction: true,
- autocommit: false
-}),
- ErrorCodes.BadValue);
-
-// Non retryable internal transactions do not read from or update the partial index, so they can
-// succeed without the index existing.
-let nonRetryableTxnSession = {id: UUID(), txnUUID: UUID()};
-assert.commandWorked(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: nonRetryableTxnSession,
- txnNumber: NumberLong(11),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
-}));
-assert.commandWorked(indexConn.adminCommand({
- commitTransaction: 1,
- lsid: nonRetryableTxnSession,
- txnNumber: NumberLong(11),
- autocommit: false
-}));
-
-// Retryable transactions read from the partial index, so they fail.
-assert.commandFailedWithCode(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: {id: UUID(), txnUUID: UUID(), txnNumber: NumberLong(2)},
- txnNumber: NumberLong(11),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
-}),
- ErrorCodes.BadValue);
-
-// Recreating the partial index requires the exact options used internally, but in any order.
-assert.commandFailedWithCode(indexConn.getDB("config").runCommand({
- createIndexes: "transactions",
- indexes: [{v: 2, name: "parent_lsid", key: {parentLsid: 1, "_id.txnNumber": 1, _id: 1}}],
-}),
- ErrorCodes.IllegalOperation);
-assert.commandWorked(indexConn.getDB("config").runCommand({
- createIndexes: "transactions",
- indexes: [{
- name: "parent_lsid",
- key: {parentLsid: 1, "_id.txnNumber": 1, _id: 1},
- partialFilterExpression: {parentLsid: {$exists: true}},
- v: 2,
- }],
-}));
-
-// Operations involving the index should succeed now.
-
-assert.commandWorked(indexConn.getDB(kDbName).runCommand(
- {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}}));
-
-assert.commandWorked(indexConn.getDB(kDbName).runCommand(
- {insert: kCollName, documents: [{x: 1}], lsid: {id: UUID()}, txnNumber: NumberLong(11)}));
-
-let userSessionAfter = {id: UUID()};
-assert.commandWorked(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: userSessionAfter,
- txnNumber: NumberLong(11),
- startTransaction: true,
- autocommit: false
-}));
-assert.commandWorked(indexConn.adminCommand(
- {commitTransaction: 1, lsid: userSessionAfter, txnNumber: NumberLong(11), autocommit: false}));
-
-let nonRetryableTxnSessionAfter = {id: UUID(), txnUUID: UUID()};
-assert.commandWorked(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: nonRetryableTxnSessionAfter,
- txnNumber: NumberLong(11),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
-}));
-assert.commandWorked(indexConn.adminCommand({
- commitTransaction: 1,
- lsid: nonRetryableTxnSessionAfter,
- txnNumber: NumberLong(11),
- autocommit: false
-}));
-
-let retryableTxnSessionAfter = {id: UUID(), txnUUID: UUID(), txnNumber: NumberLong(2)};
-assert.commandWorked(indexConn.getDB(kDbName).runCommand({
- insert: kCollName,
- documents: [{x: 1}],
- lsid: retryableTxnSessionAfter,
- txnNumber: NumberLong(11),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
-}));
-assert.commandWorked(indexConn.adminCommand({
- commitTransaction: 1,
- lsid: retryableTxnSessionAfter,
- txnNumber: NumberLong(11),
- autocommit: false
-}));
-
-// We expect that if the partial index is dropped when the collection isn't empty, then on stepup we
-// should not recreate the collection.
-indexRecreationTest(false /*Don't recreate after drop*/);
-
-st.stop();
+{
+ const featureFlagSt = new ShardingTest({
+ shards: 1,
+ other: {
+ rs: {nodes: 2},
+ rsOptions:
+ {setParameter: "featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp=true"}
+ }
+ });
+
+ // Sanity check the feature flag was enabled.
+ assert(assert
+ .commandWorked(featureFlagSt.rs0.getPrimary().adminCommand({
+ getParameter: 1,
+ featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp: 1
+ }))
+ .featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp.value);
+ assert(assert
+ .commandWorked(featureFlagSt.rs0.getSecondary().adminCommand({
+ getParameter: 1,
+ featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp: 1
+ }))
+ .featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp.value);
+
+ runTest(featureFlagSt, true /* alwaysCreateFeatureFlagEnabled */);
+ featureFlagSt.stop();
+}
})();
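Taken together, these tests encode the remediation flow the flag exists for. A rough sketch against a live replica set (assuming every node was started with the flag enabled; rst is a ReplSetTest handle):

    // Force a failover; with the flag on, the new primary's step-up builds the
    // parent_lsid index even though config.transactions is non-empty.
    const primary = rst.getPrimary();
    assert.commandWorked(
        primary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true}));
    assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
    rst.awaitNodesAgreeOnPrimary();
    rst.awaitReplication();
    // Every node should now have the index.
    rst.nodes.forEach(node => {
        const specs = assert.commandWorked(node.getDB("config").runCommand(
                                               {listIndexes: "transactions"}))
                          .cursor.firstBatch;
        assert(specs.some(spec => spec.name === "parent_lsid"), tojson(specs));
    });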
diff --git a/src/mongo/db/internal_transactions_feature_flag.idl b/src/mongo/db/internal_transactions_feature_flag.idl
index d0373f56140..bbbb9fa1477 100644
--- a/src/mongo/db/internal_transactions_feature_flag.idl
+++ b/src/mongo/db/internal_transactions_feature_flag.idl
@@ -41,6 +41,11 @@ feature_flags:
default: true
version: 6.0
+ featureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp:
+ description: Feature flag to enable always creating the config.transactions partial index on step up to primary, even if the collection is not empty.
+ cpp_varname: gFeatureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp
+ default: false
+
featureFlagUpdateDocumentShardKeyUsingTransactionApi:
description: Feature flag to enable usage of the transaction api for update findAndModify and update commands that change a document's shard key.
cpp_varname: gFeatureFlagUpdateDocumentShardKeyUsingTransactionApi
diff --git a/src/mongo/db/session_catalog_mongod.cpp b/src/mongo/db/session_catalog_mongod.cpp
index fbfa223d80c..50b63c0f390 100644
--- a/src/mongo/db/session_catalog_mongod.cpp
+++ b/src/mongo/db/session_catalog_mongod.cpp
@@ -379,23 +379,55 @@ void createTransactionTable(OperationContext* opCtx) {
auto createCollectionStatus = storageInterface->createCollection(
opCtx, NamespaceString::kSessionTransactionsTableNamespace, options);
+ auto internalTransactionsFlagEnabled =
+ feature_flags::gFeatureFlagInternalTransactions.isEnabled(
+ serverGlobalParams.featureCompatibility);
+
+ // This flag is off by default and exists only to make creating the partial index easier, so we
+ // don't tie it to FCV. When enabled, it overrides the internal transactions feature flag.
+ auto alwaysCreateIndexFlagEnabled =
+ feature_flags::gFeatureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp
+ .isEnabledAndIgnoreFCV();
+
if (createCollectionStatus == ErrorCodes::NamespaceExists) {
- if (!feature_flags::gFeatureFlagInternalTransactions.isEnabled(
- serverGlobalParams.featureCompatibility)) {
+ if (!internalTransactionsFlagEnabled && !alwaysCreateIndexFlagEnabled) {
return;
}
- AutoGetCollection autoColl(
- opCtx, NamespaceString::kSessionTransactionsTableNamespace, LockMode::MODE_IS);
+ bool collectionIsEmpty = false;
+ {
+ AutoGetCollection autoColl(
+ opCtx, NamespaceString::kSessionTransactionsTableNamespace, LockMode::MODE_IS);
+ invariant(autoColl);
+
+ if (autoColl->getIndexCatalog()->findIndexByName(
+ opCtx, MongoDSessionCatalog::kConfigTxnsPartialIndexName)) {
+ // Index already exists, so there's nothing to do.
+ return;
+ }
+
+ collectionIsEmpty = autoColl->isEmpty(opCtx);
+ }
+
+ if (!collectionIsEmpty) {
+ // Unless explicitly enabled, don't create the index to avoid delaying step up.
+ if (alwaysCreateIndexFlagEnabled) {
+ AutoGetCollection autoColl(
+ opCtx, NamespaceString::kSessionTransactionsTableNamespace, LockMode::MODE_X);
+ IndexBuildsCoordinator::get(opCtx)->createIndex(
+ opCtx,
+ autoColl->uuid(),
+ MongoDSessionCatalog::getConfigTxnPartialIndexSpec(),
+ IndexBuildsManager::IndexConstraints::kEnforce,
+ false /* fromMigration */);
+ }
- // During failover recovery it is possible that the collection is created, but the partial
- // index is not since they are recorded as separate oplog entries. If it is already created
- // or if the collection isn't empty we can return early.
- if (autoColl->getIndexCatalog()->findIndexByName(
- opCtx, MongoDSessionCatalog::kConfigTxnsPartialIndexName) ||
- !autoColl->isEmpty(opCtx)) {
return;
}
+
+ // The index does not exist and the collection is empty, so fall through to create it on the
+ // empty collection. This can happen after a failover because the collection and index
+ // creation are recorded as separate oplog entries.
} else {
uassertStatusOKWithContext(createCollectionStatus,
str::stream()
@@ -404,8 +436,7 @@ void createTransactionTable(OperationContext* opCtx) {
<< " collection");
}
- if (!feature_flags::gFeatureFlagInternalTransactions.isEnabled(
- serverGlobalParams.featureCompatibility)) {
+ if (!internalTransactionsFlagEnabled && !alwaysCreateIndexFlagEnabled) {
return;
}