-rw-r--r--  etc/backports_required_for_multiversion_tests.yml |  2
-rw-r--r--  jstests/replsets/sessions_collection_reaping.js    | 27
-rw-r--r--  src/mongo/db/session_catalog_mongod.cpp            | 46
3 files changed, 60 insertions(+), 15 deletions(-)
diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml
index ef7f2870072..c70824550e8 100644
--- a/etc/backports_required_for_multiversion_tests.yml
+++ b/etc/backports_required_for_multiversion_tests.yml
@@ -163,6 +163,8 @@ all:
test_file: jstests/replsets/cluster_chaining_override.js
- ticket: SERVER-53849
test_file: jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js
+ - ticket: SERVER-59197
+ test_file: jstests/replsets/sessions_collection_reaping.js
suites:
diff --git a/jstests/replsets/sessions_collection_reaping.js b/jstests/replsets/sessions_collection_reaping.js
index 415bf4a4c82..84102415984 100644
--- a/jstests/replsets/sessions_collection_reaping.js
+++ b/jstests/replsets/sessions_collection_reaping.js
@@ -20,7 +20,12 @@ let replTest = new ReplSetTest({
{/* secondary */ rsConfig: {priority: 0}},
{/* arbiter */ rsConfig: {arbiterOnly: true}}
],
- nodeOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: 0}}
+ nodeOptions: {
+ setParameter: {
+ TransactionRecordMinimumLifetimeMinutes: 0,
+ storeFindAndModifyImagesInSideCollection: true
+ }
+ }
});
let nodes = replTest.startSet();
@@ -28,6 +33,7 @@ replTest.initiate();
let primary = replTest.getPrimary();
let sessionsCollOnPrimary = primary.getDB("config").system.sessions;
let transactionsCollOnPrimary = primary.getDB("config").transactions;
+let imageCollOnPrimary = primary.getDB("config").image_collection;
replTest.awaitSecondaryNodes();
let secondary = replTest.getSecondary();
@@ -38,12 +44,18 @@ const collName = "test";
const reapErrorMsgRegex =
new RegExp("Sessions collection is not set up.*waiting until next sessions reap interval");
-// Set up a session with a retryable write.
+// Set up a session with a retryable insert and findAndModify.
let session = primary.startSession({retryWrites: 1});
assert.commandWorked(session.getDatabase(dbName)[collName].save({x: 1}));
+let result =
+ session.getDatabase(dbName)[collName].findAndModify({query: {x: 1}, update: {$set: {y: 1}}});
+// The findAndModify helper returns the document rather than the whole command response, so assert
+// on the value of `x`. A command error would instead surface as an exception and fail the test.
+assert.eq(1, result['x'], "Whole result: " + result);
assert.commandWorked(primary.adminCommand({refreshLogicalSessionCacheNow: 1}));
-assert.eq(1, sessionsCollOnPrimary.count());
-assert.eq(1, transactionsCollOnPrimary.count());
+assert.eq(1, sessionsCollOnPrimary.find().itcount());
+assert.eq(1, transactionsCollOnPrimary.find().itcount());
+assert.eq(1, imageCollOnPrimary.find().itcount());
// Remove the session doc so the session gets reaped when reapLogicalSessionCacheNow is run.
assert.commandWorked(sessionsCollOnPrimary.remove({}));
@@ -54,7 +66,7 @@ assert.commandWorked(sessionsCollOnPrimary.remove({}));
assert.commandWorked(secondary.adminCommand({clearLog: 'global'}));
assert.commandWorked(secondary.adminCommand({reapLogicalSessionCacheNow: 1}));
- assert.eq(1, transactionsCollOnPrimary.count());
+ assert.eq(1, transactionsCollOnPrimary.find().itcount());
assert.eq(false, checkLog.checkContainsOnce(secondary, reapErrorMsgRegex));
}
@@ -63,7 +75,7 @@ assert.commandWorked(sessionsCollOnPrimary.remove({}));
assert.commandWorked(arbiter.adminCommand({clearLog: 'global'}));
assert.commandWorked(arbiter.adminCommand({reapLogicalSessionCacheNow: 1}));
- assert.eq(1, transactionsCollOnPrimary.count());
+ assert.eq(1, transactionsCollOnPrimary.find().itcount());
if (!jsTest.options().useRandomBinVersionsWithinReplicaSet) {
// Verify that the arbiter did not try to reap the session.
@@ -76,7 +88,8 @@ assert.commandWorked(sessionsCollOnPrimary.remove({}));
assert.commandWorked(primary.adminCommand({clearLog: 'global'}));
assert.commandWorked(primary.adminCommand({reapLogicalSessionCacheNow: 1}));
- assert.eq(0, transactionsCollOnPrimary.count());
+ assert.eq(0, transactionsCollOnPrimary.find().itcount());
+ assert.eq(0, imageCollOnPrimary.find().itcount());
}
replTest.stopSet();
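
A note on the shell semantics the test relies on (not part of the commit): the findAndModify
collection helper returns only the matched document, while the raw command form returns the full
response envelope, and find().itcount() counts documents by iterating a cursor instead of taking
the metadata shortcut an unfiltered count() can take. A minimal sketch, assuming a collection
named "test" that already contains {x: 1}:

    // Helper form: returns just the (pre-update) document, so the test asserts on result.x.
    let doc = db.test.findAndModify({query: {x: 1}, update: {$set: {y: 1}}});
    // doc => {_id: ..., x: 1}

    // Raw command form: returns the whole response, with the document under `value`.
    let res = db.runCommand({findAndModify: "test", query: {x: 1}, update: {$set: {y: 1}}});
    // res => {lastErrorObject: {...}, value: {_id: ..., x: 1, y: 1}, ok: 1}

    // itcount() iterates the cursor and counts the documents actually returned.
    assert.eq(1, db.test.find().itcount());
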
diff --git a/src/mongo/db/session_catalog_mongod.cpp b/src/mongo/db/session_catalog_mongod.cpp
index ef24836ad74..a23ee996295 100644
--- a/src/mongo/db/session_catalog_mongod.cpp
+++ b/src/mongo/db/session_catalog_mongod.cpp
@@ -141,14 +141,20 @@ int removeSessionsTransactionRecords(OperationContext* opCtx,
if (expiredSessionIds.empty())
return 0;
- // Remove the session ids from the on-disk catalog
- write_ops::Delete deleteOp(NamespaceString::kSessionTransactionsTableNamespace);
- deleteOp.setWriteCommandBase([] {
+ // Remove the findAndModify images that map to the sessions being reaped. We first delete any
+ // images belonging to sessions about to be reaped, followed by the sessions themselves. This
+ // way, if there's a failure, we'll only be left with sessions that have a dangling reference
+ // to an image, and session reaping will rediscover those sessions and try again.
+ //
+ // We opt for this rather than performing the two sets of deletes in a single transaction simply
+ // to reduce code complexity.
+ write_ops::Delete imageDeleteOp(NamespaceString::kConfigImagesNamespace);
+ imageDeleteOp.setWriteCommandBase([] {
write_ops::WriteCommandBase base;
base.setOrdered(false);
return base;
}());
- deleteOp.setDeletes([&] {
+ imageDeleteOp.setDeletes([&] {
std::vector<write_ops::DeleteOpEntry> entries;
for (const auto& lsid : expiredSessionIds) {
entries.emplace_back(BSON(LogicalSessionRecord::kIdFieldName << lsid.toBSON()),
@@ -157,15 +163,39 @@ int removeSessionsTransactionRecords(OperationContext* opCtx,
return entries;
}());
+ BatchedCommandResponse response;
+ std::string errmsg;
BSONObj result;
DBDirectClient client(opCtx);
- client.runCommand(NamespaceString::kSessionTransactionsTableNamespace.db().toString(),
- deleteOp.toBSON({}),
+ client.runCommand(
+ NamespaceString::kConfigImagesNamespace.db().toString(), imageDeleteOp.toBSON({}), result);
+
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "Failed to parse response " << result,
+ response.parseBSON(result, &errmsg));
+ uassertStatusOK(response.getTopLevelStatus());
+
+ // Remove the session ids from the on-disk catalog
+ write_ops::Delete sessionDeleteOp(NamespaceString::kSessionTransactionsTableNamespace);
+ sessionDeleteOp.setWriteCommandBase([] {
+ write_ops::WriteCommandBase base;
+ base.setOrdered(false);
+ return base;
+ }());
+ sessionDeleteOp.setDeletes([&] {
+ std::vector<write_ops::DeleteOpEntry> entries;
+ for (const auto& lsid : expiredSessionIds) {
+ entries.emplace_back(BSON(LogicalSessionRecord::kIdFieldName << lsid.toBSON()),
+ false /* multi = false */);
+ }
+ return entries;
+ }());
+
+ client.runCommand(NamespaceString::kConfigImagesNamespace.db().toString(),
+ sessionDeleteOp.toBSON({}),
result);
- BatchedCommandResponse response;
- std::string errmsg;
uassert(ErrorCodes::FailedToParse,
str::stream() << "Failed to parse response " << result,
response.parseBSON(result, &errmsg));
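
A minimal shell sketch (not part of the commit) of the ordering the comment above describes,
assuming lsidsToReap holds the full logical session id documents that both collections use as
their _id; the server itself builds the two batched delete commands shown in the diff and runs
them through DBDirectClient:

    let configDB = db.getSiblingDB("config");

    // 1. Delete the findAndModify images for the sessions about to be reaped. If we fail after
    //    this point, the surviving session documents merely hold a dangling image reference.
    assert.commandWorked(configDB.image_collection.remove({_id: {$in: lsidsToReap}}));

    // 2. Delete the session records themselves. If this fails, the next reap pass rediscovers
    //    these sessions and retries both deletes.
    assert.commandWorked(configDB.transactions.remove({_id: {$in: lsidsToReap}}));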