author     Xiangyu Yao <xiangyu.yao@mongodb.com>  2018-06-13 15:21:28 -0400
committer  Xiangyu Yao <xiangyu.yao@mongodb.com>  2018-07-10 15:11:12 -0400
commit     86439f3cd32a4bf7bc0aaf9f550b0f0fd9dc2e8e (patch)
tree       360f61cdcfca719c071097b524dc42f002a2de22
parent     2d498837ce5d795b026a7f901f469e2bf8ea4cfc (diff)
download   mongo-86439f3cd32a4bf7bc0aaf9f550b0f0fd9dc2e8e.tar.gz

SERVER-34113 Remove all support for snapshot reads outside multi-document transactions

(cherry picked from commit 1871507cdbdd492abee785076203467d20e0e716)
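
With this commit, readConcern level "snapshot" is accepted only as part of a multi-document
transaction; sending it on an individual command, with or without an attached txnNumber, is
rejected with InvalidOptions. A minimal shell sketch of the resulting pattern, modeled on the
updated jstests below (database and collection names here are illustrative):

    // Supported after this change: attach readConcern "snapshot" to the transaction itself.
    const session = db.getMongo().startSession({causalConsistency: false});
    const sessionDb = session.getDatabase("test");
    session.startTransaction({readConcern: {level: "snapshot"}});
    assert.commandWorked(sessionDb.runCommand({find: "coll"}));
    session.commitTransaction();
    session.endSession();

    // No longer supported: readConcern "snapshot" directly on a command outside a transaction.
    assert.commandFailedWithCode(
        db.getSiblingDB("test").runCommand({find: "coll", readConcern: {level: "snapshot"}}),
        ErrorCodes.InvalidOptions);
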
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_14.yml  4
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_auth_19.yml  4
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_auth_audit_18.yml  4
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_auth_audit_misc.yml  2
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_auth_misc.yml  2
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_ese_20.yml  4
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_ese_misc.yml  2
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml  2
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_misc.yml  2
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_misc.yml  2
-rw-r--r--  buildscripts/templates/generate_resmoke_suites/sharding_last_stable_mongos_and_mixed_shards.yml.j2  2
-rw-r--r--  jstests/core/txns/no_read_concern_snapshot_outside_txn.js (renamed from jstests/core/txns/no_snapshot_writes_outside_txn.js)  36
-rw-r--r--  jstests/core/txns/statement_ids_accepted.js  18
-rw-r--r--  jstests/multiVersion/global_snapshot_reads_downgrade_cluster.js  91
-rw-r--r--  jstests/multiVersion/global_snapshot_reads_upgrade_cluster.js  89
-rw-r--r--  jstests/multiVersion/libs/global_snapshot_reads_helpers.js  96
-rw-r--r--  jstests/noPassthrough/agg_explain_read_concern.js  2
-rw-r--r--  jstests/noPassthrough/crud_timestamps.js  12
-rw-r--r--  jstests/noPassthrough/readConcern_atClusterTime.js  113
-rw-r--r--  jstests/noPassthrough/readConcern_atClusterTime_noop_write.js  22
-rw-r--r--  jstests/noPassthrough/readConcern_snapshot.js  54
-rw-r--r--  jstests/noPassthrough/read_concern_snapshot_yielding.js  31
-rw-r--r--  jstests/sharding/snapshot_aggregate_mongos.js  121
-rw-r--r--  jstests/sharding/snapshot_find_mongos.js  122
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp  2
-rw-r--r--  src/mongo/db/commands/dbhash.cpp  17
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp  2
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp  2
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp  3
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp  2
-rw-r--r--  src/mongo/db/cursor_manager.cpp  81
-rw-r--r--  src/mongo/db/cursor_manager.h  15
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp  20
-rw-r--r--  src/mongo/db/pipeline/expression_context.h  2
-rw-r--r--  src/mongo/db/pipeline/pipeline.cpp  5
-rw-r--r--  src/mongo/db/pipeline/pipeline_test.cpp  26
-rw-r--r--  src/mongo/db/read_concern.cpp  8
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp  4
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp  15
-rw-r--r--  src/mongo/db/session.cpp  79
-rw-r--r--  src/mongo/db/session.h  16
-rw-r--r--  src/mongo/db/session_catalog_test.cpp  8
-rw-r--r--  src/mongo/db/session_test.cpp  19
43 files changed, 180 insertions, 983 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_14.yml b/buildscripts/resmokeconfig/suites/sharding_14.yml
index 25c5d2e264a..1c7d11b846e 100644
--- a/buildscripts/resmokeconfig/suites/sharding_14.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_14.yml
@@ -56,8 +56,6 @@ selector:
- jstests/sharding/auth_repl.js
- jstests/sharding/count_config_servers.js
- jstests/sharding/sharding_options.js
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/advance_cluster_time_action_type.js
- jstests/sharding/mongos_wait_csrs_initiate.js
- jstests/sharding/config_rs_change.js
@@ -70,4 +68,4 @@ executor:
config:
shell_options:
nodb: ''
- readMode: commands
\ No newline at end of file
+ readMode: commands
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_19.yml b/buildscripts/resmokeconfig/suites/sharding_auth_19.yml
index 1beab3cf4da..3ec9b318ee8 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth_19.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_19.yml
@@ -29,8 +29,6 @@ selector:
- jstests/sharding/empty_cluster_init.js
- jstests/sharding/addshard6.js
- jstests/sharding/read_after_optime.js
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/sharding_options.js
- jstests/sharding/current_op_no_shards.js
- jstests/sharding/config_rs_change.js
@@ -48,4 +46,4 @@ executor:
keyFile: *keyFile
keyFileData: *keyFileData
nodb: ''
- readMode: commands
\ No newline at end of file
+ readMode: commands
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_audit_18.yml b/buildscripts/resmokeconfig/suites/sharding_auth_audit_18.yml
index c3ce889273a..1df76878e50 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth_audit_18.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_audit_18.yml
@@ -45,8 +45,6 @@ selector:
- jstests/sharding/empty_cluster_init.js
- jstests/sharding/addshard6.js
- jstests/sharding/read_after_optime.js
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/sharding_options.js
- jstests/sharding/config_rs_change.js
- jstests/sharding/current_op_no_shards.js
@@ -65,4 +63,4 @@ executor:
keyFile: *keyFile
keyFileData: *keyFileData
nodb: ''
- readMode: commands
\ No newline at end of file
+ readMode: commands
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_audit_misc.yml b/buildscripts/resmokeconfig/suites/sharding_auth_audit_misc.yml
index 2b032d6acf0..431b77f6309 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth_audit_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_audit_misc.yml
@@ -367,8 +367,6 @@ selector:
- jstests/sharding/empty_cluster_init.js
- jstests/sharding/addshard6.js
- jstests/sharding/read_after_optime.js
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/sharding_options.js
- jstests/sharding/config_rs_change.js
- jstests/sharding/current_op_no_shards.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_misc.yml b/buildscripts/resmokeconfig/suites/sharding_auth_misc.yml
index ce646686d38..f211e199bb6 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_misc.yml
@@ -366,8 +366,6 @@ selector:
- jstests/sharding/empty_cluster_init.js
- jstests/sharding/addshard6.js
- jstests/sharding/read_after_optime.js
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/sharding_options.js
- jstests/sharding/current_op_no_shards.js
- jstests/sharding/config_rs_change.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_ese_20.yml b/buildscripts/resmokeconfig/suites/sharding_ese_20.yml
index aa0aa2b7ac2..43ab6667a55 100644
--- a/buildscripts/resmokeconfig/suites/sharding_ese_20.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_ese_20.yml
@@ -46,8 +46,6 @@ selector:
- jstests/sharding/auth_sharding_cmd_metadata.js
- jstests/sharding/read_after_optime.js
- jstests/sharding/auth_repl.js
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/sharding_options.js
- jstests/sharding/advance_cluster_time_action_type.js
- jstests/sharding/mongos_wait_csrs_initiate.js
@@ -65,4 +63,4 @@ executor:
TestData:
enableEncryption: ''
encryptionKeyFile: *keyFile
- readMode: commands
\ No newline at end of file
+ readMode: commands
diff --git a/buildscripts/resmokeconfig/suites/sharding_ese_misc.yml b/buildscripts/resmokeconfig/suites/sharding_ese_misc.yml
index cf813fafd62..6c6bc57eb0a 100644
--- a/buildscripts/resmokeconfig/suites/sharding_ese_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_ese_misc.yml
@@ -374,8 +374,6 @@ selector:
- jstests/sharding/auth_sharding_cmd_metadata.js
- jstests/sharding/read_after_optime.js
- jstests/sharding/auth_repl.js
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/sharding_options.js
- jstests/sharding/advance_cluster_time_action_type.js
- jstests/sharding/mongos_wait_csrs_initiate.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index 56828b3224f..ee3fb0fdb03 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -61,8 +61,6 @@ selector:
# Requires killOp to work for local operations on mongos, introduced in v4.0.
- jstests/sharding/killop.js
# New 4.0 feature
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/change_stream_lookup_single_shard_cluster.js
- jstests/sharding/change_stream_metadata_notifications.js
- jstests/sharding/change_streams.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_misc.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_misc.yml
index 9f8b953266a..42555003031 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_misc.yml
@@ -69,8 +69,6 @@ selector:
# Requires killOp to work for local operations on mongos, introduced in v4.0.
- jstests/sharding/killop.js
# New 4.0 feature
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/change_stream_lookup_single_shard_cluster.js
- jstests/sharding/change_stream_metadata_notifications.js
- jstests/sharding/change_streams.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_misc.yml b/buildscripts/resmokeconfig/suites/sharding_misc.yml
index 054c1ba5afb..f464a330f70 100644
--- a/buildscripts/resmokeconfig/suites/sharding_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_misc.yml
@@ -368,8 +368,6 @@ selector:
- jstests/sharding/auth_repl.js
- jstests/sharding/count_config_servers.js
- jstests/sharding/sharding_options.js
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/advance_cluster_time_action_type.js
- jstests/sharding/mongos_wait_csrs_initiate.js
- jstests/sharding/config_rs_change.js
diff --git a/buildscripts/templates/generate_resmoke_suites/sharding_last_stable_mongos_and_mixed_shards.yml.j2 b/buildscripts/templates/generate_resmoke_suites/sharding_last_stable_mongos_and_mixed_shards.yml.j2
index f1aa7055ca4..b4f2f2f150d 100644
--- a/buildscripts/templates/generate_resmoke_suites/sharding_last_stable_mongos_and_mixed_shards.yml.j2
+++ b/buildscripts/templates/generate_resmoke_suites/sharding_last_stable_mongos_and_mixed_shards.yml.j2
@@ -78,8 +78,6 @@ selector:
# Requires killOp to work for local operations on mongos, introduced in v4.0.
- jstests/sharding/killop.js
# New 4.0 feature
- - jstests/sharding/snapshot_aggregate_mongos.js
- - jstests/sharding/snapshot_find_mongos.js
- jstests/sharding/change_stream_lookup_single_shard_cluster.js
- jstests/sharding/change_streams_unsharded_becomes_sharded.js
- jstests/sharding/change_streams_whole_db.js
diff --git a/jstests/core/txns/no_snapshot_writes_outside_txn.js b/jstests/core/txns/no_read_concern_snapshot_outside_txn.js
index e725be47944..5634d7df473 100644
--- a/jstests/core/txns/no_snapshot_writes_outside_txn.js
+++ b/jstests/core/txns/no_read_concern_snapshot_outside_txn.js
@@ -1,5 +1,5 @@
/**
- * Verify that readConcern: snapshot is not permitted on writes outside transactions.
+ * Verify that readConcern: snapshot is not permitted outside transactions.
*
* @tags: [uses_transactions]
*/
@@ -7,7 +7,7 @@
(function() {
"use strict";
const dbName = "test";
- const collName = "no_snapshot_writes_outside_txn";
+ const collName = "no_read_concern_snapshot_outside_txn";
const testDB = db.getSiblingDB(dbName);
// Set up the test collection.
@@ -22,14 +22,14 @@
let txnNumber = 0;
let stmtId = 0;
- function tryWrites({testDB, useSnapshotReadSyntax, message}) {
+ function tryCommands({testDB, assignTxnNumber, message}) {
jsTestLog("Verify that inserts cannot use readConcern snapshot " + message);
let cmd = {
insert: collName,
documents: [{_id: 0}],
readConcern: {level: "snapshot"},
};
- if (useSnapshotReadSyntax) {
+ if (assignTxnNumber) {
Object.assign(cmd, {txnNumber: NumberLong(txnNumber++), stmtId: NumberInt(stmtId)});
}
assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
@@ -40,7 +40,7 @@
updates: [{q: {_id: 0}, u: {$set: {x: 1}}}],
readConcern: {level: "snapshot"},
};
- if (useSnapshotReadSyntax) {
+ if (assignTxnNumber) {
Object.assign(cmd, {txnNumber: NumberLong(txnNumber++), stmtId: NumberInt(stmtId)});
}
assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
@@ -51,7 +51,7 @@
deletes: [{q: {_id: 0}, limit: 1}],
readConcern: {level: "snapshot"},
};
- if (useSnapshotReadSyntax) {
+ if (assignTxnNumber) {
Object.assign(cmd, {txnNumber: NumberLong(txnNumber++), stmtId: NumberInt(stmtId)});
}
assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
@@ -63,18 +63,32 @@
remove: true,
readConcern: {level: "snapshot"},
};
- if (useSnapshotReadSyntax) {
+ if (assignTxnNumber) {
+ Object.assign(cmd, {txnNumber: NumberLong(txnNumber++), stmtId: NumberInt(stmtId)});
+ }
+ assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
+
+ jsTestLog("Verify that finds cannot use readConcern snapshot " + message);
+ cmd = {find: collName, readConcern: {level: "snapshot"}};
+ if (assignTxnNumber) {
+ Object.assign(cmd, {txnNumber: NumberLong(txnNumber++), stmtId: NumberInt(stmtId)});
+ }
+ assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
+
+ jsTestLog("Verify that aggregate cannot use readConcern snapshot " + message);
+ cmd = {aggregate: collName, pipeline: [], readConcern: {level: "snapshot"}};
+ if (assignTxnNumber) {
Object.assign(cmd, {txnNumber: NumberLong(txnNumber++), stmtId: NumberInt(stmtId)});
}
assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
}
- tryWrites({
+ tryCommands({
testDB: sessionDb,
- useSnapshotReadSyntax: true,
+ assignTxnNumber: true,
message: "in session using snapshot read syntax."
});
- tryWrites({testDB: sessionDb, useSnapshotReadSyntax: false, message: "in session."});
- tryWrites({testDB: testDB, useSnapshotReadSyntax: false, message: "outside session."});
+ tryCommands({testDB: sessionDb, assignTxnNumber: false, message: "in session."});
+ tryCommands({testDB: testDB, assignTxnNumber: false, message: "outside session."});
session.endSession();
}());
diff --git a/jstests/core/txns/statement_ids_accepted.js b/jstests/core/txns/statement_ids_accepted.js
index 0a27f3c0c6f..b02001e7706 100644
--- a/jstests/core/txns/statement_ids_accepted.js
+++ b/jstests/core/txns/statement_ids_accepted.js
@@ -67,14 +67,6 @@
autocommit: false
}));
- jsTestLog("Check that count accepts a statement ID");
- assert.commandWorked(sessionDb.runCommand({
- count: collName,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(0),
- }));
-
jsTestLog("Check that delete accepts a statement ID");
assert.commandWorked(sessionDb.runCommand({
delete: collName,
@@ -93,6 +85,8 @@
readConcern: {level: "snapshot"},
txnNumber: NumberLong(txnNumber++),
stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
}));
// The doTxn command is intentionally left out.
@@ -203,14 +197,6 @@
testColl.find({}, {readConcern: {level: "snapshot"}});
return true;
});
- assert.commandWorked(sessionDb.runCommand({
- geoNear: collName,
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(0)
- }));
jsTestLog("Check that geoSearch accepts a statement ID");
assert.commandWorked(sessionDb.runCommand({
diff --git a/jstests/multiVersion/global_snapshot_reads_downgrade_cluster.js b/jstests/multiVersion/global_snapshot_reads_downgrade_cluster.js
deleted file mode 100644
index 9dfdaa7c8a0..00000000000
--- a/jstests/multiVersion/global_snapshot_reads_downgrade_cluster.js
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Test the downgrade of a sharded cluster from latest to last-stable version succeeds, verifying
- * the behavior of global snapshot reads throughout the process.
- */
-
-// Checking UUID consistency uses cached connections, which are not valid across restarts or
-// stepdowns.
-TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-
-(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/multiVersion/libs/multi_rs.js");
- load("jstests/multiVersion/libs/multi_cluster.js");
- load("jstests/multiVersion/libs/global_snapshot_reads_helpers.js");
-
- if (!supportsSnapshotReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support snapshot read concern.");
- return;
- }
-
- // Start a cluster with two shards and two mongos at the latest version.
- var st = new ShardingTest({
- shards: 2,
- mongos: 2,
- other: {
- mongosOptions: {binVersion: "latest"},
- configOptions: {binVersion: "latest"},
- rsOptions: {binVersion: "latest"},
- },
- rs: {nodes: 3} // Use 3 node replica sets to allow downgrades with no downtime.
- });
- checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
-
- // Setup a sharded collection with two chunks, one on each shard.
- assert.commandWorked(st.s.adminCommand({enableSharding: "shardedDb"}));
- st.ensurePrimaryShard("shardedDb", st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: "shardedDb.sharded", key: {x: 1}}));
- assert.commandWorked(st.s.adminCommand({split: "shardedDb.sharded", middle: {x: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: "shardedDb.sharded", find: {x: 1}, to: st.shard1.shardName}));
-
- // Insert some data for the reads to find.
- st.s.getDB("unshardedDb").unsharded.insert({x: 1});
- st.s.getDB("shardedDb").sharded.insert([{x: -1}, {x: 1}]);
-
- // Global snapshot reads are accepted.
- verifyGlobalSnapshotReads(st.s0, true);
- verifyGlobalSnapshotReads(st.s1, true);
-
- // Downgrade featureCompatibilityVersion to 3.6.
- assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
-
- // Global snapshot reads are accepted.
- verifyGlobalSnapshotReads(st.s0, true);
- verifyGlobalSnapshotReads(st.s1, true);
-
- // Downgrade the mongos servers first.
- jsTest.log("Downgrading mongos servers.");
- st.upgradeCluster("last-stable",
- {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
-
- // Global snapshot reads are rejected with InvalidOptions, because downgraded mongos will not
- // forward the txnNumber to the shards.
- verifyGlobalSnapshotReads(st.s0, false, ErrorCodes.InvalidOptions);
- verifyGlobalSnapshotReads(st.s1, false, ErrorCodes.InvalidOptions);
-
- // Then downgrade the shard servers.
- jsTest.log("Downgrading shard servers.");
- st.upgradeCluster("last-stable",
- {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
-
- // Global snapshot reads are rejected with FailedToParse, because the shards will reject the
- // unknown readConcern field.
- verifyGlobalSnapshotReads(st.s0, false, ErrorCodes.FailedToParse);
- verifyGlobalSnapshotReads(st.s1, false, ErrorCodes.FailedToParse);
-
- // Finally, downgrade the config servers.
- jsTest.log("Downgrading config servers.");
- st.upgradeCluster("last-stable",
- {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
-
- // Global snapshot reads are rejected with FailedToParse, because the shards will reject the
- // unknown readConcern field.
- verifyGlobalSnapshotReads(st.s0, false, ErrorCodes.FailedToParse);
- verifyGlobalSnapshotReads(st.s1, false, ErrorCodes.FailedToParse);
-
- st.stop();
-})();
diff --git a/jstests/multiVersion/global_snapshot_reads_upgrade_cluster.js b/jstests/multiVersion/global_snapshot_reads_upgrade_cluster.js
deleted file mode 100644
index 86113083fb0..00000000000
--- a/jstests/multiVersion/global_snapshot_reads_upgrade_cluster.js
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Tests upgrading a cluster with two shards and two mongos servers from last stable to current
- * version, verifying the behavior of global snapshot reads throughout the process.
- */
-
-// Checking UUID consistency uses cached connections, which are not valid across restarts or
-// stepdowns.
-TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-
-(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/multiVersion/libs/multi_rs.js");
- load("jstests/multiVersion/libs/multi_cluster.js");
- load("jstests/multiVersion/libs/global_snapshot_reads_helpers.js");
-
- if (!supportsSnapshotReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support snapshot read concern.");
- return;
- }
-
- // Start a cluster with two shards and two mongos at the last stable version.
- var st = new ShardingTest({
- shards: 2,
- mongos: 2,
- other: {
- configOptions: {binVersion: "last-stable"},
- mongosOptions: {binVersion: "last-stable"},
- rsOptions: {binVersion: "last-stable"},
- },
- rs: {nodes: 3} // Use 3 node replica sets to allow upgrades with no downtime.
- });
-
- // Setup a sharded collection with two chunks, one on each shard.
- assert.commandWorked(st.s.adminCommand({enableSharding: "shardedDb"}));
- st.ensurePrimaryShard("shardedDb", st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: "shardedDb.sharded", key: {x: 1}}));
- assert.commandWorked(st.s.adminCommand({split: "shardedDb.sharded", middle: {x: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: "shardedDb.sharded", find: {x: 1}, to: st.shard1.shardName}));
-
- // Insert some data for the reads to find.
- st.s.getDB("unshardedDb").unsharded.insert({x: 1});
- st.s.getDB("shardedDb").sharded.insert([{x: -1}, {x: 1}]);
-
- // Global snapshot reads are rejected with FailedToParse, because the shards will reject the
- // unknown readConcern field.
- verifyGlobalSnapshotReads(st.s0, false, ErrorCodes.FailedToParse);
- verifyGlobalSnapshotReads(st.s1, false, ErrorCodes.FailedToParse);
-
- // Upgrade the config servers.
- jsTest.log("Upgrading config servers.");
- st.upgradeCluster("latest", {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
-
- // Global snapshot reads are rejected with FailedToParse, because the shards will reject the
- // unknown readConcern field.
- verifyGlobalSnapshotReads(st.s0, false, ErrorCodes.FailedToParse);
- verifyGlobalSnapshotReads(st.s1, false, ErrorCodes.FailedToParse);
-
- // Then upgrade the shard servers.
- jsTest.log("Upgrading shard servers.");
- st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
-
- // Global snapshot reads are rejected with InvalidOptions, because mongos will not forward the
- // txnNumber to the upgraded shards.
- verifyGlobalSnapshotReads(st.s0, false, ErrorCodes.InvalidOptions);
- verifyGlobalSnapshotReads(st.s1, false, ErrorCodes.InvalidOptions);
-
- // Finally, upgrade mongos servers.
- jsTest.log("Upgrading mongos servers.");
- st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
- checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
-
- // Global snapshot reads are accepted.
- verifyGlobalSnapshotReads(st.s0, true);
- verifyGlobalSnapshotReads(st.s1, true);
-
- // Upgrade the cluster's feature compatibility version to the latest.
- assert.commandWorked(
- st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
-
- // Global snapshot reads are accepted.
- verifyGlobalSnapshotReads(st.s0, true);
- verifyGlobalSnapshotReads(st.s1, true);
-
- st.stop();
-})();
diff --git a/jstests/multiVersion/libs/global_snapshot_reads_helpers.js b/jstests/multiVersion/libs/global_snapshot_reads_helpers.js
deleted file mode 100644
index be7730fdc99..00000000000
--- a/jstests/multiVersion/libs/global_snapshot_reads_helpers.js
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Helper functions for testing global snapshot reads in the multiversion suite.
- */
-
-function supportsSnapshotReadConcern() {
- const conn = MongoRunner.runMongod();
- const supportsSnapshotReadConcern =
- conn.getDB("test").serverStatus().storageEngine.supportsSnapshotReadConcern;
- MongoRunner.stopMongod(conn);
-
- return supportsSnapshotReadConcern;
-}
-
-/**
- * Runs the given command on the given database, asserting the command failed or succeeded
- * depending on the value of expectSuccess. Returns the last used txnNumber.
- */
-function runCommandAndVerifyResponse(sessionDb, txnNumber, cmdObj, expectSuccess, expectedCode) {
- if (expectSuccess) {
- // A snapshot read may fail with SnapshotTooOld in 4.0 when targeting multiple shards
- // because atClusterTime reads can only be established at the majority commit point, and
- // noop writes may advance the majority commit point past the given atClusterTime
- // resulting in a SnapshotTooOld error. Eventually the read should succeed, when all
- // targeted shards are at the same cluster time, so retry until it does.
- // A snapshot read may also fail with NoSuchTransaction if it encountered a StaleEpoch
- // error while it was running.
- assert.soon(() => {
- const res = sessionDb.runCommand(cmdObj);
- if (!res.ok) {
- assert(res.code === ErrorCodes.SnapshotTooOld ||
- res.code === ErrorCodes.NoSuchTransaction,
- "expected command to fail with SnapshotTooOld or NoSuchTransaction, cmd: " +
- tojson(cmdObj) + ", result: " + tojson(res));
- print("Retrying because of SnapshotTooOld or NoSuchTransaction error.");
- txnNumber++;
- cmdObj.txnNumber = NumberLong(txnNumber);
- return false;
- }
-
- assert.commandWorked(res, "expected command to succeed, cmd: " + tojson(cmdObj));
- return true;
- });
- } else {
- assert.commandFailedWithCode(sessionDb.runCommand(cmdObj),
- expectedCode,
- "command did not fail with expected error code, cmd: " +
- tojson(cmdObj) + ", expectedCode: " +
- tojson(expectedCode));
- }
- return txnNumber;
-}
-
-/**
- * Runs reads with snapshot readConcern against mongos, expecting they either fail or succeed
- * depending on the expectSuccess parameter.
- */
-function verifyGlobalSnapshotReads(conn, expectSuccess, expectedCode) {
- const session = conn.startSession({causalConsistency: false});
- let txnNumber = 0; // Counter used and incremented for all snapshot reads per session.
-
- // Unsharded collection.
- const unshardedDb = session.getDatabase("unshardedDb");
- txnNumber = runCommandAndVerifyResponse(
- unshardedDb,
- txnNumber,
- {find: "unsharded", readConcern: {level: "snapshot"}, txnNumber: NumberLong(txnNumber)},
- expectSuccess,
- expectedCode);
-
- // Sharded collection, one shard.
- txnNumber++;
- const shardedDb = session.getDatabase("shardedDb");
- txnNumber = runCommandAndVerifyResponse(shardedDb,
- txnNumber,
- {
- find: "sharded",
- filter: {x: 1},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber)
- },
- expectSuccess,
- expectedCode);
-
- // TODO: SERVER-31767
- const server_31767_fixed = false;
- if (server_31767_fixed) {
- // Sharded collection, all shards.
- txnNumber++;
- txnNumber = runCommandAndVerifyResponse(
- shardedDb,
- txnNumber,
- {find: "sharded", readConcern: {level: "snapshot"}, txnNumber: NumberLong(txnNumber)},
- expectSuccess,
- expectedCode);
- }
-}
diff --git a/jstests/noPassthrough/agg_explain_read_concern.js b/jstests/noPassthrough/agg_explain_read_concern.js
index 698d5bebda3..e699a79340f 100644
--- a/jstests/noPassthrough/agg_explain_read_concern.js
+++ b/jstests/noPassthrough/agg_explain_read_concern.js
@@ -39,8 +39,6 @@
}));
// Test that explain is illegal with other readConcern levels.
- // TODO: SERVER-34113 Remove this test when we completely remove snapshot
- // reads since this command is not supported with transaction api.
const nonLocalReadConcerns = ["majority", "available", "linearizable"];
nonLocalReadConcerns.forEach(function(readConcernLevel) {
let aggCmd = {
diff --git a/jstests/noPassthrough/crud_timestamps.js b/jstests/noPassthrough/crud_timestamps.js
index cb7986d7bc9..3157e2a265a 100644
--- a/jstests/noPassthrough/crud_timestamps.js
+++ b/jstests/noPassthrough/crud_timestamps.js
@@ -31,20 +31,16 @@
const sessionDb = session.getDatabase(dbName);
const response = assert.commandWorked(testDB.createCollection("coll"));
const startTime = response.operationTime;
- let txnNumber = 0;
function check(atClusterTime, expected) {
+ session.startTransaction({readConcern: {level: "snapshot", atClusterTime: atClusterTime}});
// Check both a collection scan and scanning the _id index.
[{$natural: 1}, {_id: 1}].forEach(sort => {
- let response = assert.commandWorked(sessionDb.runCommand({
- find: collName,
- sort: sort,
- readConcern: {level: "snapshot", atClusterTime: atClusterTime},
- txnNumber: NumberLong(txnNumber++),
- singleBatch: true
- }));
+ let response = assert.commandWorked(
+ sessionDb.runCommand({find: collName, sort: sort, singleBatch: true}));
assert.eq(expected, response.cursor.firstBatch);
});
+ session.commitTransaction();
}
// insert
diff --git a/jstests/noPassthrough/readConcern_atClusterTime.js b/jstests/noPassthrough/readConcern_atClusterTime.js
index a1fb057f8d0..58814771bbf 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime.js
@@ -39,72 +39,66 @@ function _getClusterTime(rst) {
const sessionDb = session.getDatabase(dbName);
const clusterTime = _getClusterTime(rst);
- let txnNumber = 0;
// 'atClusterTime' can be used with readConcern level 'snapshot'.
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot", atClusterTime: clusterTime},
- txnNumber: NumberLong(txnNumber++)
- }));
+ session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
+ assert.commandWorked(sessionDb.runCommand({find: collName}));
+ session.commitTransaction();
// 'atClusterTime' cannot be greater than the current cluster time.
const futureClusterTime = new Timestamp(clusterTime.getTime() + 1000, 1);
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot", atClusterTime: futureClusterTime},
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.InvalidOptions);
+ session.startTransaction({readConcern: {level: "snapshot", atClusterTime: futureClusterTime}});
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
// 'atClusterTime' must have type Timestamp.
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot", atClusterTime: "bad"},
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.TypeMismatch);
+ session.startTransaction({readConcern: {level: "snapshot", atClusterTime: "bad"}});
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.TypeMismatch);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
// 'atClusterTime' cannot be used with readConcern level 'majority'.
- assert.commandFailedWithCode(
- sessionDb.runCommand(
- {find: collName, readConcern: {level: "majority", atClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions);
+ session.startTransaction({readConcern: {level: "majority", atClusterTime: clusterTime}});
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
// 'atClusterTime' cannot be used with readConcern level 'local'.
- assert.commandFailedWithCode(
- sessionDb.runCommand(
- {find: collName, readConcern: {level: "local", atClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions);
+ session.startTransaction({readConcern: {level: "local", atClusterTime: clusterTime}});
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
// 'atClusterTime' cannot be used with readConcern level 'available'.
- assert.commandFailedWithCode(
- sessionDb.runCommand(
- {find: collName, readConcern: {level: "available", atClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions);
+ session.startTransaction({readConcern: {level: "available", atClusterTime: clusterTime}});
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
// 'atClusterTime' cannot be used with readConcern level 'linearizable'.
- assert.commandFailedWithCode(
- sessionDb.runCommand(
- {find: collName, readConcern: {level: "linearizable", atClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions);
+ session.startTransaction({readConcern: {level: "linearizable", atClusterTime: clusterTime}});
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
// 'atClusterTime' cannot be used without readConcern level (level is 'local' by default).
- assert.commandFailedWithCode(
- sessionDb.runCommand({find: collName, readConcern: {atClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions);
+ session.startTransaction({readConcern: {atClusterTime: clusterTime}});
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
// 'atClusterTime' cannot be used with 'afterOpTime'.
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
+ session.startTransaction({
readConcern: {
level: "snapshot",
atClusterTime: clusterTime,
afterOpTime: {ts: Timestamp(1, 2), t: 1}
- },
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.InvalidOptions);
+ }
+ });
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
// 'atClusterTime' cannot be used outside of a session.
assert.commandFailedWithCode(
@@ -113,17 +107,19 @@ function _getClusterTime(rst) {
ErrorCodes.InvalidOptions);
// 'atClusterTime' cannot be used with 'afterClusterTime'.
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot", atClusterTime: clusterTime, afterClusterTime: clusterTime},
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.InvalidOptions);
+ session.startTransaction({
+ readConcern:
+ {level: "snapshot", atClusterTime: clusterTime, afterClusterTime: clusterTime}
+ });
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
session.endSession();
rst.stopSet();
// readConcern with 'atClusterTime' should fail when 'enableTestCommands' is set to false.
+ // TODO: SERVER-35643 Allow atClusterTime when enableTestCommands is false.
{
jsTest.setOption('enableTestCommands', false);
let rst = new ReplSetTest({nodes: 1});
@@ -132,12 +128,12 @@ function _getClusterTime(rst) {
let session =
rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
let sessionDb = session.getDatabase(dbName);
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)},
- txnNumber: NumberLong(0)
- }),
+ session.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
session.endSession();
rst.stopSet();
@@ -148,11 +144,10 @@ function _getClusterTime(rst) {
session =
rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
sessionDb = session.getDatabase(dbName);
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)},
- txnNumber: NumberLong(0)
- }));
+ session.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
+ assert.commandWorked(sessionDb.runCommand({find: collName}));
+ session.commitTransaction();
session.endSession();
rst.stopSet();
}
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
index ebafbb0c1d4..9d12659dc07 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
@@ -50,13 +50,14 @@
// Test reading from the primary.
const shard1Session =
st.rs1.getPrimary().getDB("test1").getMongo().startSession({causalConsistency: false});
- res = shard1Session.getDatabase("test1").runCommand({
- find: "coll1",
- readConcern: {level: "snapshot", atClusterTime: clusterTime},
- txnNumber: NumberLong(0)
- });
+ shard1Session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
+ res = shard1Session.getDatabase("test1").runCommand({find: "coll1"});
if (res.ok === 0) {
assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld);
+ assert.commandFailedWithCode(shard1Session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ } else {
+ shard1Session.commitTransaction();
}
const shard1PrimaryMajOpTime =
st.rs1.getReadConcernMajorityOpTimeOrThrow(st.rs1.getPrimary()).ts;
@@ -77,13 +78,14 @@
// Test reading from the secondary.
const shard0Session =
st.rs0.getSecondary().getDB("test0").getMongo().startSession({causalConsistency: false});
- res = shard0Session.getDatabase("test0").runCommand({
- find: "coll0",
- readConcern: {level: "snapshot", atClusterTime: clusterTime},
- txnNumber: NumberLong(0)
- });
+ shard0Session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
+ res = shard0Session.getDatabase("test0").runCommand({find: "coll0"});
if (res.ok === 0) {
assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld);
+ assert.commandFailedWithCode(shard0Session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ } else {
+ shard0Session.commitTransaction();
}
const shard0SecondaryMajOpTime =
st.rs0.getReadConcernMajorityOpTimeOrThrow(st.rs0.getSecondary()).ts;
diff --git a/jstests/noPassthrough/readConcern_snapshot.js b/jstests/noPassthrough/readConcern_snapshot.js
index 8926d6fd72a..f83f04aa3f3 100644
--- a/jstests/noPassthrough/readConcern_snapshot.js
+++ b/jstests/noPassthrough/readConcern_snapshot.js
@@ -23,13 +23,15 @@
session.startTransaction({readConcern: {level: "snapshot"}});
assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
ErrorCodes.IllegalOperation);
- session.abortTransaction();
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
// Transactions without readConcern snapshot fail.
session.startTransaction();
assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
ErrorCodes.IllegalOperation);
- session.abortTransaction();
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
rst.stopSet();
return;
@@ -37,24 +39,6 @@
session.endSession();
rst.stopSet();
- // TODO: SERVER-34113:
- // readConcern 'snapshot' should fail for autocommit:true transactions when test
- // 'enableTestCommands' is set to false.
- jsTest.setOption('enableTestCommands', false);
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- sessionDb = session.getDatabase(dbName);
- assert.commandWorked(sessionDb.coll.insert({}, {writeConcern: {w: "majority"}}));
- assert.commandFailedWithCode(
- sessionDb.runCommand(
- {find: collName, readConcern: {level: "snapshot"}, txnNumber: NumberLong(1)}),
- ErrorCodes.InvalidOptions);
- jsTest.setOption('enableTestCommands', true);
- session.endSession();
- rst.stopSet();
-
// readConcern 'snapshot' is not allowed on a standalone.
const conn = MongoRunner.runMongod();
session = conn.startSession({causalConsistency: false});
@@ -63,7 +47,8 @@
session.startTransaction({readConcern: {level: "snapshot"}});
assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
ErrorCodes.IllegalOperation);
- session.abortTransaction();
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.IllegalOperation);
session.endSession();
MongoRunner.stopMongod(conn);
@@ -95,7 +80,8 @@
session.startTransaction(
{readConcern: {level: "snapshot", afterOpTime: {ts: Timestamp(1, 2), t: 1}}});
assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- session.abortTransaction();
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
session.endSession();
// readConcern 'snapshot' is allowed on a replica set secondary.
@@ -156,29 +142,5 @@
50768);
assert.commandWorked(session.abortTransaction_forTesting());
session.endSession();
-
- // TODO: SERVER-34113 Remove this test when we completely remove snapshot
- // reads since this command is not supported with transaction api.
- // readConcern 'snapshot' is supported by group.
- session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- sessionDb = session.getDatabase(dbName);
- let txnNumber = 0;
- assert.commandWorked(sessionDb.runCommand({
- group: {ns: collName, key: {_id: 1}, $reduce: function(curr, result) {}, initial: {}},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++)
- }));
-
- // TODO: SERVER-34113 Remove this test when we completely remove snapshot
- // reads since this command is not supported with transaction api.
- // readConcern 'snapshot' is supported by geoNear.
- assert.commandWorked(sessionDb.runCommand({
- geoNear: collName,
- near: [0, 0],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++)
- }));
-
- session.endSession();
rst.stopSet();
}());
diff --git a/jstests/noPassthrough/read_concern_snapshot_yielding.js b/jstests/noPassthrough/read_concern_snapshot_yielding.js
index 3792b3bb655..2a44a57d750 100644
--- a/jstests/noPassthrough/read_concern_snapshot_yielding.js
+++ b/jstests/noPassthrough/read_concern_snapshot_yielding.js
@@ -242,22 +242,6 @@
assert.eq(res.cursor.firstBatch.length, TestData.numDocs, tojson(res));
}, {"command.pipeline": [{$match: {x: 1}}]});
- // TODO: SERVER-34113 Remove this test when we completely remove snapshot
- // reads since this command is not supported with transaction api.
- // Test geoNear.
- testCommand(function() {
- const sessionId = db.getMongo().startSession({causalConsistency: false}).getSessionId();
- const res = assert.commandWorked(db.runCommand({
- geoNear: "coll",
- near: [0, 0],
- readConcern: {level: "snapshot"},
- lsid: sessionId,
- txnNumber: NumberLong(0)
- }));
- assert(res.hasOwnProperty("results"));
- assert.eq(res.results.length, TestData.numDocs, tojson(res));
- }, {"command.geoNear": "coll"});
-
// Test getMore with an initial find batchSize of 0. Interrupt behavior of a getMore is not
// expected to change with a change of batchSize in the originating command.
testCommand(function() {
@@ -291,21 +275,6 @@
assert.eq(res.values.length, 4, tojson(res));
}, {"command.distinct": "coll"});
- // TODO: SERVER-34113 Remove this test when we completely remove snapshot
- // reads since this command is not supported with transaction api.
- // Test group.
- testCommand(function() {
- const sessionId = db.getMongo().startSession({causalConsistency: false}).getSessionId();
- const res = assert.commandWorked(db.runCommand({
- group: {ns: "coll", key: {_id: 1}, $reduce: function(curr, result) {}, initial: {}},
- readConcern: {level: "snapshot"},
- lsid: sessionId,
- txnNumber: NumberLong(0)
- }));
- assert(res.hasOwnProperty("count"), tojson(res));
- assert.eq(res.count, 4);
- }, {"command.group.ns": "coll"});
-
// Test update.
testCommand(function() {
const session = db.getMongo().startSession({causalConsistency: false});
diff --git a/jstests/sharding/snapshot_aggregate_mongos.js b/jstests/sharding/snapshot_aggregate_mongos.js
deleted file mode 100644
index b0480fc534c..00000000000
--- a/jstests/sharding/snapshot_aggregate_mongos.js
+++ /dev/null
@@ -1,121 +0,0 @@
-// Tests snapshot isolation on readConcern level snapshot reads through mongos.
-// @tags: [requires_sharding]
-(function() {
- "use strict";
-
- load("jstests/libs/global_snapshot_reads_util.js");
-
- const dbName = "test";
- const shardedCollName = "shardedColl";
- const unshardedCollName = "unshardedColl";
-
- const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
-
- const shardDb = st.rs0.getPrimary().getDB(dbName);
- if (!shardDb.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- st.stop();
- return;
- }
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand(
- {shardCollection: st.s.getDB(dbName)[shardedCollName] + "", key: {_id: 1}}));
-
- function runTest(mainDb, {useCausalConsistency, collName}) {
- const session = mainDb.getMongo().startSession({causalConsistency: useCausalConsistency});
- const sessionDb = session.getDatabase(dbName);
-
- const bulk = mainDb[collName].initializeUnorderedBulkOp();
- for (let x = 0; x < 10; ++x) {
- bulk.insert({_id: x});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- let txnNumber = 1;
-
- // Test snapshot reads using aggregate.
- let cursorCmd = {
- aggregate: collName,
- pipeline: [{$sort: {_id: 1}}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- cursor: {batchSize: 5}
- };
-
- // Establish a snapshot cursor, fetching the first 5 documents.
-
- let res = assert.commandWorked(sessionDb.runCommand(cursorCmd));
-
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("firstBatch"));
- assert.eq(5, res.cursor.firstBatch.length);
-
- assert(res.cursor.hasOwnProperty("id"));
- const cursorId = res.cursor.id;
- assert.neq(cursorId, 0);
-
- // Insert an 11th document which should not be visible to the snapshot cursor. This write is
- // performed outside of the session.
- assert.writeOK(mainDb[collName].insert({_id: 10}, {writeConcern: {w: "majority"}}));
-
- verifyInvalidGetMoreAttempts(mainDb, sessionDb, collName, cursorId, txnNumber);
-
- // Fetch the 6th document. This confirms that the transaction stash is preserved across
- // multiple getMore invocations.
- res = assert.commandWorked(sessionDb.runCommand({
- getMore: cursorId,
- collection: collName,
- batchSize: 1,
- txnNumber: NumberLong(txnNumber)
- }));
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.neq(0, res.cursor.id);
-
- // Exhaust the cursor, retrieving the remainder of the result set.
- res = assert.commandWorked(sessionDb.runCommand({
- getMore: cursorId,
- collection: collName,
- batchSize: 10,
- txnNumber: NumberLong(txnNumber)
- }));
-
- // The cursor has been exhausted.
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.eq(0, res.cursor.id);
-
- // Only the remaining 4 of the initial 10 documents are returned. The 11th document is not
- // part of the result set.
- assert(res.cursor.hasOwnProperty("nextBatch"));
- assert.eq(4, res.cursor.nextBatch.length);
-
- // Perform a second snapshot read under a new transaction.
- txnNumber++;
- res = assert.commandWorked(sessionDb.runCommand({
- aggregate: collName,
- pipeline: [{$sort: {_id: 1}}],
- cursor: {batchSize: 20},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber)
- }));
-
- // The cursor has been exhausted.
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.eq(0, res.cursor.id);
-
- // All 11 documents are returned.
- assert(res.cursor.hasOwnProperty("firstBatch"));
- assert.eq(11, res.cursor.firstBatch.length);
-
- session.endSession();
- }
-
- jsTestLog("Running sharded");
- runTest(st.s.getDB(dbName), {useCausalConsistency: false, collName: shardedCollName});
- jsTestLog("Running unsharded");
- runTest(st.s.getDB(dbName), {useCausalConsistency: false, collName: unshardedCollName});
-
- st.stop();
-})();
diff --git a/jstests/sharding/snapshot_find_mongos.js b/jstests/sharding/snapshot_find_mongos.js
deleted file mode 100644
index 92b5b679730..00000000000
--- a/jstests/sharding/snapshot_find_mongos.js
+++ /dev/null
@@ -1,122 +0,0 @@
-// Tests snapshot isolation on readConcern level snapshot finds through mongos.
-//
-// @tags: [requires_sharding]
-(function() {
- "use strict";
-
- load("jstests/libs/global_snapshot_reads_util.js");
-
- const dbName = "test";
- const shardedCollName = "shardedColl";
- const unshardedCollName = "unshardedColl";
-
- const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
-
- const shardDb = st.rs0.getPrimary().getDB(dbName);
- if (!shardDb.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- st.stop();
- return;
- }
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand(
- {shardCollection: st.s.getDB(dbName)[shardedCollName] + "", key: {_id: 1}}));
-
- function runTest(mainDb, {useCausalConsistency, collName}) {
- const session = mainDb.getMongo().startSession({causalConsistency: useCausalConsistency});
- const sessionDb = session.getDatabase(dbName);
-
- const bulk = mainDb[collName].initializeUnorderedBulkOp();
- for (let x = 0; x < 10; ++x) {
- bulk.insert({_id: x});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- let txnNumber = 0;
-
- // Test snapshot reads using find.
- let cursorCmd = {
- find: collName,
- sort: {_id: 1},
- batchSize: 5,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber)
- };
-
- // Establish a snapshot cursor, fetching the first 5 documents.
-
- let res = assert.commandWorked(sessionDb.runCommand(cursorCmd));
-
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("firstBatch"));
- assert.eq(5, res.cursor.firstBatch.length);
-
- assert(res.cursor.hasOwnProperty("id"));
- const cursorId = res.cursor.id;
- assert.neq(cursorId, 0);
-
- // Insert an 11th document which should not be visible to the snapshot cursor. This write is
- // performed outside of the session.
- assert.writeOK(mainDb[collName].insert({_id: 10}, {writeConcern: {w: "majority"}}));
-
- verifyInvalidGetMoreAttempts(mainDb, sessionDb, collName, cursorId, txnNumber);
-
- // Fetch the 6th document. This confirms that the transaction stash is preserved across
- // multiple getMore invocations.
- res = assert.commandWorked(sessionDb.runCommand({
- getMore: cursorId,
- collection: collName,
- batchSize: 1,
- txnNumber: NumberLong(txnNumber)
- }));
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.neq(0, res.cursor.id);
-
- // Exhaust the cursor, retrieving the remainder of the result set.
- res = assert.commandWorked(sessionDb.runCommand({
- getMore: cursorId,
- collection: collName,
- batchSize: 10,
- txnNumber: NumberLong(txnNumber)
- }));
-
- // The cursor has been exhausted.
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.eq(0, res.cursor.id);
-
- // Only the remaining 4 of the initial 10 documents are returned. The 11th document is not
- // part of the result set.
- assert(res.cursor.hasOwnProperty("nextBatch"));
- assert.eq(4, res.cursor.nextBatch.length);
-
- // Perform a second snapshot read under a new transaction.
- txnNumber++;
- res = assert.commandWorked(sessionDb.runCommand({
- find: collName,
- sort: {_id: 1},
- batchSize: 20,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber)
- }));
-
- // The cursor has been exhausted.
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.eq(0, res.cursor.id);
-
- // All 11 documents are returned.
- assert(res.cursor.hasOwnProperty("firstBatch"));
- assert.eq(11, res.cursor.firstBatch.length);
-
- session.endSession();
- }
-
- jsTestLog("Running sharded");
- runTest(st.s.getDB(dbName), {useCausalConsistency: false, collName: shardedCollName});
- jsTestLog("Running unsharded");
- runTest(st.s.getDB(dbName), {useCausalConsistency: false, collName: unshardedCollName});
-
- st.stop();
-})();
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 45ad89b47b6..8818ded8b59 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -81,7 +81,7 @@ public:
bool supportsReadConcern(const std::string& dbName,
const BSONObj& cmdObj,
repl::ReadConcernLevel level) const override {
- return true;
+ return level != repl::ReadConcernLevel::kSnapshotReadConcern;
}
ReadWriteType getReadWriteType() const override {
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 1a9b1ca3678..61a9333c52d 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -116,10 +116,9 @@ public:
// change for the snapshot.
auto lockMode = LockMode::MODE_S;
auto* session = OperationContextSession::get(opCtx);
- if (session && session->inSnapshotReadOrMultiDocumentTransaction()) {
- // However, if we are inside a multi-statement transaction or using snapshot reads to
- // read from a consistent snapshot, then we only need to lock the database in intent
- // mode to ensure that none of the collections get dropped.
+ if (session && session->inMultiDocumentTransaction()) {
+ // However, if we are inside a multi-statement transaction, then we only need to lock
+ // the database in intent mode to ensure that none of the collections get dropped.
lockMode = getLockModeForQuery(opCtx);
}
AutoGetDb autoDb(opCtx, ns, lockMode);
@@ -220,11 +219,11 @@ private:
boost::optional<Lock::CollectionLock> collLock;
auto* session = OperationContextSession::get(opCtx);
- if (session && session->inSnapshotReadOrMultiDocumentTransaction()) {
- // When inside a multi-statement transaction or using snapshot reads, we are only
- // holding the database lock in intent mode. We need to also acquire the collection lock
- // in intent mode to ensure reading from the consistent snapshot doesn't overlap with
- // any catalog operations on the collection.
+ if (session && session->inMultiDocumentTransaction()) {
+ // When inside a multi-statement transaction, we are only holding the database lock in
+ // intent mode. We need to also acquire the collection lock in intent mode to ensure
+ // reading from the consistent snapshot doesn't overlap with any catalog operations on
+ // the collection.
invariant(
opCtx->lockState()->isDbLockedForMode(db->name(), getLockModeForQuery(opCtx)));
collLock.emplace(opCtx->lockState(), fullCollectionName, getLockModeForQuery(opCtx));
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 194691412f3..80c4b8194ee 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -334,7 +334,7 @@ public:
maybeDisableValidation.emplace(opCtx);
const auto session = OperationContextSession::get(opCtx);
- const auto inTransaction = session && session->inSnapshotReadOrMultiDocumentTransaction();
+ const auto inTransaction = session && session->inMultiDocumentTransaction();
uassert(50781,
str::stream() << "Cannot write to system collection " << nsString.ns()
<< " within a transaction.",
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index a34079181bd..3b4b1ee654f 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -64,7 +64,7 @@ public:
bool supportsReadConcern(const std::string& dbName,
const BSONObj& cmdObj,
repl::ReadConcernLevel level) const override {
- return true;
+ return level != repl::ReadConcernLevel::kSnapshotReadConcern;
}
ReadWriteType getReadWriteType() const override {
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index f6e599a34ae..36a2c6af1bb 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -445,8 +445,7 @@ Status runAggregate(OperationContext* opCtx,
uuid));
expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
auto session = OperationContextSession::get(opCtx);
- expCtx->inSnapshotReadOrMultiDocumentTransaction =
- session && session->inSnapshotReadOrMultiDocumentTransaction();
+ expCtx->inMultiDocumentTransaction = session && session->inMultiDocumentTransaction();
auto pipeline = uassertStatusOK(Pipeline::parse(request.getPipeline(), expCtx));
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 3c24a0da687..edbd9a6d315 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -262,7 +262,7 @@ private:
void _transactionChecks(OperationContext* opCtx) const {
auto session = OperationContextSession::get(opCtx);
- if (!session || !session->inSnapshotReadOrMultiDocumentTransaction())
+ if (!session || !session->inMultiDocumentTransaction())
return;
uassert(50791,
str::stream() << "Cannot write to system collection " << ns().toString()
diff --git a/src/mongo/db/cursor_manager.cpp b/src/mongo/db/cursor_manager.cpp
index 216114adc78..6cfc442ed67 100644
--- a/src/mongo/db/cursor_manager.cpp
+++ b/src/mongo/db/cursor_manager.cpp
@@ -59,15 +59,6 @@
namespace mongo {
-MONGO_INITIALIZER(RegisterCursorExistsFunction)
-(InitializerContext* const) {
- Session::registerCursorExistsFunction([](LogicalSessionId lsid, TxnNumber txnNumber) {
- return CursorManager::hasTransactionCursorReference(lsid, txnNumber);
- });
-
- return Status::OK();
-}
-
using std::vector;
constexpr int CursorManager::kNumPartitions;
@@ -118,27 +109,13 @@ public:
int64_t nextSeed();
- void addTransactionCursorReference(LogicalSessionId lsid,
- TxnNumber txnNumber,
- NamespaceString nss,
- CursorId cursorId);
-
- void removeTransactionCursorReference(LogicalSessionId lsid,
- TxnNumber txnNumber,
- CursorId cursorId);
-
- size_t numOpenCursorsForTransaction(LogicalSessionId lsid, TxnNumber txnNumber);
-
private:
// '_mutex' must not be held when acquiring a CursorManager mutex to avoid deadlock.
SimpleMutex _mutex;
using CursorIdToNssMap = stdx::unordered_map<CursorId, NamespaceString>;
- using TxnNumberToCursorMap = stdx::unordered_map<TxnNumber, CursorIdToNssMap>;
- using LsidToTxnCursorMap = LogicalSessionIdMap<TxnNumberToCursorMap>;
using IdToNssMap = stdx::unordered_map<unsigned, NamespaceString>;
- LsidToTxnCursorMap _lsidToTxnCursorMap;
IdToNssMap _idToNss;
unsigned _nextId;
@@ -172,38 +149,6 @@ int64_t GlobalCursorIdCache::nextSeed() {
return _secureRandom->nextInt64();
}
-void GlobalCursorIdCache::addTransactionCursorReference(LogicalSessionId lsid,
- TxnNumber txnNumber,
- NamespaceString nss,
- CursorId cursorId) {
- stdx::lock_guard<SimpleMutex> lk(_mutex);
- invariant(_lsidToTxnCursorMap[lsid][txnNumber].insert({cursorId, nss}).second == true,
- "Expected insert to succeed");
-}
-
-void GlobalCursorIdCache::removeTransactionCursorReference(LogicalSessionId lsid,
- TxnNumber txnNumber,
- CursorId cursorId) {
- stdx::lock_guard<SimpleMutex> lk(_mutex);
- invariant(_lsidToTxnCursorMap[lsid][txnNumber].erase(cursorId) == 1); // cursorId was erased.
-
- if (_lsidToTxnCursorMap[lsid][txnNumber].size() == 0) {
- invariant(_lsidToTxnCursorMap[lsid].erase(txnNumber) == 1);
- if (_lsidToTxnCursorMap[lsid].size() == 0) {
- invariant(_lsidToTxnCursorMap.erase(lsid) == 1);
- }
- }
-}
-
-size_t GlobalCursorIdCache::numOpenCursorsForTransaction(LogicalSessionId lsid,
- TxnNumber txnNumber) {
- stdx::lock_guard<SimpleMutex> lk(_mutex);
- if (_lsidToTxnCursorMap.count(lsid) == 0 || _lsidToTxnCursorMap[lsid].count(txnNumber) == 0) {
- return 0;
- }
- return _lsidToTxnCursorMap[lsid][txnNumber].size();
-}
-
uint32_t GlobalCursorIdCache::registerCursorManager(const NamespaceString& nss) {
static const uint32_t kMaxIds = 1000 * 1000 * 1000;
static_assert((kMaxIds & (0b11 << 30)) == 0,
@@ -415,29 +360,10 @@ std::pair<Status, int> CursorManager::killCursorsWithMatchingSessions(
return std::make_pair(visitor.getStatus(), visitor.getCursorsKilled());
}
-void CursorManager::addTransactionCursorReference(LogicalSessionId lsid,
- TxnNumber txnNumber,
- NamespaceString nss,
- CursorId cursorId) {
- globalCursorIdCache->addTransactionCursorReference(lsid, txnNumber, nss, cursorId);
-}
-
-void CursorManager::removeTransactionCursorReference(const ClientCursor* cursor) {
- // Remove cursor transaction registration if needed.
- if (cursor->_lsid && cursor->_txnNumber) {
- globalCursorIdCache->removeTransactionCursorReference(
- *cursor->_lsid, *cursor->_txnNumber, cursor->_cursorid);
- }
-}
-
std::size_t CursorManager::timeoutCursorsGlobal(OperationContext* opCtx, Date_t now) {
return globalCursorIdCache->timeoutCursors(opCtx, now);
}
-bool CursorManager::hasTransactionCursorReference(LogicalSessionId lsid, TxnNumber txnNumber) {
- return globalCursorIdCache->numOpenCursorsForTransaction(lsid, txnNumber) > 0;
-}
-
int CursorManager::killCursorGlobalIfAuthorized(OperationContext* opCtx, int n, const char* _ids) {
ConstDataCursor ids(_ids);
int numDeleted = 0;
@@ -536,7 +462,6 @@ void CursorManager::invalidateAll(OperationContext* opCtx,
// responsible for cleaning it up. Otherwise we can immediately dispose of it.
if (cursor->_operationUsingCursor) {
it = partition.erase(it);
- removeTransactionCursorReference(cursor);
continue;
}
@@ -545,7 +470,6 @@ void CursorManager::invalidateAll(OperationContext* opCtx,
// will result in a useful error message.
++it;
} else {
- removeTransactionCursorReference(cursor);
toDisposeWithoutMutex.emplace_back(cursor);
it = partition.erase(it);
}
@@ -602,7 +526,6 @@ std::size_t CursorManager::timeoutCursors(OperationContext* opCtx, Date_t now) {
for (auto it = lockedPartition->begin(); it != lockedPartition->end();) {
auto* cursor = it->second;
if (cursorShouldTimeout_inlock(cursor, now)) {
- removeTransactionCursorReference(cursor);
toDisposeWithoutMutex.emplace_back(cursor);
it = lockedPartition->erase(it);
} else {
@@ -804,8 +727,6 @@ ClientCursorPin CursorManager::registerCursor(OperationContext* opCtx,
// Register this cursor for lookup by transaction.
if (opCtx->getLogicalSessionId() && opCtx->getTxnNumber()) {
invariant(opCtx->getLogicalSessionId());
- addTransactionCursorReference(
- *opCtx->getLogicalSessionId(), *opCtx->getTxnNumber(), cursorParams.nss, cursorId);
}
// Transfer ownership of the cursor to '_cursorMap'.
@@ -817,7 +738,6 @@ ClientCursorPin CursorManager::registerCursor(OperationContext* opCtx,
void CursorManager::deregisterCursor(ClientCursor* cursor) {
_cursorMap->erase(cursor->cursorid());
- removeTransactionCursorReference(cursor);
}
void CursorManager::deregisterAndDestroyCursor(
@@ -827,7 +747,6 @@ void CursorManager::deregisterAndDestroyCursor(
{
auto lockWithRestrictedScope = std::move(lk);
lockWithRestrictedScope->erase(cursor->cursorid());
- removeTransactionCursorReference(cursor.get());
}
// Dispose of the cursor without holding any cursor manager mutexes. Disposal of a cursor can
// require taking lock manager locks, which we want to avoid while holding a mutex. If we did
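
For context on what the deleted bookkeeping did, here is a sketch of the lsid -> txnNumber -> cursorId maps and the pruning that removeTransactionCursorReference performed, written with plain standard-library containers; Lsid as std::string is a stand-in, not the real LogicalSessionId, and the real code held a mutex and used invariants instead of silent erases.

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>

using CursorId = std::int64_t;
using TxnNumber = std::int64_t;
using Lsid = std::string;  // hypothetical stand-in for LogicalSessionId

using CursorSet = std::unordered_map<CursorId, std::string /* namespace */>;
std::unordered_map<Lsid, std::unordered_map<TxnNumber, CursorSet>> lsidToTxnCursors;

void removeCursorReference(const Lsid& lsid, TxnNumber txn, CursorId id) {
    auto& byTxn = lsidToTxnCursors[lsid];
    byTxn[txn].erase(id);
    // Prune empty inner maps so lookups like hasTransactionCursorReference
    // (also removed by this patch) see an accurate "no open cursors" state.
    if (byTxn[txn].empty()) {
        byTxn.erase(txn);
        if (byTxn.empty()) {
            lsidToTxnCursors.erase(lsid);
        }
    }
}

int main() {
    lsidToTxnCursors["s1"][7] = {{42, "test.coll"}};
    removeCursorReference("s1", 7, 42);
    assert(lsidToTxnCursors.empty());  // fully pruned
    return 0;
}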
diff --git a/src/mongo/db/cursor_manager.h b/src/mongo/db/cursor_manager.h
index 5ca69676757..24d83a6e981 100644
--- a/src/mongo/db/cursor_manager.h
+++ b/src/mongo/db/cursor_manager.h
@@ -100,12 +100,6 @@ public:
static std::pair<Status, int> killCursorsWithMatchingSessions(
OperationContext* opCtx, const SessionKiller::Matcher& matcher);
- /**
- * Returns true if the CursorManager has cursor references for the given session ID and
- * transaction number.
- */
- static bool hasTransactionCursorReference(LogicalSessionId lsid, TxnNumber txnNumber);
-
CursorManager(NamespaceString nss);
/**
@@ -262,15 +256,6 @@ private:
std::size_t operator()(const PlanExecutor* exec, std::size_t nPartitions);
};
- // Adds a CursorId to structure that allows for lookup by LogicalSessionId and TxnNumber.
- void addTransactionCursorReference(LogicalSessionId lsid,
- TxnNumber txnNumber,
- NamespaceString nss,
- CursorId cursorId);
-
- // Removes a CursorId from the LogicalSessionId / TxnNumber lookup structure.
- void removeTransactionCursorReference(const ClientCursor* cursor);
-
CursorId allocateCursorId_inlock();
ClientCursorPin _registerCursor(
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 14f8a72e06b..4745a6ce962 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -123,7 +123,7 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp) {
auto session = OperationContextSession::get(opCtx);
if (curOp->shouldDBProfile(shouldSample)) {
boost::optional<Session::TxnResources> txnResources;
- if (session && session->inSnapshotReadOrMultiDocumentTransaction()) {
+ if (session && session->inMultiDocumentTransaction()) {
// Stash the current transaction so that writes to the profile collection are not
// done as part of the transaction. This must be done under the client lock, since
// we are modifying 'opCtx'.
@@ -200,7 +200,7 @@ void assertCanWrite_inlock(OperationContext* opCtx, const NamespaceString& ns) {
void makeCollection(OperationContext* opCtx, const NamespaceString& ns) {
auto session = OperationContextSession::get(opCtx);
- auto inTransaction = session && session->inSnapshotReadOrMultiDocumentTransaction();
+ auto inTransaction = session && session->inMultiDocumentTransaction();
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Cannot create namespace " << ns.ns()
<< " in multi-document transaction.",
@@ -238,7 +238,7 @@ bool handleError(OperationContext* opCtx,
}
auto session = OperationContextSession::get(opCtx);
- if (session && session->inSnapshotReadOrMultiDocumentTransaction()) {
+ if (session && session->inMultiDocumentTransaction()) {
// If we are in a transaction, we must fail the whole batch.
throw;
}
@@ -489,11 +489,11 @@ SingleWriteResult makeWriteResultForInsertOrDeleteRetry() {
} // namespace
WriteResult performInserts(OperationContext* opCtx, const write_ops::Insert& wholeOp) {
- // Insert performs its own retries, so we should only be within a WriteUnitOfWork when run under
- // snapshot read concern or in a transaction.
+ // Insert performs its own retries, so we should only be within a WriteUnitOfWork when run in a
+ // transaction.
auto session = OperationContextSession::get(opCtx);
invariant(!opCtx->lockState()->inAWriteUnitOfWork() ||
- (session && session->inSnapshotReadOrMultiDocumentTransaction()));
+ (session && session->inMultiDocumentTransaction()));
auto& curOp = *CurOp::get(opCtx);
ON_BLOCK_EXIT([&] {
// This is the only part of finishCurOp we need to do for inserts because they reuse the
@@ -694,11 +694,11 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
}
WriteResult performUpdates(OperationContext* opCtx, const write_ops::Update& wholeOp) {
- // Update performs its own retries, so we should not be in a WriteUnitOfWork unless run under
- // snapshot read concern or in a transaction.
+ // Update performs its own retries, so we should not be in a WriteUnitOfWork unless run in a
+ // transaction.
auto session = OperationContextSession::get(opCtx);
invariant(!opCtx->lockState()->inAWriteUnitOfWork() ||
- (session && session->inSnapshotReadOrMultiDocumentTransaction()));
+ (session && session->inMultiDocumentTransaction()));
uassertStatusOK(userAllowedWriteNS(wholeOp.getNamespace()));
DisableDocumentValidationIfTrue docValidationDisabler(
@@ -839,7 +839,7 @@ WriteResult performDeletes(OperationContext* opCtx, const write_ops::Delete& who
// transaction.
auto session = OperationContextSession::get(opCtx);
invariant(!opCtx->lockState()->inAWriteUnitOfWork() ||
- (session && session->inSnapshotReadOrMultiDocumentTransaction()));
+ (session && session->inMultiDocumentTransaction()));
uassertStatusOK(userAllowedWriteNS(wholeOp.getNamespace()));
DisableDocumentValidationIfTrue docValidationDisabler(
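
A small sketch of the invariant these write paths now enforce, with a hypothetical OpState struct standing in for the OperationContext/Session pair: an already-open WriteUnitOfWork is legal only when the write runs inside a multi-document transaction, because inserts, updates, and deletes perform their own retries.

#include <cassert>

struct OpState {
    bool inWriteUnitOfWork;
    bool inMultiDocumentTransaction;
};

bool writeEntryInvariantHolds(const OpState& s) {
    return !s.inWriteUnitOfWork || s.inMultiDocumentTransaction;
}

int main() {
    assert(writeEntryInvariantHolds({false, false}));  // plain write
    assert(writeEntryInvariantHolds({true, true}));    // write inside a txn
    assert(!writeEntryInvariantHolds({true, false}));  // would trip invariant()
    return 0;
}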
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
index 27401d9403e..ed5dd84b251 100644
--- a/src/mongo/db/pipeline/expression_context.h
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -197,7 +197,7 @@ public:
bool inMongos = false;
bool allowDiskUse = false;
bool bypassDocumentValidation = false;
- bool inSnapshotReadOrMultiDocumentTransaction = false;
+ bool inMultiDocumentTransaction = false;
NamespaceString ns;
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 79dd962d871..610a3752371 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -228,10 +228,9 @@ void Pipeline::validateCommon() const {
str::stream() << stage->getSourceName() << " can only be run on mongoS",
!(constraints.hostRequirement == HostTypeRequirement::kMongoS && !pCtx->inMongos));
- if (pCtx->inSnapshotReadOrMultiDocumentTransaction) {
+ if (pCtx->inMultiDocumentTransaction) {
uassert(50742,
- str::stream() << "Stage not supported with readConcern level \"snapshot\" "
- "or inside of a multi-document transaction: "
+ str::stream() << "Stage not supported inside of a multi-document transaction: "
<< stage->getSourceName(),
constraints.isAllowedInTransaction());
}
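
A sketch of the validation rule above, assuming a simplified StageConstraints with a single flag: when the expression context is flagged as being inside a multi-document transaction, every stage must be allowed in transactions, otherwise validation fails (the real code uasserts with code 50742).

#include <cassert>
#include <vector>

struct StageConstraints {
    bool allowedInTransaction;  // stand-in for constraints.isAllowedInTransaction()
};

bool validatePipeline(bool inMultiDocumentTransaction,
                      const std::vector<StageConstraints>& stages) {
    if (!inMultiDocumentTransaction) {
        return true;  // no transaction restrictions apply
    }
    for (const auto& stage : stages) {
        if (!stage.allowedInTransaction) {
            return false;  // the real code uasserts with code 50742 here
        }
    }
    return true;
}

int main() {
    assert(validatePipeline(true, {{true}, {true}}));
    assert(!validatePipeline(true, {{true}, {false}}));
    assert(validatePipeline(false, {{true}, {false}}));  // fine outside a txn
    return 0;
}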
diff --git a/src/mongo/db/pipeline/pipeline_test.cpp b/src/mongo/db/pipeline/pipeline_test.cpp
index acb80b6b00c..98e23a9a954 100644
--- a/src/mongo/db/pipeline/pipeline_test.cpp
+++ b/src/mongo/db/pipeline/pipeline_test.cpp
@@ -2427,9 +2427,9 @@ TEST_F(PipelineValidateTest, ChangeStreamIsNotValidIfNotFirstStageInFacet) {
ASSERT(std::string::npos != parseStatus.reason().find("$changeStream"));
}
-class DocumentSourceDisallowedWithSnapshotReads : public DocumentSourceMock {
+class DocumentSourceDisallowedInTransactions : public DocumentSourceMock {
public:
- DocumentSourceDisallowedWithSnapshotReads() : DocumentSourceMock({}) {}
+ DocumentSourceDisallowedInTransactions() : DocumentSourceMock({}) {}
StageConstraints constraints(Pipeline::SplitState pipeState) const final {
return StageConstraints{StreamType::kStreaming,
@@ -2440,42 +2440,36 @@ public:
TransactionRequirement::kNotAllowed};
}
- static boost::intrusive_ptr<DocumentSourceDisallowedWithSnapshotReads> create() {
- return new DocumentSourceDisallowedWithSnapshotReads();
+ static boost::intrusive_ptr<DocumentSourceDisallowedInTransactions> create() {
+ return new DocumentSourceDisallowedInTransactions();
}
};
-TEST_F(PipelineValidateTest, TopLevelPipelineValidatedForStagesIllegalWithSnapshotReads) {
+TEST_F(PipelineValidateTest, TopLevelPipelineValidatedForStagesIllegalInTransactions) {
BSONObj readConcernSnapshot = BSON("readConcern" << BSON("level"
<< "snapshot"));
auto ctx = getExpCtx();
- auto&& readConcernArgs = repl::ReadConcernArgs::get(ctx->opCtx);
- ASSERT_OK(readConcernArgs.initialize(readConcernSnapshot["readConcern"]));
- ASSERT(readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern);
- ctx->inSnapshotReadOrMultiDocumentTransaction = true;
+ ctx->inMultiDocumentTransaction = true;
// Make a pipeline with a legal $match, and then an illegal mock stage, and verify that pipeline
// creation fails with the expected error code.
auto matchStage = DocumentSourceMatch::create(BSON("_id" << 3), ctx);
- auto illegalStage = DocumentSourceDisallowedWithSnapshotReads::create();
+ auto illegalStage = DocumentSourceDisallowedInTransactions::create();
auto pipeline = Pipeline::create({matchStage, illegalStage}, ctx);
ASSERT_NOT_OK(pipeline.getStatus());
ASSERT_EQ(pipeline.getStatus(), ErrorCodes::duplicateCodeForTest(50742));
}
-TEST_F(PipelineValidateTest, FacetPipelineValidatedForStagesIllegalWithSnapshotReads) {
+TEST_F(PipelineValidateTest, FacetPipelineValidatedForStagesIllegalInTransactions) {
BSONObj readConcernSnapshot = BSON("readConcern" << BSON("level"
<< "snapshot"));
auto ctx = getExpCtx();
- auto&& readConcernArgs = repl::ReadConcernArgs::get(ctx->opCtx);
- ASSERT_OK(readConcernArgs.initialize(readConcernSnapshot["readConcern"]));
- ASSERT(readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern);
- ctx->inSnapshotReadOrMultiDocumentTransaction = true;
+ ctx->inMultiDocumentTransaction = true;
// Make a pipeline with a legal $match, and then an illegal mock stage, and verify that pipeline
// creation fails with the expected error code.
auto matchStage = DocumentSourceMatch::create(BSON("_id" << 3), ctx);
- auto illegalStage = DocumentSourceDisallowedWithSnapshotReads::create();
+ auto illegalStage = DocumentSourceDisallowedInTransactions::create();
auto pipeline = Pipeline::createFacetPipeline({matchStage, illegalStage}, ctx);
ASSERT_NOT_OK(pipeline.getStatus());
ASSERT_EQ(pipeline.getStatus(), ErrorCodes::duplicateCodeForTest(50742));
diff --git a/src/mongo/db/read_concern.cpp b/src/mongo/db/read_concern.cpp
index c5fade9dcb1..b8215af1610 100644
--- a/src/mongo/db/read_concern.cpp
+++ b/src/mongo/db/read_concern.cpp
@@ -217,12 +217,12 @@ Status waitForReadConcern(OperationContext* opCtx,
repl::ReplicationCoordinator* const replCoord = repl::ReplicationCoordinator::get(opCtx);
invariant(replCoord);
- // Currently speculative read concern is used only for transactions and snapshot reads. However,
- // speculative read concern is not yet supported with atClusterTime.
+ // Currently speculative read concern is used only for transactions. However, speculative read
+ // concern is not yet supported with atClusterTime.
//
// TODO SERVER-34620: Re-enable speculative behavior when "atClusterTime" is specified.
- const bool speculative = session && session->inSnapshotReadOrMultiDocumentTransaction() &&
- !readConcernArgs.getArgsAtClusterTime();
+ const bool speculative =
+ session && session->inMultiDocumentTransaction() && !readConcernArgs.getArgsAtClusterTime();
if (readConcernArgs.getLevel() == repl::ReadConcernLevel::kLinearizableReadConcern) {
if (replCoord->getReplicationMode() != repl::ReplicationCoordinator::modeReplSet) {
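
A sketch of the tightened speculative-read-concern decision, with a hypothetical ReadContext struct in place of Session and ReadConcernArgs: speculation now applies only inside a multi-document transaction, and only while no atClusterTime is supplied (see the SERVER-34620 TODO above).

#include <cassert>

struct ReadContext {
    bool inMultiDocumentTransaction;
    bool hasAtClusterTime;
};

bool isSpeculative(const ReadContext& ctx) {
    return ctx.inMultiDocumentTransaction && !ctx.hasAtClusterTime;
}

int main() {
    assert(isSpeculative({true, false}));
    assert(!isSpeculative({true, true}));   // atClusterTime disables speculation
    assert(!isSpeculative({false, false})); // outside a transaction
    return 0;
}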
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index f1db996b4eb..26b263d58ff 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -1346,8 +1346,8 @@ Status ReplicationCoordinatorImpl::_waitUntilClusterTimeForRead(OperationContext
// TODO SERVER-34620: Re-enable speculative behavior when "atClusterTime" is specified.
auto session = OperationContextSession::get(opCtx);
- const bool speculative = session && session->inSnapshotReadOrMultiDocumentTransaction() &&
- !readConcern.getArgsAtClusterTime();
+ const bool speculative =
+ session && session->inMultiDocumentTransaction() && !readConcern.getArgsAtClusterTime();
const bool isMajorityCommittedRead =
(readConcern.getLevel() == ReadConcernLevel::kMajorityReadConcern ||
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index f2b523834d9..d935724a5c4 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -802,11 +802,9 @@ void execCommandDatabase(OperationContext* opCtx,
}
auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
- // TODO(SERVER-34113) replace below txnNumber/logicalSessionId checks with
- // Session::inMultiDocumentTransaction().
- if (!opCtx->getClient()->isInDirectClient() || !opCtx->getTxnNumber() ||
- !opCtx->getLogicalSessionId()) {
- auto session = OperationContextSession::get(opCtx);
+ auto session = OperationContextSession::get(opCtx);
+ if (!opCtx->getClient()->isInDirectClient() || !session ||
+ !session->inMultiDocumentTransaction()) {
const bool upconvertToSnapshot = session && session->inMultiDocumentTransaction() &&
sessionOptions &&
(sessionOptions->getStartTransaction() == boost::optional<bool>(true));
@@ -824,12 +822,7 @@ void execCommandDatabase(OperationContext* opCtx,
auto session = OperationContextSession::get(opCtx);
uassert(ErrorCodes::InvalidOptions,
"readConcern level snapshot is only valid in multi-statement transactions",
- // With test commands enabled, a read command with readConcern snapshot is
- // a valid snapshot read.
- (getTestCommandsEnabled() &&
- invocation->definition()->getReadWriteType() ==
- BasicCommand::ReadWriteType::kRead) ||
- (session && session->inMultiDocumentTransaction()));
+ session && session->inMultiDocumentTransaction());
uassert(ErrorCodes::InvalidOptions,
"readConcern level snapshot requires a session ID",
opCtx->getLogicalSessionId());
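
A sketch of the resulting gate, with a hypothetical Request struct in place of the real command objects: readConcern level "snapshot" is now accepted only when the operation is part of a multi-document transaction and carries a logical session ID, and the previous test-commands-enabled exception for plain reads is removed.

#include <cassert>

struct Request {
    bool snapshotReadConcern;
    bool inMultiDocumentTransaction;
    bool hasLogicalSessionId;
};

bool snapshotReadConcernAllowed(const Request& r) {
    if (!r.snapshotReadConcern) {
        return true;  // nothing to check for other read concern levels
    }
    return r.inMultiDocumentTransaction && r.hasLogicalSessionId;
}

int main() {
    assert(snapshotReadConcernAllowed({true, true, true}));    // inside a txn
    assert(!snapshotReadConcernAllowed({true, false, true}));  // bare snapshot read
    assert(!snapshotReadConcernAllowed({true, true, false}));  // missing lsid
    assert(snapshotReadConcernAllowed({false, false, false})); // non-snapshot
    return 0;
}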
diff --git a/src/mongo/db/session.cpp b/src/mongo/db/session.cpp
index eeb84e5cff5..7b07ef67596 100644
--- a/src/mongo/db/session.cpp
+++ b/src/mongo/db/session.cpp
@@ -69,8 +69,6 @@ namespace mongo {
// to run without aborting transactions.
MONGO_EXPORT_SERVER_PARAMETER(maxTransactionLockRequestTimeoutMillis, int, 5);
-Session::CursorExistsFunction Session::_cursorExistsFunction;
-
// Server parameter that dictates the lifetime given to each transaction.
// Transactions must eventually expire to preempt storage cache pressure immobilizing the system.
MONGO_EXPORT_SERVER_PARAMETER(transactionLifetimeLimitSeconds, std::int32_t, 60)
@@ -373,15 +371,6 @@ void Session::beginOrContinueTxnOnMigration(OperationContext* opCtx, TxnNumber t
}
void Session::setSpeculativeTransactionOpTimeToLastApplied(OperationContext* opCtx) {
- // TODO: This check can be removed once SERVER-34113 is completed. Certain commands that use
- // DBDirectClient can use snapshot readConcern, but are not supported in transactions. These
- // commands are only allowed when test commands are enabled, but violate an invariant that the
- // read timestamp cannot be changed on a RecoveryUnit while it is active.
- if (opCtx->getClient()->isInDirectClient() &&
- opCtx->recoveryUnit()->getTimestampReadSource() != RecoveryUnit::ReadSource::kUnset) {
- return;
- }
-
stdx::lock_guard<stdx::mutex> lg(_mutex);
repl::ReplicationCoordinator* replCoord =
repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext());
@@ -553,9 +542,8 @@ void Session::_beginOrContinueTxn(WithLock wl,
<< " since it is already in progress.",
startTransaction == boost::none);
- // Continue a retryable write or a snapshot read.
- if (_txnState == MultiDocumentTransactionState::kNone ||
- _txnState == MultiDocumentTransactionState::kInSnapshotRead) {
+ // Continue a retryable write.
+ if (_txnState == MultiDocumentTransactionState::kNone) {
uassert(ErrorCodes::InvalidOptions,
"Cannot specify 'autocommit' on an operation not inside a multi-statement "
"transaction.",
@@ -564,8 +552,8 @@ void Session::_beginOrContinueTxn(WithLock wl,
}
// Continue a multi-statement transaction. In this case, it is required that
- // autocommit=false be given as an argument on the request. Retryable writes and snapshot
- // reads will have _autocommit=true, so that is why we verify that _autocommit=false here.
+ // autocommit=false be given as an argument on the request. Retryable writes will have
+ // _autocommit=true, so that is why we verify that _autocommit=false here.
if (!_autocommit) {
uassert(
ErrorCodes::InvalidOptions,
@@ -616,7 +604,7 @@ void Session::_beginOrContinueTxn(WithLock wl,
_transactionExpireDate =
Date_t::now() + stdx::chrono::seconds{transactionLifetimeLimitSeconds.load()};
} else {
- // Execute a retryable write or snapshot read.
+ // Execute a retryable write.
invariant(startTransaction == boost::none);
_setActiveTxn(wl, txnNumber);
_autocommit = true;
@@ -713,21 +701,13 @@ void Session::stashTransactionResources(OperationContext* opCtx) {
// expect this function to be called at the end of the 'abortTransaction' command.
_checkIsActiveTransaction(lg, *opCtx->getTxnNumber(), false);
- if (_txnState != MultiDocumentTransactionState::kInProgress &&
- _txnState != MultiDocumentTransactionState::kInSnapshotRead) {
- // Not in a multi-document transaction or snapshot read: nothing to do.
+ if (_txnState != MultiDocumentTransactionState::kInProgress) {
+ // Not in a multi-document transaction: nothing to do.
return;
}
- if (_txnState == MultiDocumentTransactionState::kInSnapshotRead &&
- !_cursorExistsFunction(_sessionId, _activeTxnNumber)) {
- // The snapshot read is complete.
- invariant(opCtx->getWriteUnitOfWork());
- _commitTransaction(std::move(lg), opCtx);
- } else {
- invariant(!_txnResourceStash);
- _txnResourceStash = TxnResources(opCtx);
- }
+ invariant(!_txnResourceStash);
+ _txnResourceStash = TxnResources(opCtx);
}
void Session::unstashTransactionResources(OperationContext* opCtx, const std::string& cmdName) {
@@ -781,31 +761,20 @@ void Session::unstashTransactionResources(OperationContext* opCtx, const std::st
return;
}
-
// Stashed transaction resources do not exist for this transaction. If this is a
- // snapshot read or a multi-document transaction, set up the transaction resources on
- // the opCtx.
- auto readConcernArgs = repl::ReadConcernArgs::get(opCtx);
- if (!(readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern ||
- _txnState == MultiDocumentTransactionState::kInProgress)) {
+ // multi-document transaction, set up the transaction resources on the opCtx.
+ if (_txnState != MultiDocumentTransactionState::kInProgress) {
return;
}
opCtx->setWriteUnitOfWork(std::make_unique<WriteUnitOfWork>(opCtx));
-
- if (_txnState == MultiDocumentTransactionState::kInProgress) {
- // If maxTransactionLockRequestTimeoutMillis is set, then we will ensure no
- // future lock request waits longer than maxTransactionLockRequestTimeoutMillis
- // to acquire a lock. This is to avoid deadlocks and minimize non-transaction
- // operation performance degradations.
- auto maxTransactionLockMillis = maxTransactionLockRequestTimeoutMillis.load();
- if (maxTransactionLockMillis >= 0) {
- opCtx->lockState()->setMaxLockTimeout(Milliseconds(maxTransactionLockMillis));
- }
- }
-
- if (_txnState != MultiDocumentTransactionState::kInProgress) {
- _txnState = MultiDocumentTransactionState::kInSnapshotRead;
+ // If maxTransactionLockRequestTimeoutMillis is set, then we will ensure no
+ // future lock request waits longer than maxTransactionLockRequestTimeoutMillis
+ // to acquire a lock. This is to avoid deadlocks and minimize non-transaction
+ // operation performance degradations.
+ auto maxTransactionLockMillis = maxTransactionLockRequestTimeoutMillis.load();
+ if (maxTransactionLockMillis >= 0) {
+ opCtx->lockState()->setMaxLockTimeout(Milliseconds(maxTransactionLockMillis));
}
}
@@ -839,8 +808,7 @@ void Session::abortArbitraryTransactionIfExpired() {
}
void Session::_abortArbitraryTransaction(WithLock lock) {
- if (_txnState != MultiDocumentTransactionState::kInProgress &&
- _txnState != MultiDocumentTransactionState::kInSnapshotRead) {
+ if (_txnState != MultiDocumentTransactionState::kInProgress) {
return;
}
@@ -851,8 +819,7 @@ void Session::abortActiveTransaction(OperationContext* opCtx) {
stdx::unique_lock<Client> clientLock(*opCtx->getClient());
stdx::lock_guard<stdx::mutex> lock(_mutex);
- if (_txnState != MultiDocumentTransactionState::kInProgress &&
- _txnState != MultiDocumentTransactionState::kInSnapshotRead) {
+ if (_txnState != MultiDocumentTransactionState::kInProgress) {
return;
}
@@ -897,8 +864,7 @@ void Session::_beginOrContinueTxnOnMigration(WithLock wl, TxnNumber txnNumber) {
void Session::_setActiveTxn(WithLock wl, TxnNumber txnNumber) {
// Abort the existing transaction if it's not committed or aborted.
- if (_txnState == MultiDocumentTransactionState::kInProgress ||
- _txnState == MultiDocumentTransactionState::kInSnapshotRead) {
+ if (_txnState == MultiDocumentTransactionState::kInProgress) {
_abortTransaction(wl);
}
_activeTxnNumber = txnNumber;
@@ -959,8 +925,7 @@ void Session::commitTransaction(OperationContext* opCtx) {
}
void Session::_commitTransaction(stdx::unique_lock<stdx::mutex> lk, OperationContext* opCtx) {
- invariant(_txnState == MultiDocumentTransactionState::kInProgress ||
- _txnState == MultiDocumentTransactionState::kInSnapshotRead);
+ invariant(_txnState == MultiDocumentTransactionState::kInProgress);
const bool isMultiDocumentTransaction = _txnState == MultiDocumentTransactionState::kInProgress;
if (isMultiDocumentTransaction) {
// We need to unlock the session to run the opObserver onTransactionCommit, which calls back
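
A sketch of the simplified unstash path, assuming a reduced TxnState enum that omits states not shown here: transaction resources are set up only for an in-progress transaction, and the per-transaction lock acquisition timeout is applied on that path whenever maxTransactionLockRequestTimeoutMillis is non-negative.

#include <cassert>
#include <optional>

enum class TxnState { kNone, kInProgress, kCommitted };

std::optional<int> lockTimeoutToApply(TxnState state, int maxTransactionLockMillis) {
    if (state != TxnState::kInProgress) {
        return std::nullopt;  // nothing to unstash or configure
    }
    if (maxTransactionLockMillis >= 0) {
        return maxTransactionLockMillis;  // bound lock waits to avoid deadlocks
    }
    return std::nullopt;  // negative means "no timeout", keep default behavior
}

int main() {
    assert(lockTimeoutToApply(TxnState::kInProgress, 5) == 5);
    assert(!lockTimeoutToApply(TxnState::kInProgress, -1));
    assert(!lockTimeoutToApply(TxnState::kNone, 5));
    return 0;
}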
diff --git a/src/mongo/db/session.h b/src/mongo/db/session.h
index 17ce18047d9..335944a46b8 100644
--- a/src/mongo/db/session.h
+++ b/src/mongo/db/session.h
@@ -247,12 +247,6 @@ public:
*/
void unstashTransactionResources(OperationContext* opCtx, const std::string& cmdName);
- // TODO SERVER-34113: Remove the "cursor exists" mechanism from both Session and CursorManager
- // once snapshot reads outside of multi-statement transactions are no longer supported.
- static void registerCursorExistsFunction(CursorExistsFunction cursorExistsFunc) {
- _cursorExistsFunction = cursorExistsFunc;
- }
-
/**
* Commits the transaction, including committing the write unit of work and updating
* transaction state.
@@ -290,15 +284,6 @@ public:
return _txnState == MultiDocumentTransactionState::kInProgress;
};
- /**
- * Returns whether we are in a read-only or multi-document transaction.
- */
- bool inSnapshotReadOrMultiDocumentTransaction() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _txnState == MultiDocumentTransactionState::kInProgress ||
- _txnState == MultiDocumentTransactionState::kInSnapshotRead;
- }
-
bool transactionIsCommitted() const {
stdx::lock_guard<stdx::mutex> lk(_mutex);
return _txnState == MultiDocumentTransactionState::kCommitted;
@@ -458,7 +443,6 @@ private:
// the transaction is in any state but kInProgress, no more operations can be collected.
enum class MultiDocumentTransactionState {
kNone,
- kInSnapshotRead,
kInProgress,
kCommitting,
kCommitted,
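
A sketch of the state check that inMultiDocumentTransaction() (kept as context above) now performs; the enum below lists only the states visible in this hunk, and the real one may contain more.

#include <cassert>

enum class MultiDocumentTransactionState { kNone, kInProgress, kCommitting, kCommitted };

bool inMultiDocumentTransaction(MultiDocumentTransactionState s) {
    // Previously this also had to accept kInSnapshotRead; a single comparison
    // against kInProgress is now enough.
    return s == MultiDocumentTransactionState::kInProgress;
}

int main() {
    assert(inMultiDocumentTransaction(MultiDocumentTransactionState::kInProgress));
    assert(!inMultiDocumentTransaction(MultiDocumentTransactionState::kNone));
    assert(!inMultiDocumentTransaction(MultiDocumentTransactionState::kCommitted));
    return 0;
}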
diff --git a/src/mongo/db/session_catalog_test.cpp b/src/mongo/db/session_catalog_test.cpp
index 2dd8279a1c7..f6e04a2d016 100644
--- a/src/mongo/db/session_catalog_test.cpp
+++ b/src/mongo/db/session_catalog_test.cpp
@@ -176,7 +176,7 @@ TEST_F(SessionCatalogTest, StashInNestedSessionIsANoop) {
{
OperationContextSession outerScopedSession(
- opCtx(), true, boost::none, boost::none, "testDB", "find");
+ opCtx(), true, /* autocommit */ false, /* startTransaction */ true, "testDB", "find");
Locker* originalLocker = opCtx()->lockState();
RecoveryUnit* originalRecoveryUnit = opCtx()->recoveryUnit();
@@ -204,10 +204,6 @@ TEST_F(SessionCatalogTest, StashInNestedSessionIsANoop) {
OperationContextSession innerScopedSession(
opCtx(), true, boost::none, boost::none, "testDB", "find");
- // Report to Session that there is a stashed cursor. If we were not in a nested session,
- // this would ensure that stashing is not a noop.
- Session::registerCursorExistsFunction([](LogicalSessionId, TxnNumber) { return true; });
-
OperationContextSession::get(opCtx())->stashTransactionResources(opCtx());
// The stash was a noop, so the locker, RecoveryUnit, and WriteUnitOfWork on the
@@ -225,7 +221,7 @@ TEST_F(SessionCatalogTest, UnstashInNestedSessionIsANoop) {
{
OperationContextSession outerScopedSession(
- opCtx(), true, boost::none, boost::none, "testDB", "find");
+ opCtx(), true, /* autocommit */ false, /* startTransaction */ true, "testDB", "find");
Locker* originalLocker = opCtx()->lockState();
RecoveryUnit* originalRecoveryUnit = opCtx()->recoveryUnit();
diff --git a/src/mongo/db/session_test.cpp b/src/mongo/db/session_test.cpp
index 0b92952c63f..dd4ed8e13ac 100644
--- a/src/mongo/db/session_test.cpp
+++ b/src/mongo/db/session_test.cpp
@@ -647,7 +647,6 @@ TEST_F(SessionTest, StashAndUnstashResources) {
ASSERT(originalLocker);
ASSERT(originalRecoveryUnit);
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -689,7 +688,6 @@ TEST_F(SessionTest, StashAndUnstashResources) {
}
TEST_F(SessionTest, ReportStashedResources) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
const TxnNumber txnNum = 20;
opCtx()->setLogicalSessionId(sessionId);
@@ -758,7 +756,6 @@ TEST_F(SessionTest, ReportStashedResources) {
}
TEST_F(SessionTest, CannotSpecifyStartTransactionOnInProgressTxn) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -772,7 +769,7 @@ TEST_F(SessionTest, CannotSpecifyStartTransactionOnInProgressTxn) {
// Autocommit should be set to false and we should be in a multi-doc transaction.
ASSERT_FALSE(session.getAutocommit());
- ASSERT_TRUE(session.inSnapshotReadOrMultiDocumentTransaction());
+ ASSERT_TRUE(session.inMultiDocumentTransaction());
// Cannot try to start a transaction that already started.
ASSERT_THROWS_CODE(session.beginOrContinueTxn(opCtx(), txnNum, false, true, "testDB", "insert"),
@@ -819,7 +816,6 @@ TEST_F(SessionTest, AutocommitRequiredOnEveryTxnOp) {
}
TEST_F(SessionTest, SameTransactionPreservesStoredStatements) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -847,7 +843,6 @@ TEST_F(SessionTest, SameTransactionPreservesStoredStatements) {
}
TEST_F(SessionTest, AbortClearsStoredStatements) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -871,7 +866,6 @@ TEST_F(SessionTest, AbortClearsStoredStatements) {
// This test makes sure the commit machinery works even when no operations are done on the
// transaction.
TEST_F(SessionTest, EmptyTransactionCommit) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -891,7 +885,6 @@ TEST_F(SessionTest, EmptyTransactionCommit) {
// This test makes sure the abort machinery works even when no operations are done on the
// transaction.
TEST_F(SessionTest, EmptyTransactionAbort) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -909,7 +902,6 @@ TEST_F(SessionTest, EmptyTransactionAbort) {
}
TEST_F(SessionTest, ConcurrencyOfUnstashAndAbort) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -929,7 +921,6 @@ TEST_F(SessionTest, ConcurrencyOfUnstashAndAbort) {
}
TEST_F(SessionTest, ConcurrencyOfUnstashAndMigration) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -957,7 +948,6 @@ TEST_F(SessionTest, ConcurrencyOfUnstashAndMigration) {
}
TEST_F(SessionTest, ConcurrencyOfStashAndAbort) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -977,7 +967,6 @@ TEST_F(SessionTest, ConcurrencyOfStashAndAbort) {
}
TEST_F(SessionTest, ConcurrencyOfStashAndMigration) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -1002,7 +991,6 @@ TEST_F(SessionTest, ConcurrencyOfStashAndMigration) {
}
TEST_F(SessionTest, ConcurrencyOfAddTransactionOperationAndAbort) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -1025,7 +1013,6 @@ TEST_F(SessionTest, ConcurrencyOfAddTransactionOperationAndAbort) {
}
TEST_F(SessionTest, ConcurrencyOfAddTransactionOperationAndMigration) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -1051,7 +1038,6 @@ TEST_F(SessionTest, ConcurrencyOfAddTransactionOperationAndMigration) {
}
TEST_F(SessionTest, ConcurrencyOfEndTransactionAndRetrieveOperationsAndAbort) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -1073,7 +1059,6 @@ TEST_F(SessionTest, ConcurrencyOfEndTransactionAndRetrieveOperationsAndAbort) {
}
TEST_F(SessionTest, ConcurrencyOfEndTransactionAndRetrieveOperationsAndMigration) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -1099,7 +1084,6 @@ TEST_F(SessionTest, ConcurrencyOfEndTransactionAndRetrieveOperationsAndMigration
}
TEST_F(SessionTest, ConcurrencyOfCommitTransactionAndAbort) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());
@@ -1120,7 +1104,6 @@ TEST_F(SessionTest, ConcurrencyOfCommitTransactionAndAbort) {
}
TEST_F(SessionTest, ConcurrencyOfCommitTransactionAndMigration) {
- Session::registerCursorExistsFunction(noopCursorExistsFunction);
const auto sessionId = makeLogicalSessionIdForTest();
Session session(sessionId);
session.refreshFromStorageIfNeeded(opCtx());