author      Tess Avitabile <tess.avitabile@mongodb.com>    2018-01-04 17:32:21 -0500
committer   Tess Avitabile <tess.avitabile@mongodb.com>    2018-01-23 09:57:56 -0500
commit      a98d497c957dc2da7d29c37be9809ace992ef946 (patch)
tree        17191166815defa371bfead2ddeb022b752f03a6 /src
parent      ad0ab27807a29e025b36a82ede139c975ad65cfb (diff)
download    mongo-a98d497c957dc2da7d29c37be9809ace992ef946.tar.gz
SERVER-32517 Parse readConcern snapshot and atClusterTime
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/commands.h | 19
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 4
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 7
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp | 6
-rw-r--r--  src/mongo/db/commands/group_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/haystack.cpp | 4
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp | 4
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 8
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp | 7
-rw-r--r--  src/mongo/db/read_concern.cpp | 24
-rw-r--r--  src/mongo/db/repl/read_concern_args.cpp | 124
-rw-r--r--  src/mongo/db/repl/read_concern_args.h | 26
-rw-r--r--  src/mongo/db/repl/read_concern_args_test.cpp | 463
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state.h | 5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 8
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.h | 1
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_mock.cpp | 5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_mock.h | 1
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 32
-rw-r--r--  src/mongo/db/service_entry_point_mongod.cpp | 31
-rw-r--r--  src/mongo/db/storage/kv/kv_engine.h | 7
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.cpp | 4
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.h | 2
-rw-r--r--  src/mongo/db/storage/storage_engine.h | 7
-rw-r--r--  src/mongo/db/storage/storage_init.cpp | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 4
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h | 2
-rw-r--r--  src/mongo/s/commands/strategy.cpp | 13
31 files changed, 592 insertions, 244 deletions
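
Before the per-file diff, a brief sketch of what the patch enables, written against the ReadConcernArgs API this commit extends (mongo codebase environment assumed; the helper name and the abbreviated includes are illustrative): readConcern now parses level "snapshot", optionally together with an atClusterTime timestamp.

#include "mongo/db/logical_time.h"
#include "mongo/db/repl/read_concern_args.h"
// (Other mongo headers, e.g. for the BSON macro and invariant, omitted for brevity.)

namespace mongo {
namespace repl {

// Hypothetical helper mirroring the new unit tests in read_concern_args_test.cpp.
void exampleParseSnapshotReadConcern() {
    ReadConcernArgs readConcern;
    auto atClusterTime = LogicalTime(Timestamp(20, 30));
    // Wire format: {find: "test", readConcern: {level: "snapshot", atClusterTime: <ts>}}
    Status status = readConcern.initialize(
        BSON("find"
             << "test"
             << ReadConcernArgs::kReadConcernFieldName
             << BSON(ReadConcernArgs::kLevelFieldName << "snapshot"
                                                      << ReadConcernArgs::kAtClusterTimeFieldName
                                                      << atClusterTime.asTimestamp())));
    invariant(status.isOK());
    invariant(readConcern.getLevel() == ReadConcernLevel::kSnapshotReadConcern);
    invariant(readConcern.getArgsAtClusterTime() &&
              *readConcern.getArgsAtClusterTime() == atClusterTime);
}

}  // namespace repl
}  // namespace mongo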
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index f6c987d03b6..dc8b3fbf15c 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -347,22 +347,21 @@ public:
}
/**
- * Returns true if this Command supports the non-local readConcern:level field value. Takes the
- * command object and the name of the database on which it was invoked as arguments, so that
- * readConcern can be conditionally rejected based on the command's parameters and/or namespace.
+ * Returns true if this Command supports the given readConcern level. Takes the command object
+ * and the name of the database on which it was invoked as arguments, so that readConcern can be
+ * conditionally rejected based on the command's parameters and/or namespace.
*
- * If the readConcern non-local level argument is sent to a command that returns false the
- * command processor will reject the command, returning an appropriate error message. For
- * commands that support the argument, the command processor will instruct the RecoveryUnit to
- * only return "committed" data, failing if this isn't supported by the storage engine.
+ * If a readConcern level argument is sent to a command that returns false the command processor
+ * will reject the command, returning an appropriate error message.
*
* Note that this is never called on mongos. Sharded commands are responsible for forwarding
* the option to the shards as needed. We rely on the shards to fail the commands in the
* cases where it isn't supported.
*/
- virtual bool supportsNonLocalReadConcern(const std::string& dbName,
- const BSONObj& cmdObj) const {
- return false;
+ virtual bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const {
+ return level == repl::ReadConcernLevel::kLocalReadConcern;
}
/**
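
As orientation for the many per-command changes that follow: each Command now reports which readConcern levels it accepts via this hook, and the base class defaults to "local" only. A minimal sketch of an override that additionally accepts "snapshot" (the same pattern used below by findAndModify and the write commands), shown out of context of any particular Command subclass:

    bool supportsReadConcern(const std::string& dbName,
                             const BSONObj& cmdObj,
                             repl::ReadConcernLevel level) const final {
        // Accept the default "local" level plus the new "snapshot" level.
        return level == repl::ReadConcernLevel::kLocalReadConcern ||
            level == repl::ReadConcernLevel::kSnapshotReadConcern;
    }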
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 1627cc0407b..68e99247586 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -80,7 +80,9 @@ public:
return false;
}
- bool supportsNonLocalReadConcern(const std::string& dbName, const BSONObj& cmdObj) const final {
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
return true;
}
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 0f0bb883578..84fdb451376 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -87,7 +87,9 @@ public:
return false;
}
- bool supportsNonLocalReadConcern(const std::string& dbName, const BSONObj& cmdObj) const final {
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
return true;
}
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index c76cbbd5516..cf3bb5609c8 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -224,6 +224,13 @@ public:
return false;
}
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
+ return level == repl::ReadConcernLevel::kLocalReadConcern ||
+ level == repl::ReadConcernLevel::kSnapshotReadConcern;
+ }
+
bool supportsWriteConcern(const BSONObj& cmd) const override {
return true;
}
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index b3294bf8d6c..ccb97bd98a5 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -85,7 +85,9 @@ public:
return false;
}
- bool supportsNonLocalReadConcern(const std::string& dbName, const BSONObj& cmdObj) const final {
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
return true;
}
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 970bdd6871a..4eb9ac3075b 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -80,7 +80,9 @@ public:
bool slaveOverrideOk() const {
return true;
}
- bool supportsNonLocalReadConcern(const std::string& dbName, const BSONObj& cmdObj) const final {
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
return true;
}
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 360dc810f5e..8e40999ca36 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -101,9 +101,11 @@ public:
return false;
}
- bool supportsNonLocalReadConcern(const std::string& dbName, const BSONObj& cmdObj) const final {
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
// Uses the readConcern setting from whatever created the cursor.
- return false;
+ return level == repl::ReadConcernLevel::kLocalReadConcern;
}
ReadWriteType getReadWriteType() const {
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index 5eb751a2991..4025fb138bf 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -79,7 +79,9 @@ private:
return true;
}
- bool supportsNonLocalReadConcern(const std::string& dbName, const BSONObj& cmdObj) const final {
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
return true;
}
diff --git a/src/mongo/db/commands/haystack.cpp b/src/mongo/db/commands/haystack.cpp
index 1f7dd0fcdf6..18f3b1d1edc 100644
--- a/src/mongo/db/commands/haystack.cpp
+++ b/src/mongo/db/commands/haystack.cpp
@@ -76,7 +76,9 @@ public:
return true;
}
- bool supportsNonLocalReadConcern(const std::string& dbName, const BSONObj& cmdObj) const final {
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
return true;
}
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index ec72bc03324..783e74be716 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -66,7 +66,9 @@ public:
return true;
}
- bool supportsNonLocalReadConcern(const std::string& dbName, const BSONObj& cmdObj) const final {
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
return true;
}
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 7f2586f5827..3cc4b4d1e8e 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -65,9 +65,11 @@ public:
return true;
}
- bool supportsNonLocalReadConcern(const std::string& dbName,
- const BSONObj& cmdObj) const override {
- return !AggregationRequest::parseNs(dbName, cmdObj).isCollectionlessAggregateNS();
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const override {
+ return level == repl::ReadConcernLevel::kLocalReadConcern ||
+ !AggregationRequest::parseNs(dbName, cmdObj).isCollectionlessAggregateNS();
}
ReadWriteType getReadWriteType() const {
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 214eb13871c..e3f9c60774d 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -206,6 +206,13 @@ public:
return false;
}
+ bool supportsReadConcern(const std::string& dbName,
+ const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
+ return level == repl::ReadConcernLevel::kLocalReadConcern ||
+ level == repl::ReadConcernLevel::kSnapshotReadConcern;
+ }
+
bool supportsWriteConcern(const BSONObj& cmd) const {
return true;
}
diff --git a/src/mongo/db/read_concern.cpp b/src/mongo/db/read_concern.cpp
index 44196c98dcf..57ae829011c 100644
--- a/src/mongo/db/read_concern.cpp
+++ b/src/mongo/db/read_concern.cpp
@@ -233,7 +233,19 @@ Status waitForReadConcern(OperationContext* opCtx,
}
}
- auto afterClusterTime = readConcernArgs.getArgsClusterTime();
+ if (readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern) {
+ if (replCoord->getReplicationMode() != repl::ReplicationCoordinator::modeReplSet) {
+ return {ErrorCodes::NotAReplicaSet,
+ "node needs to be a replica set member to use readConcern: snapshot"};
+ }
+
+ if (!replCoord->isV1ElectionProtocol()) {
+ return {ErrorCodes::IncompatibleElectionProtocol,
+ "Replica sets running protocol version 0 do not support readConcern: snapshot"};
+ }
+ }
+
+ auto afterClusterTime = readConcernArgs.getArgsAfterClusterTime();
if (afterClusterTime) {
if (!allowAfterClusterTime) {
return {ErrorCodes::InvalidOptions, "afterClusterTime is not allowed for this command"};
@@ -246,11 +258,6 @@ Status waitForReadConcern(OperationContext* opCtx,
}
}
- auto pointInTime = readConcernArgs.getArgsPointInTime();
- if (pointInTime) {
- fassertStatusOK(39345, opCtx->recoveryUnit()->selectSnapshot(pointInTime->asTimestamp()));
- }
-
if (!readConcernArgs.isEmpty()) {
if (replCoord->isReplEnabled() && afterClusterTime) {
auto status = makeNoopWriteIfNeeded(opCtx, *afterClusterTime);
@@ -268,6 +275,11 @@ Status waitForReadConcern(OperationContext* opCtx,
}
}
+ auto pointInTime = readConcernArgs.getArgsAtClusterTime();
+ if (pointInTime) {
+ fassertStatusOK(39345, opCtx->recoveryUnit()->selectSnapshot(pointInTime->asTimestamp()));
+ }
+
if (readConcernArgs.getLevel() == repl::ReadConcernLevel::kMajorityReadConcern &&
replCoord->getReplicationMode() == repl::ReplicationCoordinator::Mode::modeReplSet) {
// ReadConcern Majority is not supported in ProtocolVersion 0.
diff --git a/src/mongo/db/repl/read_concern_args.cpp b/src/mongo/db/repl/read_concern_args.cpp
index 0611dc05c4d..7d3a9a1d3dc 100644
--- a/src/mongo/db/repl/read_concern_args.cpp
+++ b/src/mongo/db/repl/read_concern_args.cpp
@@ -45,10 +45,11 @@ namespace repl {
namespace {
-const char kLocalReadConcernStr[] = "local";
-const char kMajorityReadConcernStr[] = "majority";
-const char kLinearizableReadConcernStr[] = "linearizable";
-const char kAvailableReadConcernStr[] = "available";
+constexpr StringData kLocalReadConcernStr = "local"_sd;
+constexpr StringData kMajorityReadConcernStr = "majority"_sd;
+constexpr StringData kLinearizableReadConcernStr = "linearizable"_sd;
+constexpr StringData kAvailableReadConcernStr = "available"_sd;
+constexpr StringData kSnapshotReadConcernStr = "snapshot"_sd;
} // unnamed namespace
@@ -73,7 +74,7 @@ ReadConcernArgs::ReadConcernArgs(boost::optional<OpTime> opTime,
ReadConcernArgs::ReadConcernArgs(boost::optional<LogicalTime> clusterTime,
boost::optional<ReadConcernLevel> level)
- : _clusterTime(std::move(clusterTime)), _level(std::move(level)) {}
+ : _afterClusterTime(std::move(clusterTime)), _level(std::move(level)) {}
std::string ReadConcernArgs::toString() const {
return toBSON().toString();
@@ -86,7 +87,7 @@ BSONObj ReadConcernArgs::toBSON() const {
}
bool ReadConcernArgs::isEmpty() const {
- return !_clusterTime && !_opTime && !_level;
+ return !_afterClusterTime && !_opTime && !_level;
}
ReadConcernLevel ReadConcernArgs::getLevel() const {
@@ -101,15 +102,15 @@ boost::optional<OpTime> ReadConcernArgs::getArgsOpTime() const {
return _opTime;
}
-boost::optional<LogicalTime> ReadConcernArgs::getArgsClusterTime() const {
- return _clusterTime;
+boost::optional<LogicalTime> ReadConcernArgs::getArgsAfterClusterTime() const {
+ return _afterClusterTime;
}
-boost::optional<LogicalTime> ReadConcernArgs::getArgsPointInTime() const {
- return _pointInTime;
+boost::optional<LogicalTime> ReadConcernArgs::getArgsAtClusterTime() const {
+ return _atClusterTime;
}
-Status ReadConcernArgs::initialize(const BSONElement& readConcernElem, bool testMode) {
+Status ReadConcernArgs::initialize(const BSONElement& readConcernElem) {
invariant(isEmpty()); // only legal to call on uninitialized object.
if (readConcernElem.eoo()) {
@@ -136,21 +137,21 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem, bool test
}
_opTime = opTime;
} else if (fieldName == kAfterClusterTimeFieldName) {
- Timestamp clusterTime;
- auto clusterTimeStatus =
- bsonExtractTimestampField(readConcernObj, kAfterClusterTimeFieldName, &clusterTime);
- if (!clusterTimeStatus.isOK()) {
- return clusterTimeStatus;
+ Timestamp afterClusterTime;
+ auto afterClusterTimeStatus = bsonExtractTimestampField(
+ readConcernObj, kAfterClusterTimeFieldName, &afterClusterTime);
+ if (!afterClusterTimeStatus.isOK()) {
+ return afterClusterTimeStatus;
}
- _clusterTime = LogicalTime(clusterTime);
- } else if (fieldName == kAtClusterTimeFieldName && testMode) {
- Timestamp pointInTime;
- auto pointInTimeStatus =
- bsonExtractTimestampField(readConcernObj, kAtClusterTimeFieldName, &pointInTime);
- if (!pointInTimeStatus.isOK()) {
- return pointInTimeStatus;
+ _afterClusterTime = LogicalTime(afterClusterTime);
+ } else if (fieldName == kAtClusterTimeFieldName) {
+ Timestamp atClusterTime;
+ auto atClusterTimeStatus =
+ bsonExtractTimestampField(readConcernObj, kAtClusterTimeFieldName, &atClusterTime);
+ if (!atClusterTimeStatus.isOK()) {
+ return atClusterTimeStatus;
}
- _pointInTime = LogicalTime(pointInTime);
+ _atClusterTime = LogicalTime(atClusterTime);
} else if (fieldName == kLevelFieldName) {
std::string levelString;
// TODO pass field in rather than scanning again.
@@ -169,14 +170,13 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem, bool test
_level = ReadConcernLevel::kLinearizableReadConcern;
} else if (levelString == kAvailableReadConcernStr) {
_level = ReadConcernLevel::kAvailableReadConcern;
+ } else if (levelString == kSnapshotReadConcernStr) {
+ _level = ReadConcernLevel::kSnapshotReadConcern;
} else {
- return Status(
- ErrorCodes::FailedToParse,
- str::stream()
- << kReadConcernFieldName
- << '.'
- << kLevelFieldName
- << " must be either 'local', 'majority', 'linearizable', or 'available'");
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << kReadConcernFieldName << '.' << kLevelFieldName
+ << " must be either 'local', 'majority', "
+ "'linearizable', 'available', or 'snapshot'");
}
} else {
return Status(ErrorCodes::InvalidOptions,
@@ -186,31 +186,63 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem, bool test
}
}
- if (_clusterTime && _opTime) {
+ if (_afterClusterTime && _opTime) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Can not specify both " << kAfterClusterTimeFieldName
<< " and "
<< kAfterOpTimeFieldName);
}
+ if (_afterClusterTime && _atClusterTime) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << "Can not specify both " << kAfterClusterTimeFieldName
+ << " and "
+ << kAtClusterTimeFieldName);
+ }
+
// Note: 'available' should not be used with after cluster time, as cluster time can wait for
- // replication whereas the premise of 'available' is to avoid waiting.
- if (_clusterTime && getLevel() != ReadConcernLevel::kMajorityReadConcern &&
- getLevel() != ReadConcernLevel::kLocalReadConcern) {
+ // replication whereas the premise of 'available' is to avoid waiting. 'linearizable' should not
+ // be used with after cluster time, since linearizable reads are inherently causally consistent.
+ if (_afterClusterTime && getLevel() != ReadConcernLevel::kMajorityReadConcern &&
+ getLevel() != ReadConcernLevel::kLocalReadConcern &&
+ getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << kAfterClusterTimeFieldName << " field can be set only if "
<< kLevelFieldName
<< " is equal to "
<< kMajorityReadConcernStr
- << " or "
- << kLocalReadConcernStr);
+ << ", "
+ << kLocalReadConcernStr
+ << ", or "
+ << kSnapshotReadConcernStr);
+ }
+
+ if (_opTime && getLevel() == ReadConcernLevel::kSnapshotReadConcern) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << kAfterOpTimeFieldName << " field cannot be set if "
+ << kLevelFieldName
+ << " is equal to "
+ << kSnapshotReadConcernStr);
+ }
+
+ if (_atClusterTime && getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << kAtClusterTimeFieldName << " field can be set only if "
+ << kLevelFieldName
+ << " is equal to "
+ << kSnapshotReadConcernStr);
}
- if (_clusterTime && _clusterTime == LogicalTime::kUninitialized) {
+ if (_afterClusterTime && _afterClusterTime == LogicalTime::kUninitialized) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << kAfterClusterTimeFieldName << " cannot be a null timestamp");
}
+ if (_atClusterTime && _atClusterTime == LogicalTime::kUninitialized) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << kAtClusterTimeFieldName << " cannot be a null timestamp");
+ }
+
return Status::OK();
}
@@ -218,7 +250,7 @@ void ReadConcernArgs::appendInfo(BSONObjBuilder* builder) const {
BSONObjBuilder rcBuilder(builder->subobjStart(kReadConcernFieldName));
if (_level) {
- string levelName;
+ StringData levelName;
switch (_level.get()) {
case ReadConcernLevel::kLocalReadConcern:
levelName = kLocalReadConcernStr;
@@ -236,8 +268,12 @@ void ReadConcernArgs::appendInfo(BSONObjBuilder* builder) const {
levelName = kAvailableReadConcernStr;
break;
+ case ReadConcernLevel::kSnapshotReadConcern:
+ levelName = kSnapshotReadConcernStr;
+ break;
+
default:
- fassert(28754, false);
+ MONGO_UNREACHABLE;
}
rcBuilder.append(kLevelFieldName, levelName);
@@ -247,8 +283,12 @@ void ReadConcernArgs::appendInfo(BSONObjBuilder* builder) const {
_opTime->append(&rcBuilder, kAfterOpTimeFieldName);
}
- if (_clusterTime) {
- rcBuilder.append(kAfterClusterTimeFieldName, _clusterTime->asTimestamp());
+ if (_afterClusterTime) {
+ rcBuilder.append(kAfterClusterTimeFieldName, _afterClusterTime->asTimestamp());
+ }
+
+ if (_atClusterTime) {
+ rcBuilder.append(kAtClusterTimeFieldName, _atClusterTime->asTimestamp());
}
rcBuilder.done();
diff --git a/src/mongo/db/repl/read_concern_args.h b/src/mongo/db/repl/read_concern_args.h
index 2fab94d4f75..f8a6226ecf8 100644
--- a/src/mongo/db/repl/read_concern_args.h
+++ b/src/mongo/db/repl/read_concern_args.h
@@ -48,7 +48,8 @@ enum class ReadConcernLevel {
kLocalReadConcern,
kMajorityReadConcern,
kLinearizableReadConcern,
- kAvailableReadConcern
+ kAvailableReadConcern,
+ kSnapshotReadConcern
};
class ReadConcernArgs {
@@ -75,14 +76,15 @@ public:
* find: "coll"
* filter: <Query Object>,
* readConcern: { // optional
- * level: "[majority|local|linearizable|available]",
+ * level: "[majority|local|linearizable|available|snapshot]",
* afterOpTime: { ts: <timestamp>, term: <NumberLong> },
* afterClusterTime: <timestamp>,
+ * atClusterTime: <timestamp>
* }
* }
*/
- Status initialize(const BSONObj& cmdObj, bool testMode = false) {
- return initialize(cmdObj[kReadConcernFieldName], testMode);
+ Status initialize(const BSONObj& cmdObj) {
+ return initialize(cmdObj[kReadConcernFieldName]);
}
/**
@@ -90,7 +92,7 @@ public:
* Use this if you are already iterating over the fields in the command object.
* This method correctly handles missing BSONElements.
*/
- Status initialize(const BSONElement& readConcernElem, bool testMode = false);
+ Status initialize(const BSONElement& readConcernElem);
/**
* Appends level and afterOpTime.
@@ -113,13 +115,13 @@ public:
bool hasLevel() const;
/**
- * Returns the opTime. Deprecated: will be replaced with getArgsClusterTime.
+ * Returns the opTime. Deprecated: will be replaced with getArgsAfterClusterTime.
*/
boost::optional<OpTime> getArgsOpTime() const;
- boost::optional<LogicalTime> getArgsClusterTime() const;
+ boost::optional<LogicalTime> getArgsAfterClusterTime() const;
- boost::optional<LogicalTime> getArgsPointInTime() const;
+ boost::optional<LogicalTime> getArgsAtClusterTime() const;
BSONObj toBSON() const;
std::string toString() const;
@@ -132,9 +134,11 @@ private:
/**
* Read data after cluster-wide cluster time.
*/
- boost::optional<LogicalTime> _clusterTime;
-
- boost::optional<LogicalTime> _pointInTime;
+ boost::optional<LogicalTime> _afterClusterTime;
+ /**
+ * Read data at a particular cluster time.
+ */
+ boost::optional<LogicalTime> _atClusterTime;
boost::optional<ReadConcernLevel> _level;
};
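
The header above documents the accepted wire format; the parser changes enforce the new combinations. A short sketch in the style of the test file that follows (mongo unit-test environment assumed; the test name is illustrative): atClusterTime is accepted only with level "snapshot" and may not be combined with afterClusterTime or afterOpTime.

TEST(ReadConcernSketch, AtClusterTimeRequiresSnapshotLevel) {
    // atClusterTime with level "majority" is rejected by the new validation...
    ReadConcernArgs majorityReadConcern;
    auto clusterTime = LogicalTime(Timestamp(20, 30));
    ASSERT_EQ(ErrorCodes::InvalidOptions,
              majorityReadConcern.initialize(BSON("find"
                                                  << "test"
                                                  << ReadConcernArgs::kReadConcernFieldName
                                                  << BSON(ReadConcernArgs::kAtClusterTimeFieldName
                                                          << clusterTime.asTimestamp()
                                                          << ReadConcernArgs::kLevelFieldName
                                                          << "majority"))));

    // ...while the same timestamp with level "snapshot" parses cleanly.
    ReadConcernArgs snapshotReadConcern;
    ASSERT_OK(snapshotReadConcern.initialize(
        BSON("find"
             << "test"
             << ReadConcernArgs::kReadConcernFieldName
             << BSON(ReadConcernArgs::kAtClusterTimeFieldName
                     << clusterTime.asTimestamp()
                     << ReadConcernArgs::kLevelFieldName
                     << "snapshot"))));
    ASSERT(ReadConcernLevel::kSnapshotReadConcern == snapshotReadConcern.getLevel());
}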
diff --git a/src/mongo/db/repl/read_concern_args_test.cpp b/src/mongo/db/repl/read_concern_args_test.cpp
index 830c1533622..e38263a3c8d 100644
--- a/src/mongo/db/repl/read_concern_args_test.cpp
+++ b/src/mongo/db/repl/read_concern_args_test.cpp
@@ -37,8 +37,8 @@ namespace repl {
namespace {
TEST(ReadAfterParse, OpTimeOnly) {
- ReadConcernArgs readAfterOpTime;
- ASSERT_OK(readAfterOpTime.initialize(BSON(
+ ReadConcernArgs readConcern;
+ ASSERT_OK(readConcern.initialize(BSON(
"find"
<< "test"
<< ReadConcernArgs::kReadConcernFieldName
@@ -46,81 +46,192 @@ TEST(ReadAfterParse, OpTimeOnly) {
<< BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
<< 2)))));
- ASSERT_TRUE(readAfterOpTime.getArgsOpTime());
- ASSERT_TRUE(!readAfterOpTime.getArgsClusterTime());
- auto argsOpTime = readAfterOpTime.getArgsOpTime();
+ ASSERT_TRUE(readConcern.getArgsOpTime());
+ ASSERT_TRUE(!readConcern.getArgsAfterClusterTime());
+ auto argsOpTime = readConcern.getArgsOpTime();
ASSERT_EQ(Timestamp(20, 30), argsOpTime->getTimestamp());
ASSERT_EQ(2, argsOpTime->getTerm());
- ASSERT(ReadConcernLevel::kLocalReadConcern == readAfterOpTime.getLevel());
+ ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
}
-TEST(ReadAfterParse, ClusterTimeOnly) {
- ReadConcernArgs readAfterOpTime;
- auto clusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << clusterTime.asTimestamp()))));
- auto argsClusterTime = readAfterOpTime.getArgsClusterTime();
- ASSERT_TRUE(argsClusterTime);
- ASSERT_TRUE(!readAfterOpTime.getArgsOpTime());
- ASSERT_TRUE(clusterTime == *argsClusterTime);
+TEST(ReadAfterParse, AfterClusterTimeOnly) {
+ ReadConcernArgs readConcern;
+ auto afterClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()))));
+ auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
+ ASSERT_TRUE(argsAfterClusterTime);
+ ASSERT_TRUE(!readConcern.getArgsOpTime());
+ ASSERT_TRUE(afterClusterTime == *argsAfterClusterTime);
}
-TEST(ReadAfterParse, ClusterTimeAndLevelLocal) {
- ReadConcernArgs readAfterOpTime;
+TEST(ReadAfterParse, AfterClusterTimeAndLevelLocal) {
+ ReadConcernArgs readConcern;
// Must have level=majority
- auto clusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << clusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "local"))));
- auto argsClusterTime = readAfterOpTime.getArgsClusterTime();
- ASSERT_TRUE(argsClusterTime);
- ASSERT_TRUE(!readAfterOpTime.getArgsOpTime());
- ASSERT_TRUE(clusterTime == *argsClusterTime);
- ASSERT(ReadConcernLevel::kLocalReadConcern == readAfterOpTime.getLevel());
-}
-
-TEST(ReadAfterParse, ClusterTimeAndLevelMajority) {
- ReadConcernArgs readAfterOpTime;
+ auto afterClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName
+ << "local"))));
+ auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
+ ASSERT_TRUE(argsAfterClusterTime);
+ ASSERT_TRUE(!readConcern.getArgsOpTime());
+ ASSERT_TRUE(afterClusterTime == *argsAfterClusterTime);
+ ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
+}
+
+TEST(ReadAfterParse, AfterClusterTimeAndLevelMajority) {
+ ReadConcernArgs readConcern;
// Must have level=majority
- auto clusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << clusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "majority"))));
- auto argsClusterTime = readAfterOpTime.getArgsClusterTime();
- ASSERT_TRUE(argsClusterTime);
- ASSERT_TRUE(!readAfterOpTime.getArgsOpTime());
- ASSERT_TRUE(clusterTime == *argsClusterTime);
- ASSERT(ReadConcernLevel::kMajorityReadConcern == readAfterOpTime.getLevel());
-}
-
-TEST(ReadAfterParse, LevelOnly) {
- ReadConcernArgs readAfterOpTime;
+ auto afterClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName
+ << "majority"))));
+ auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
+ ASSERT_TRUE(argsAfterClusterTime);
+ ASSERT_TRUE(!readConcern.getArgsOpTime());
+ ASSERT_TRUE(afterClusterTime == *argsAfterClusterTime);
+ ASSERT(ReadConcernLevel::kMajorityReadConcern == readConcern.getLevel());
+}
+
+TEST(ReadAfterParse, AfterClusterTimeAndLevelSnapshot) {
+ ReadConcernArgs readConcern;
+ auto afterClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
+ auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
+ ASSERT_TRUE(argsAfterClusterTime);
+ ASSERT_TRUE(!readConcern.getArgsOpTime());
+ ASSERT_TRUE(afterClusterTime == *argsAfterClusterTime);
+ ASSERT(ReadConcernLevel::kSnapshotReadConcern == readConcern.getLevel());
+}
+
+TEST(ReadAfterParse, AtClusterTimeOnly) {
+ ReadConcernArgs readConcern;
+ auto atClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_EQ(ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()))));
+}
+
+TEST(ReadAfterParse, AtClusterTimeAndLevelSnapshot) {
+ ReadConcernArgs readConcern;
+ auto atClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
+ auto argsAtClusterTime = readConcern.getArgsAtClusterTime();
+ ASSERT_TRUE(argsAtClusterTime);
+ ASSERT_FALSE(readConcern.getArgsOpTime());
+ ASSERT_FALSE(readConcern.getArgsAfterClusterTime());
+ ASSERT(ReadConcernLevel::kSnapshotReadConcern == readConcern.getLevel());
+}
+
+TEST(ReadAfterParse, AtClusterTimeAndLevelMajority) {
+ ReadConcernArgs readConcern;
+ auto atClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_EQ(ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName
+ << "majority"))));
+}
+
+TEST(ReadAfterParse, AtClusterTimeAndLevelLocal) {
+ ReadConcernArgs readConcern;
+ auto atClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_EQ(ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName
+ << "local"))));
+}
+
+TEST(ReadAfterParse, AtClusterTimeAndLevelAvailable) {
+ ReadConcernArgs readConcern;
+ auto atClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_EQ(ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName
+ << "available"))));
+}
+
+TEST(ReadAfterParse, AtClusterTimeAndLevelLinearizable) {
+ ReadConcernArgs readConcern;
+ auto atClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_EQ(ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName
+ << "linearizable"))));
+}
+
+TEST(ReadAfterParse, LevelMajorityOnly) {
+ ReadConcernArgs readConcern;
ASSERT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName << "majority"))));
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName << "majority"))));
+
+ ASSERT_TRUE(!readConcern.getArgsOpTime());
+ ASSERT_TRUE(!readConcern.getArgsAfterClusterTime());
+ ASSERT_TRUE(ReadConcernLevel::kMajorityReadConcern == readConcern.getLevel());
+}
- ASSERT_TRUE(!readAfterOpTime.getArgsOpTime());
- ASSERT_TRUE(!readAfterOpTime.getArgsClusterTime());
- ASSERT_TRUE(ReadConcernLevel::kMajorityReadConcern == readAfterOpTime.getLevel());
+TEST(ReadAfterParse, LevelSnapshotOnly) {
+ ReadConcernArgs readConcern;
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName << "snapshot"))));
+
+ ASSERT_TRUE(!readConcern.getArgsOpTime());
+ ASSERT_TRUE(!readConcern.getArgsAfterClusterTime());
+ ASSERT_TRUE(!readConcern.getArgsAtClusterTime());
+ ASSERT_TRUE(ReadConcernLevel::kSnapshotReadConcern == readConcern.getLevel());
}
TEST(ReadAfterParse, ReadCommittedFullSpecification) {
- ReadConcernArgs readAfterOpTime;
- auto clusterTime = LogicalTime(Timestamp(100, 200));
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
+ ReadConcernArgs readConcern;
+ auto afterClusterTime = LogicalTime(Timestamp(100, 200));
+ ASSERT_NOT_OK(readConcern.initialize(BSON(
"find"
<< "test"
<< ReadConcernArgs::kReadConcernFieldName
@@ -128,68 +239,91 @@ TEST(ReadAfterParse, ReadCommittedFullSpecification) {
<< BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
<< 2)
<< ReadConcernArgs::kAfterClusterTimeFieldName
- << clusterTime.asTimestamp()
+ << afterClusterTime.asTimestamp()
<< ReadConcernArgs::kLevelFieldName
<< "majority"))));
}
TEST(ReadAfterParse, Empty) {
- ReadConcernArgs readAfterOpTime;
- ASSERT_OK(readAfterOpTime.initialize(BSON("find"
- << "test")));
+ ReadConcernArgs readConcern;
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test")));
- ASSERT_TRUE(!readAfterOpTime.getArgsOpTime());
- ASSERT_TRUE(!readAfterOpTime.getArgsClusterTime());
- ASSERT(ReadConcernLevel::kLocalReadConcern == readAfterOpTime.getLevel());
+ ASSERT_TRUE(!readConcern.getArgsOpTime());
+ ASSERT_TRUE(!readConcern.getArgsAfterClusterTime());
+ ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
}
TEST(ReadAfterParse, BadRootType) {
- ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << "x")));
+ ReadConcernArgs readConcern;
+ ASSERT_NOT_OK(readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << "x")));
+}
+
+TEST(ReadAfterParse, BadAtClusterTimeType) {
+ ReadConcernArgs readConcern;
+ ASSERT_EQ(ErrorCodes::TypeMismatch,
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << 2
+ << ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
+}
+
+TEST(ReadAfterParse, BadAtClusterTimeValue) {
+ ReadConcernArgs readConcern;
+ ASSERT_EQ(ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << LogicalTime::kUninitialized.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
}
TEST(ReadAfterParse, BadOpTimeType) {
- ReadConcernArgs readAfterOpTime;
+ ReadConcernArgs readConcern;
ASSERT_NOT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName << 2))));
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName << 2))));
}
TEST(ReadAfterParse, OpTimeNotNeededForValidReadConcern) {
- ReadConcernArgs readAfterOpTime;
- ASSERT_OK(readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSONObj())));
+ ReadConcernArgs readConcern;
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSONObj())));
}
TEST(ReadAfterParse, NoOpTimeTS) {
- ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << 2)))));
+ ReadConcernArgs readConcern;
+ ASSERT_NOT_OK(readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << 2)))));
}
TEST(ReadAfterParse, NoOpTimeTerm) {
- ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTermFieldName << 2)))));
+ ReadConcernArgs readConcern;
+ ASSERT_NOT_OK(readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTermFieldName << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTSType) {
- ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(
+ ReadConcernArgs readConcern;
+ ASSERT_NOT_OK(readConcern.initialize(
BSON("find"
<< "test"
<< ReadConcernArgs::kReadConcernFieldName
@@ -199,8 +333,8 @@ TEST(ReadAfterParse, BadOpTimeTSType) {
}
TEST(ReadAfterParse, BadOpTimeTermType) {
- ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
+ ReadConcernArgs readConcern;
+ ASSERT_NOT_OK(readConcern.initialize(BSON(
"find"
<< "test"
<< ReadConcernArgs::kReadConcernFieldName
@@ -210,37 +344,67 @@ TEST(ReadAfterParse, BadOpTimeTermType) {
}
TEST(ReadAfterParse, BadLevelType) {
- ReadConcernArgs readAfterOpTime;
+ ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::TypeMismatch,
- readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName << 7))));
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName << 7))));
}
TEST(ReadAfterParse, BadLevelValue) {
- ReadConcernArgs readAfterOpTime;
+ ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::FailedToParse,
- readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName
- << "seven is not a real level"))));
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName
+ << "seven is not a real level"))));
}
TEST(ReadAfterParse, BadOption) {
- ReadConcernArgs readAfterOpTime;
+ ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::InvalidOptions,
- readAfterOpTime.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON("asdf" << 1))));
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON("asdf" << 1))));
+}
+
+TEST(ReadAfterParse, AtClusterTimeAndAfterClusterTime) {
+ ReadConcernArgs readConcern;
+ auto clusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_EQ(ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << clusterTime.asTimestamp()
+ << ReadConcernArgs::kAfterClusterTimeFieldName
+ << clusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
+}
+
+TEST(ReadAfterParse, AfterOpTimeAndLevelSnapshot) {
+ ReadConcernArgs readConcern;
+ ASSERT_EQ(ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30)
+ << OpTime::kTermFieldName
+ << 2)
+ << ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
}
TEST(ReadAfterSerialize, Empty) {
BSONObjBuilder builder;
- ReadConcernArgs readAfterOpTime;
- readAfterOpTime.appendInfo(&builder);
+ ReadConcernArgs readConcern;
+ readConcern.appendInfo(&builder);
BSONObj obj(builder.done());
@@ -249,21 +413,21 @@ TEST(ReadAfterSerialize, Empty) {
TEST(ReadAfterSerialize, AfterClusterTimeOnly) {
BSONObjBuilder builder;
- auto clusterTime = LogicalTime(Timestamp(20, 30));
- ReadConcernArgs readAfterClusterTime(clusterTime, boost::none);
- readAfterClusterTime.appendInfo(&builder);
+ auto afterClusterTime = LogicalTime(Timestamp(20, 30));
+ ReadConcernArgs readConcern(afterClusterTime, boost::none);
+ readConcern.appendInfo(&builder);
- BSONObj expectedObj(
- BSON(ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName << clusterTime.asTimestamp())));
+ BSONObj expectedObj(BSON(
+ ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName << afterClusterTime.asTimestamp())));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
TEST(ReadAfterSerialize, AfterOpTimeOnly) {
BSONObjBuilder builder;
- ReadConcernArgs readAfterOpTime(OpTime(Timestamp(20, 30), 2), boost::none);
- readAfterOpTime.appendInfo(&builder);
+ ReadConcernArgs readConcern(OpTime(Timestamp(20, 30), 2), boost::none);
+ readConcern.appendInfo(&builder);
BSONObj expectedObj(BSON(
ReadConcernArgs::kReadConcernFieldName << BSON(
@@ -275,8 +439,8 @@ TEST(ReadAfterSerialize, AfterOpTimeOnly) {
TEST(ReadAfterSerialize, CommitLevelOnly) {
BSONObjBuilder builder;
- ReadConcernArgs readAfterOpTime(ReadConcernLevel::kLocalReadConcern);
- readAfterOpTime.appendInfo(&builder);
+ ReadConcernArgs readConcern(ReadConcernLevel::kLocalReadConcern);
+ readConcern.appendInfo(&builder);
BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "local")));
@@ -286,24 +450,24 @@ TEST(ReadAfterSerialize, CommitLevelOnly) {
TEST(ReadAfterSerialize, iAfterCLusterTimeAndLevel) {
BSONObjBuilder builder;
- auto clusterTime = LogicalTime(Timestamp(20, 30));
- ReadConcernArgs readAfterClusterTime(clusterTime, ReadConcernLevel::kMajorityReadConcern);
- readAfterClusterTime.appendInfo(&builder);
+ auto afterClusterTime = LogicalTime(Timestamp(20, 30));
+ ReadConcernArgs readConcern(afterClusterTime, ReadConcernLevel::kMajorityReadConcern);
+ readConcern.appendInfo(&builder);
BSONObj expectedObj(
BSON(ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "majority"
<< ReadConcernArgs::kAfterClusterTimeFieldName
- << clusterTime.asTimestamp())));
+ << afterClusterTime.asTimestamp())));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
TEST(ReadAfterSerialize, AfterOpTimeAndLevel) {
BSONObjBuilder builder;
- ReadConcernArgs readAfterOpTime(OpTime(Timestamp(20, 30), 2),
- ReadConcernLevel::kMajorityReadConcern);
- readAfterOpTime.appendInfo(&builder);
+ ReadConcernArgs readConcern(OpTime(Timestamp(20, 30), 2),
+ ReadConcernLevel::kMajorityReadConcern);
+ readConcern.appendInfo(&builder);
BSONObj expectedObj(BSON(
ReadConcernArgs::kReadConcernFieldName
@@ -316,6 +480,29 @@ TEST(ReadAfterSerialize, AfterOpTimeAndLevel) {
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
+TEST(ReadAfterSerialize, AtClusterTimeAndLevelSnapshot) {
+ BSONObjBuilder builder;
+ ReadConcernArgs readConcern;
+ auto atClusterTime = LogicalTime(Timestamp(20, 30));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName
+ << "snapshot"
+ << ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()))));
+
+ readConcern.appendInfo(&builder);
+
+ BSONObj expectedObj(
+ BSON(ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName << "snapshot"
+ << ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp())));
+
+ ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
+}
+
} // unnamed namespace
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_external_state.h b/src/mongo/db/repl/replication_coordinator_external_state.h
index ff12be19d49..744abfa991a 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state.h
@@ -284,6 +284,11 @@ public:
virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* opCtx) const = 0;
/**
+ * Returns true if the current storage engine supports snapshot read concern.
+ */
+ virtual bool isReadConcernSnapshotSupportedByStorageEngine(OperationContext* opCtx) const = 0;
+
+ /**
* Applies the operations described in the oplog entries contained in "ops" using the
* "applyOperation" function.
*/
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 421f71f48e1..3a6819534b5 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -864,6 +864,14 @@ bool ReplicationCoordinatorExternalStateImpl::isReadCommittedSupportedByStorageE
return storageEngine->getSnapshotManager();
}
+bool ReplicationCoordinatorExternalStateImpl::isReadConcernSnapshotSupportedByStorageEngine(
+ OperationContext* opCtx) const {
+ auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+ // This should never be called if the storage engine has not been initialized.
+ invariant(storageEngine);
+ return storageEngine->supportsReadConcernSnapshot();
+}
+
StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::multiApply(
OperationContext* opCtx,
MultiApplier::Operations ops,
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index 26ad9b9bebb..85c86929b0b 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -105,6 +105,7 @@ public:
virtual void notifyOplogMetadataWaiters(const OpTime& committedOpTime);
virtual double getElectionTimeoutOffsetLimitFraction() const;
virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* opCtx) const;
+ virtual bool isReadConcernSnapshotSupportedByStorageEngine(OperationContext* opCtx) const;
virtual StatusWith<OpTime> multiApply(OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) override;
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
index f211bacf67f..6cc8c139238 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
@@ -240,6 +240,11 @@ bool ReplicationCoordinatorExternalStateMock::isReadCommittedSupportedByStorageE
return _isReadCommittedSupported;
}
+bool ReplicationCoordinatorExternalStateMock::isReadConcernSnapshotSupportedByStorageEngine(
+ OperationContext* opCtx) const {
+ return true;
+}
+
StatusWith<OpTime> ReplicationCoordinatorExternalStateMock::multiApply(
OperationContext*, MultiApplier::Operations, MultiApplier::ApplyOperationFn) {
return {ErrorCodes::InternalError, "Method not implemented"};
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
index 59ac2bfce33..8d0d68b8133 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
@@ -93,6 +93,7 @@ public:
virtual void notifyOplogMetadataWaiters(const OpTime& committedOpTime);
virtual double getElectionTimeoutOffsetLimitFraction() const;
virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* opCtx) const;
+ virtual bool isReadConcernSnapshotSupportedByStorageEngine(OperationContext* opCtx) const;
virtual StatusWith<OpTime> multiApply(OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) override;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 2e989028fb6..553412a5916 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -1167,18 +1167,34 @@ Status ReplicationCoordinatorImpl::_validateReadConcern(OperationContext* opCtx,
"Waiting for replication not allowed while holding a lock"};
}
- if (readConcern.getArgsClusterTime() &&
+ if (readConcern.getArgsAfterClusterTime() &&
readConcern.getLevel() != ReadConcernLevel::kMajorityReadConcern &&
- readConcern.getLevel() != ReadConcernLevel::kLocalReadConcern) {
+ readConcern.getLevel() != ReadConcernLevel::kLocalReadConcern &&
+ readConcern.getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
+ return {
+ ErrorCodes::BadValue,
+ "Only readConcern level 'majority', 'local', or 'snapshot' is allowed when specifying "
+ "afterClusterTime"};
+ }
+
+ if (readConcern.getArgsAtClusterTime() &&
+ readConcern.getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
return {ErrorCodes::BadValue,
- "Only readConcern level 'majority' or 'local' is allowed when specifying "
- "afterClusterTime"};
+ "readConcern level 'snapshot' is required when specifying atClusterTime"};
}
if (readConcern.getLevel() == ReadConcernLevel::kMajorityReadConcern &&
!_externalState->isReadCommittedSupportedByStorageEngine(opCtx)) {
return {ErrorCodes::ReadConcernMajorityNotEnabled,
- "Majority read concern requested, but it is not supported by the storage engine."};
+ str::stream() << "Storage engine does not support read concern: "
+ << readConcern.toString()};
+ }
+
+ if (readConcern.getLevel() == ReadConcernLevel::kSnapshotReadConcern &&
+ !_externalState->isReadConcernSnapshotSupportedByStorageEngine(opCtx)) {
+ return {ErrorCodes::InvalidOptions,
+ str::stream() << "Storage engine does not support read concern: "
+ << readConcern.toString()};
}
return Status::OK();
@@ -1192,7 +1208,7 @@ Status ReplicationCoordinatorImpl::waitUntilOpTimeForRead(OperationContext* opCt
}
// nothing to wait for
- if (!readConcern.getArgsClusterTime() && !readConcern.getArgsOpTime()) {
+ if (!readConcern.getArgsAfterClusterTime() && !readConcern.getArgsOpTime()) {
return Status::OK();
}
@@ -1210,7 +1226,7 @@ Status ReplicationCoordinatorImpl::waitUntilOpTimeForReadUntil(OperationContext*
"node needs to be a replica set member to use read concern"};
}
- if (readConcern.getArgsClusterTime()) {
+ if (readConcern.getArgsAfterClusterTime()) {
return _waitUntilClusterTimeForRead(opCtx, readConcern, deadline);
} else {
return _waitUntilOpTimeForReadDeprecated(opCtx, readConcern);
@@ -1296,7 +1312,7 @@ Status ReplicationCoordinatorImpl::_waitUntilOpTime(OperationContext* opCtx,
Status ReplicationCoordinatorImpl::_waitUntilClusterTimeForRead(OperationContext* opCtx,
const ReadConcernArgs& readConcern,
boost::optional<Date_t> deadline) {
- auto clusterTime = *readConcern.getArgsClusterTime();
+ auto clusterTime = *readConcern.getArgsAfterClusterTime();
invariant(clusterTime != LogicalTime::kUninitialized);
// convert clusterTime to opTime so it can be used by the _opTimeWaiterList for wait on
diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp
index b50c8b95924..1281cc50106 100644
--- a/src/mongo/db/service_entry_point_mongod.cpp
+++ b/src/mongo/db/service_entry_point_mongod.cpp
@@ -289,22 +289,23 @@ void appendReplyMetadata(OperationContext* opCtx,
}
/**
- * Given the specified command and whether it supports read concern, returns an effective read
- * concern which should be used.
+ * Given the specified command, returns an effective read concern which should be used or an error
+ * if the read concern is not valid for the command.
*/
-StatusWith<repl::ReadConcernArgs> _extractReadConcern(const BSONObj& cmdObj,
- bool supportsNonLocalReadConcern) {
+StatusWith<repl::ReadConcernArgs> _extractReadConcern(const Command* command,
+ const std::string& dbName,
+ const BSONObj& cmdObj) {
repl::ReadConcernArgs readConcernArgs;
- auto readConcernParseStatus = readConcernArgs.initialize(cmdObj, Command::testCommandsEnabled);
+ auto readConcernParseStatus = readConcernArgs.initialize(cmdObj);
if (!readConcernParseStatus.isOK()) {
return readConcernParseStatus;
}
- if (!supportsNonLocalReadConcern &&
- readConcernArgs.getLevel() != repl::ReadConcernLevel::kLocalReadConcern) {
+ if (!command->supportsReadConcern(dbName, cmdObj, readConcernArgs.getLevel())) {
return {ErrorCodes::InvalidOptions,
- str::stream() << "Command does not support non local read concern"};
+ str::stream() << "Command does not support read concern "
+ << readConcernArgs.toString()};
}
return readConcernArgs;
@@ -476,10 +477,8 @@ bool runCommandImpl(OperationContext* opCtx,
// When a linearizable read command is passed in, check to make sure we're reading
// from the primary.
- if (command->supportsNonLocalReadConcern(db, cmd) &&
- (repl::ReadConcernArgs::get(opCtx).getLevel() ==
- repl::ReadConcernLevel::kLinearizableReadConcern) &&
- (request.getCommandName() != "getMore")) {
+ if (repl::ReadConcernArgs::get(opCtx).getLevel() ==
+ repl::ReadConcernLevel::kLinearizableReadConcern) {
auto linearizableReadStatus = waitForLinearizableReadConcern(opCtx);
@@ -677,8 +676,7 @@ void execCommandDatabase(OperationContext* opCtx,
}
auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
- readConcernArgs = uassertStatusOK(_extractReadConcern(
- request.body, command->supportsNonLocalReadConcern(dbname, request.body)));
+ readConcernArgs = uassertStatusOK(_extractReadConcern(command, dbname, request.body));
auto& oss = OperationShardingState::get(opCtx);
@@ -687,7 +685,7 @@ void execCommandDatabase(OperationContext* opCtx,
(iAmPrimary ||
((serverGlobalParams.featureCompatibility.getVersion() ==
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) &&
- (readConcernArgs.hasLevel() || readConcernArgs.getArgsClusterTime())))) {
+ (readConcernArgs.hasLevel() || readConcernArgs.getArgsAfterClusterTime())))) {
oss.initializeShardVersion(NamespaceString(command->parseNs(dbname, request.body)),
shardVersionFieldIdx);
@@ -741,8 +739,7 @@ void execCommandDatabase(OperationContext* opCtx,
// Note: the read concern may not have been successfully or yet placed on the opCtx, so
// parsing it separately here.
const std::string db = request.getDatabase().toString();
- auto readConcernArgsStatus = _extractReadConcern(
- request.body, command->supportsNonLocalReadConcern(db, request.body));
+ auto readConcernArgsStatus = _extractReadConcern(command, db, request.body);
auto operationTime = readConcernArgsStatus.isOK()
? computeOperationTime(
opCtx, startOperationTime, readConcernArgsStatus.getValue().getLevel())
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index 5c277ee2429..0a4d3b01aa2 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -269,6 +269,13 @@ public:
}
/**
+ * See `StorageEngine::supportsReadConcernSnapshot`
+ */
+ virtual bool supportsReadConcernSnapshot() const {
+ return false;
+ }
+
+ /**
* See `StorageEngine::replicationBatchIsComplete()`
*/
virtual void replicationBatchIsComplete() const {};
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 9d35813b374..dc4fed07d78 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -380,6 +380,10 @@ bool KVStorageEngine::supportsRecoverToStableTimestamp() const {
return _engine->supportsRecoverToStableTimestamp();
}
+bool KVStorageEngine::supportsReadConcernSnapshot() const {
+ return _engine->supportsReadConcernSnapshot();
+}
+
void KVStorageEngine::replicationBatchIsComplete() const {
return _engine->replicationBatchIsComplete();
}
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.h b/src/mongo/db/storage/kv/kv_storage_engine.h
index 050fb5d812a..aca5e6ad6b6 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine.h
@@ -121,6 +121,8 @@ public:
virtual bool supportsRecoverToStableTimestamp() const override;
+ bool supportsReadConcernSnapshot() const final;
+
virtual void replicationBatchIsComplete() const override;
SnapshotManager* getSnapshotManager() const final;
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index d3ca77a3b86..2645be2776f 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -302,6 +302,13 @@ public:
}
/**
+ * Returns true if the storage engine supports the readConcern level "snapshot".
+ */
+ virtual bool supportsReadConcernSnapshot() const {
+ return false;
+ }
+
+ /**
* Recovers the storage engine state to the last stable timestamp. "Stable" in this case
* refers to a timestamp that is guaranteed to never be rolled back. The stable timestamp
* used should be one provided by StorageEngine::setStableTimestamp().
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index dc21864d5b5..1f24b6d8af9 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -57,6 +57,8 @@ public:
auto engine = opCtx->getClient()->getServiceContext()->getGlobalStorageEngine();
return BSON("name" << storageGlobalParams.engine << "supportsCommittedReads"
<< bool(engine->getSnapshotManager())
+ << "supportsSnapshotReadConcern"
+ << engine->supportsReadConcernSnapshot()
<< "readOnly"
<< storageGlobalParams.readOnly
<< "persistent"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 595e2909b06..f9f9bdf3cb5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1112,6 +1112,10 @@ bool WiredTigerKVEngine::supportsRecoverToStableTimestamp() const {
return _checkpointThread->supportsRecoverToStableTimestamp();
}
+bool WiredTigerKVEngine::supportsReadConcernSnapshot() const {
+ return true;
+}
+
void WiredTigerKVEngine::startOplogManager(OperationContext* opCtx,
const std::string& uri,
WiredTigerRecordStore* oplogRecordStore) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 92c3ee81bb7..70b97c12d14 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -175,6 +175,8 @@ public:
virtual bool supportsRecoverToStableTimestamp() const override;
+ bool supportsReadConcernSnapshot() const final;
+
// wiredtiger specific
// Calls WT_CONNECTION::reconfigure on the underlying WT_CONNECTION
// held by this class
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index efea41cb54d..1eae00e206f 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -203,6 +203,19 @@ void execCommandClient(OperationContext* opCtx,
return;
}
+ repl::ReadConcernArgs readConcernArgs;
+ auto readConcernParseStatus = readConcernArgs.initialize(request.body);
+ if (!readConcernParseStatus.isOK()) {
+ CommandHelpers::appendCommandStatus(result, readConcernParseStatus);
+ return;
+ }
+ if (readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern) {
+ CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidOptions, "read concern snapshot is not supported on mongos"));
+ return;
+ }
+
// attach tracking
rpc::TrackingMetadata trackingMetadata;
trackingMetadata.initWithOperName(c->getName());