summaryrefslogtreecommitdiff
path: root/src/mongo
diff options
context:
space:
mode:
authorKevin Pulo <kevin.pulo@mongodb.com>2020-02-20 21:42:07 +1100
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-03-05 13:03:43 +0000
commit747ff353cbc819d032fa727d4bd7ffad16ea0437 (patch)
treed9b3d7e9af26138d7b74e0416a93d6110e326af0 /src/mongo
parent7c4b875a8858c5bd5efc9bf4f285f7f440fdfdc0 (diff)
downloadmongo-747ff353cbc819d032fa727d4bd7ffad16ea0437.tar.gz
SERVER-45692 add explicit RWC to inter-node commands (even if merely kImplicitDefault)
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/client/dbclient_base.cpp193
-rw-r--r--src/mongo/client/dbclient_base.h129
-rw-r--r--src/mongo/client/dbclient_connection.cpp10
-rw-r--r--src/mongo/client/dbclient_connection.h29
-rw-r--r--src/mongo/client/dbclient_cursor.cpp21
-rw-r--r--src/mongo/client/dbclient_cursor.h7
-rw-r--r--src/mongo/client/dbclient_rs.cpp59
-rw-r--r--src/mongo/client/dbclient_rs.h44
-rw-r--r--src/mongo/db/cloner.cpp5
-rw-r--r--src/mongo/db/dbdirectclient.cpp15
-rw-r--r--src/mongo/db/dbdirectclient.h19
-rw-r--r--src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp5
-rw-r--r--src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp46
-rw-r--r--src/mongo/db/pipeline/sharded_agg_helpers.cpp17
-rw-r--r--src/mongo/db/query/query_request.cpp4
-rw-r--r--src/mongo/db/read_concern_mongod.cpp4
-rw-r--r--src/mongo/db/repl/SConscript1
-rw-r--r--src/mongo/db/repl/collection_cloner.cpp10
-rw-r--r--src/mongo/db/repl/initial_syncer.cpp3
-rw-r--r--src/mongo/db/repl/oplog_interface_remote.cpp11
-rw-r--r--src/mongo/db/repl/read_concern_args.cpp5
-rw-r--r--src/mongo/db/repl/read_concern_args.h2
-rw-r--r--src/mongo/db/repl/replication_info.cpp10
-rw-r--r--src/mongo/db/repl/roll_back_local_operations_test.cpp3
-rw-r--r--src/mongo/db/repl/rollback_source_impl.cpp9
-rw-r--r--src/mongo/db/repl/sync_source_resolver.cpp8
-rw-r--r--src/mongo/db/s/add_shard_cmd.cpp2
-rw-r--r--src/mongo/db/s/balancer/migration_manager.cpp11
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp22
-rw-r--r--src/mongo/db/s/migration_util.cpp4
-rw-r--r--src/mongo/db/s/shard_key_util.cpp3
-rw-r--r--src/mongo/db/sessions_collection.cpp12
-rw-r--r--src/mongo/db/write_concern_options.cpp6
-rw-r--r--src/mongo/db/write_concern_options.h1
-rw-r--r--src/mongo/dbtests/mock/mock_dbclient_connection.cpp27
-rw-r--r--src/mongo/dbtests/mock/mock_dbclient_connection.h34
-rw-r--r--src/mongo/dbtests/mock/mock_remote_db_server.cpp3
-rw-r--r--src/mongo/dbtests/mock/mock_remote_db_server.h3
-rw-r--r--src/mongo/s/commands/cluster_find_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_write_cmd.cpp24
-rw-r--r--src/mongo/s/query/cluster_aggregation_planner.cpp10
-rw-r--r--src/mongo/s/request_types/merge_chunk_request_test.cpp6
-rw-r--r--src/mongo/s/request_types/merge_chunk_request_type.cpp3
-rw-r--r--src/mongo/s/request_types/migration_secondary_throttle_options.cpp8
-rw-r--r--src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp32
-rw-r--r--src/mongo/s/request_types/split_chunk_request_test.cpp6
-rw-r--r--src/mongo/s/request_types/split_chunk_request_type.cpp3
-rw-r--r--src/mongo/s/transaction_router.cpp18
-rw-r--r--src/mongo/s/transaction_router_test.cpp7
-rw-r--r--src/mongo/shell/encrypted_dbclient_base.cpp26
-rw-r--r--src/mongo/shell/encrypted_dbclient_base.h16
-rw-r--r--src/mongo/shell/feature_compatibility_version.js8
-rw-r--r--src/mongo/shell/mongo.js16
-rw-r--r--src/mongo/shell/replsettest.js61
54 files changed, 711 insertions, 332 deletions
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp
index caad3d6cc98..4e2d1b36a19 100644
--- a/src/mongo/client/dbclient_base.cpp
+++ b/src/mongo/client/dbclient_base.cpp
@@ -319,10 +319,14 @@ bool DBClientBase::runPseudoCommand(StringData db,
return success;
}
-long long DBClientBase::count(
- const NamespaceStringOrUUID nsOrUuid, const BSONObj& query, int options, int limit, int skip) {
+long long DBClientBase::count(const NamespaceStringOrUUID nsOrUuid,
+ const BSONObj& query,
+ int options,
+ int limit,
+ int skip,
+ boost::optional<BSONObj> readConcernObj) {
auto dbName = (nsOrUuid.uuid() ? nsOrUuid.dbname() : (*nsOrUuid.nss()).db().toString());
- BSONObj cmd = _countCmd(nsOrUuid, query, options, limit, skip);
+ BSONObj cmd = _countCmd(nsOrUuid, query, options, limit, skip, readConcernObj);
BSONObj res;
if (!runCommand(dbName, cmd, res, options)) {
auto status = getStatusFromCommandResult(res);
@@ -332,8 +336,12 @@ long long DBClientBase::count(
return res["n"].numberLong();
}
-BSONObj DBClientBase::_countCmd(
- const NamespaceStringOrUUID nsOrUuid, const BSONObj& query, int options, int limit, int skip) {
+BSONObj DBClientBase::_countCmd(const NamespaceStringOrUUID nsOrUuid,
+ const BSONObj& query,
+ int options,
+ int limit,
+ int skip,
+ boost::optional<BSONObj> readConcernObj) {
BSONObjBuilder b;
if (nsOrUuid.uuid()) {
const auto uuid = *nsOrUuid.uuid();
@@ -346,6 +354,9 @@ BSONObj DBClientBase::_countCmd(
b.append("limit", limit);
if (skip)
b.append("skip", skip);
+ if (readConcernObj) {
+ b.append(repl::ReadConcernArgs::kReadConcernFieldName, *readConcernObj);
+ }
return b.obj();
}
@@ -541,8 +552,12 @@ bool DBClientBase::isMaster(bool& isMaster, BSONObj* info) {
return ok;
}
-bool DBClientBase::createCollection(
- const string& ns, long long size, bool capped, int max, BSONObj* info) {
+bool DBClientBase::createCollection(const string& ns,
+ long long size,
+ bool capped,
+ int max,
+ BSONObj* info,
+ boost::optional<BSONObj> writeConcernObj) {
verify(!capped || size);
BSONObj o;
if (info == nullptr)
@@ -556,6 +571,9 @@ bool DBClientBase::createCollection(
b.append("capped", true);
if (max)
b.append("max", max);
+ if (writeConcernObj) {
+ b.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj);
+ }
return runCommand(db.c_str(), b.done(), *info);
}
@@ -644,11 +662,18 @@ void DBClientBase::findN(vector<BSONObj>& out,
int nToReturn,
int nToSkip,
const BSONObj* fieldsToReturn,
- int queryOptions) {
+ int queryOptions,
+ boost::optional<BSONObj> readConcernObj) {
out.reserve(nToReturn);
- unique_ptr<DBClientCursor> c =
- this->query(NamespaceString(ns), query, nToReturn, nToSkip, fieldsToReturn, queryOptions);
+ unique_ptr<DBClientCursor> c = this->query(NamespaceString(ns),
+ query,
+ nToReturn,
+ nToSkip,
+ fieldsToReturn,
+ queryOptions,
+ 0 /* batchSize */,
+ readConcernObj);
// query() throws on network error so OK to uassert with numeric code here.
uassert(10276,
@@ -672,15 +697,18 @@ void DBClientBase::findN(vector<BSONObj>& out,
BSONObj DBClientBase::findOne(const string& ns,
const Query& query,
const BSONObj* fieldsToReturn,
- int queryOptions) {
+ int queryOptions,
+ boost::optional<BSONObj> readConcernObj) {
vector<BSONObj> v;
- findN(v, ns, query, 1, 0, fieldsToReturn, queryOptions);
+ findN(v, ns, query, 1, 0, fieldsToReturn, queryOptions, readConcernObj);
return v.empty() ? BSONObj() : v[0];
}
-std::pair<BSONObj, NamespaceString> DBClientBase::findOneByUUID(const std::string& db,
- UUID uuid,
- const BSONObj& filter) {
+std::pair<BSONObj, NamespaceString> DBClientBase::findOneByUUID(
+ const std::string& db,
+ UUID uuid,
+ const BSONObj& filter,
+ boost::optional<BSONObj> readConcernObj) {
list<BSONObj> results;
BSONObj res;
@@ -689,6 +717,9 @@ std::pair<BSONObj, NamespaceString> DBClientBase::findOneByUUID(const std::strin
cmdBuilder.append("filter", filter);
cmdBuilder.append("limit", 1);
cmdBuilder.append("singleBatch", true);
+ if (readConcernObj) {
+ cmdBuilder.append(repl::ReadConcernArgs::kReadConcernFieldName, *readConcernObj);
+ }
BSONObj cmd = cmdBuilder.obj();
@@ -721,9 +752,17 @@ unique_ptr<DBClientCursor> DBClientBase::query(const NamespaceStringOrUUID& nsOr
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize) {
- unique_ptr<DBClientCursor> c(new DBClientCursor(
- this, nsOrUuid, query.obj, nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize));
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
+ unique_ptr<DBClientCursor> c(new DBClientCursor(this,
+ nsOrUuid,
+ query.obj,
+ nToReturn,
+ nToSkip,
+ fieldsToReturn,
+ queryOptions,
+ batchSize,
+ readConcernObj));
if (c->init())
return c;
return nullptr;
@@ -754,11 +793,13 @@ unsigned long long DBClientBase::query(std::function<void(const BSONObj&)> f,
Query query,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize) {
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
DBClientFunConvertor fun;
fun._f = f;
std::function<void(DBClientCursorBatchIterator&)> ptr(fun);
- return this->query(ptr, nsOrUuid, query, fieldsToReturn, queryOptions, batchSize);
+ return this->query(
+ ptr, nsOrUuid, query, fieldsToReturn, queryOptions, batchSize, readConcernObj);
}
unsigned long long DBClientBase::query(std::function<void(DBClientCursorBatchIterator&)> f,
@@ -766,12 +807,13 @@ unsigned long long DBClientBase::query(std::function<void(DBClientCursorBatchIte
Query query,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize) {
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
// mask options
queryOptions &= (int)(QueryOption_NoCursorTimeout | QueryOption_SlaveOk);
- unique_ptr<DBClientCursor> c(
- this->query(nsOrUuid, query, 0, 0, fieldsToReturn, queryOptions, batchSize));
+ unique_ptr<DBClientCursor> c(this->query(
+ nsOrUuid, query, 0, 0, fieldsToReturn, queryOptions, batchSize, readConcernObj));
// query() throws on network error so OK to uassert with numeric code here.
uassert(16090, "socket error for mapping query", c.get());
@@ -785,34 +827,63 @@ unsigned long long DBClientBase::query(std::function<void(DBClientCursorBatchIte
return n;
}
-void DBClientBase::insert(const string& ns, BSONObj obj, int flags) {
- insert(ns, std::vector<BSONObj>{obj}, flags);
+void DBClientBase::insert(const string& ns,
+ BSONObj obj,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
+ insert(ns, std::vector<BSONObj>{obj}, flags, writeConcernObj);
}
-void DBClientBase::insert(const string& ns, const vector<BSONObj>& v, int flags) {
+void DBClientBase::insert(const string& ns,
+ const vector<BSONObj>& v,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
bool ordered = !(flags & InsertOption_ContinueOnError);
auto nss = NamespaceString(ns);
- auto request =
- OpMsgRequest::fromDBAndBody(nss.db(), BSON("insert" << nss.coll() << "ordered" << ordered));
+ BSONObjBuilder cmdBuilder;
+ cmdBuilder.append("insert", nss.coll());
+ cmdBuilder.append("ordered", ordered);
+ if (writeConcernObj) {
+ cmdBuilder.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj);
+ }
+ auto request = OpMsgRequest::fromDBAndBody(nss.db(), cmdBuilder.obj());
request.sequences.push_back({"documents", v});
runFireAndForgetCommand(std::move(request));
}
-void DBClientBase::remove(const string& ns, Query obj, int flags) {
+void DBClientBase::remove(const string& ns,
+ Query obj,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
int limit = (flags & RemoveOption_JustOne) ? 1 : 0;
auto nss = NamespaceString(ns);
- auto request = OpMsgRequest::fromDBAndBody(nss.db(), BSON("delete" << nss.coll()));
+ BSONObjBuilder cmdBuilder;
+ cmdBuilder.append("delete", nss.coll());
+ if (writeConcernObj) {
+ cmdBuilder.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj);
+ }
+ auto request = OpMsgRequest::fromDBAndBody(nss.db(), cmdBuilder.obj());
request.sequences.push_back({"deletes", {BSON("q" << obj.obj << "limit" << limit)}});
runFireAndForgetCommand(std::move(request));
}
-void DBClientBase::update(const string& ns, Query query, BSONObj obj, bool upsert, bool multi) {
+void DBClientBase::update(const string& ns,
+ Query query,
+ BSONObj obj,
+ bool upsert,
+ bool multi,
+ boost::optional<BSONObj> writeConcernObj) {
auto nss = NamespaceString(ns);
- auto request = OpMsgRequest::fromDBAndBody(nss.db(), BSON("update" << nss.coll()));
+ BSONObjBuilder cmdBuilder;
+ cmdBuilder.append("update", nss.coll());
+ if (writeConcernObj) {
+ cmdBuilder.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj);
+ }
+ auto request = OpMsgRequest::fromDBAndBody(nss.db(), cmdBuilder.obj());
request.sequences.push_back(
{"updates",
{BSON("q" << query.obj << "u" << obj << "upsert" << upsert << "multi" << multi)}});
@@ -820,12 +891,17 @@ void DBClientBase::update(const string& ns, Query query, BSONObj obj, bool upser
runFireAndForgetCommand(std::move(request));
}
-void DBClientBase::update(const string& ns, Query query, BSONObj obj, int flags) {
+void DBClientBase::update(const string& ns,
+ Query query,
+ BSONObj obj,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
update(ns,
std::move(query),
std::move(obj),
flags & UpdateOption_Upsert,
- flags & UpdateOption_Multi);
+ flags & UpdateOption_Multi,
+ writeConcernObj);
}
void DBClientBase::killCursor(const NamespaceString& ns, long long cursorId) {
@@ -914,16 +990,24 @@ std::list<BSONObj> DBClientBase::_getIndexSpecs(const NamespaceStringOrUUID& nsO
}
-void DBClientBase::dropIndex(const string& ns, BSONObj keys) {
- dropIndex(ns, genIndexName(keys));
+void DBClientBase::dropIndex(const string& ns,
+ BSONObj keys,
+ boost::optional<BSONObj> writeConcernObj) {
+ dropIndex(ns, genIndexName(keys), writeConcernObj);
}
-void DBClientBase::dropIndex(const string& ns, const string& indexName) {
+void DBClientBase::dropIndex(const string& ns,
+ const string& indexName,
+ boost::optional<BSONObj> writeConcernObj) {
+ BSONObjBuilder cmdBuilder;
+ cmdBuilder.append("dropIndexes", nsToCollectionSubstring(ns));
+ cmdBuilder.append("index", indexName);
+ if (writeConcernObj) {
+ cmdBuilder.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj);
+ }
BSONObj info;
- if (!runCommand(nsToDatabase(ns),
- BSON("dropIndexes" << nsToCollectionSubstring(ns) << "index" << indexName),
- info)) {
+ if (!runCommand(nsToDatabase(ns), cmdBuilder.obj(), info)) {
LOGV2_DEBUG(20118,
logSeverityV1toV2(_logLevel).toInt(),
"dropIndex failed: {info}",
@@ -932,14 +1016,15 @@ void DBClientBase::dropIndex(const string& ns, const string& indexName) {
}
}
-void DBClientBase::dropIndexes(const string& ns) {
+void DBClientBase::dropIndexes(const string& ns, boost::optional<BSONObj> writeConcernObj) {
+ BSONObjBuilder cmdBuilder;
+ cmdBuilder.append("dropIndexes", nsToCollectionSubstring(ns));
+ cmdBuilder.append("index", "*");
+ if (writeConcernObj) {
+ cmdBuilder.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj);
+ }
BSONObj info;
- uassert(10008,
- "dropIndexes failed",
- runCommand(nsToDatabase(ns),
- BSON("dropIndexes" << nsToCollectionSubstring(ns) << "index"
- << "*"),
- info));
+ uassert(10008, "dropIndexes failed", runCommand(nsToDatabase(ns), cmdBuilder.obj(), info));
}
void DBClientBase::reIndex(const string& ns) {
@@ -971,7 +1056,9 @@ string DBClientBase::genIndexName(const BSONObj& keys) {
return ss.str();
}
-void DBClientBase::createIndexes(StringData ns, const std::vector<const IndexSpec*>& descriptors) {
+void DBClientBase::createIndexes(StringData ns,
+ const std::vector<const IndexSpec*>& descriptors,
+ boost::optional<BSONObj> writeConcernObj) {
BSONObjBuilder command;
command.append("createIndexes", nsToCollectionSubstring(ns));
{
@@ -980,6 +1067,9 @@ void DBClientBase::createIndexes(StringData ns, const std::vector<const IndexSpe
indexes.append(desc->toBSON());
}
}
+ if (writeConcernObj) {
+ command.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj);
+ }
const BSONObj commandObj = command.done();
BSONObj infoObj;
@@ -990,7 +1080,9 @@ void DBClientBase::createIndexes(StringData ns, const std::vector<const IndexSpe
}
}
-void DBClientBase::createIndexes(StringData ns, const std::vector<BSONObj>& specs) {
+void DBClientBase::createIndexes(StringData ns,
+ const std::vector<BSONObj>& specs,
+ boost::optional<BSONObj> writeConcernObj) {
BSONObjBuilder command;
command.append("createIndexes", nsToCollectionSubstring(ns));
{
@@ -999,6 +1091,9 @@ void DBClientBase::createIndexes(StringData ns, const std::vector<BSONObj>& spec
indexes.append(spec);
}
}
+ if (writeConcernObj) {
+ command.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj);
+ }
const BSONObj commandObj = command.done();
BSONObj infoObj;
diff --git a/src/mongo/client/dbclient_base.h b/src/mongo/client/dbclient_base.h
index 5a3dfcfc766..5db09d9412e 100644
--- a/src/mongo/client/dbclient_base.h
+++ b/src/mongo/client/dbclient_base.h
@@ -73,27 +73,31 @@ std::string nsGetCollection(const std::string& ns);
* them as "final" or "override" as appropriate.
*/
class DBClientQueryInterface {
- virtual std::unique_ptr<DBClientCursor> query(const NamespaceStringOrUUID& nsOrUuid,
- Query query,
- int nToReturn = 0,
- int nToSkip = 0,
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0) = 0;
+ virtual std::unique_ptr<DBClientCursor> query(
+ const NamespaceStringOrUUID& nsOrUuid,
+ Query query,
+ int nToReturn = 0,
+ int nToSkip = 0,
+ const BSONObj* fieldsToReturn = nullptr,
+ int queryOptions = 0,
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) = 0;
virtual unsigned long long query(std::function<void(const BSONObj&)> f,
const NamespaceStringOrUUID& nsOrUuid,
Query query,
const BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
- int batchSize = 0) = 0;
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) = 0;
virtual unsigned long long query(std::function<void(DBClientCursorBatchIterator&)> f,
const NamespaceStringOrUUID& nsOrUuid,
Query query,
const BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
- int batchSize = 0) = 0;
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) = 0;
};
/**
@@ -119,7 +123,8 @@ public:
virtual BSONObj findOne(const std::string& ns,
const Query& query,
const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0);
+ int queryOptions = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none);
/** query N objects from the database into an array. makes sense mostly when you want a small
* number of results. if a huge number, use query() and iterate the cursor.
@@ -130,7 +135,8 @@ public:
int nToReturn,
int nToSkip = 0,
const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0);
+ int queryOptions = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none);
/**
* @return a pair with a single object that matches the filter within the collection specified
@@ -140,9 +146,11 @@ public:
* the query, an empty BSONObj is returned.
* @throws AssertionException
*/
- virtual std::pair<BSONObj, NamespaceString> findOneByUUID(const std::string& db,
- UUID uuid,
- const BSONObj& filter);
+ virtual std::pair<BSONObj, NamespaceString> findOneByUUID(
+ const std::string& db,
+ UUID uuid,
+ const BSONObj& filter,
+ boost::optional<BSONObj> readConcernObj = boost::none);
virtual std::string getServerAddress() const = 0;
@@ -351,11 +359,12 @@ public:
/** count number of objects in collection ns that match the query criteria specified
throws UserAssertion if database returns an error
*/
- virtual long long count(NamespaceStringOrUUID nsOrUuid,
+ virtual long long count(const NamespaceStringOrUUID nsOrUuid,
const BSONObj& query = BSONObj(),
int options = 0,
int limit = 0,
- int skip = 0);
+ int skip = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none);
static std::string createPasswordDigest(const std::string& username,
const std::string& clearTextPassword);
@@ -390,7 +399,8 @@ public:
long long size = 0,
bool capped = false,
int max = 0,
- BSONObj* info = nullptr);
+ BSONObj* info = nullptr,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
/** Get error result from the last write operation (insert/update/delete) on this connection.
db doesn't change the command's behavior - it is just for auth checks.
@@ -483,8 +493,10 @@ public:
* @param keys Document describing keys and index types. You must provide at least one
* field and its direction.
*/
- void createIndex(StringData ns, const BSONObj& keys) {
- return createIndex(ns, IndexSpec().addKeys(keys));
+ void createIndex(StringData ns,
+ const BSONObj& keys,
+ boost::optional<BSONObj> writeConcernObj = boost::none) {
+ return createIndex(ns, IndexSpec().addKeys(keys), writeConcernObj);
}
/** Create an index on the collection 'ns' as described by the given
@@ -495,20 +507,26 @@ public:
* @param descriptor Configuration object describing the index to create. The
* descriptor must describe at least one key and index type.
*/
- virtual void createIndex(StringData ns, const IndexSpec& descriptor) {
+ virtual void createIndex(StringData ns,
+ const IndexSpec& descriptor,
+ boost::optional<BSONObj> writeConcernObj = boost::none) {
std::vector<const IndexSpec*> toBuild;
toBuild.push_back(&descriptor);
- createIndexes(ns, toBuild);
+ createIndexes(ns, toBuild, writeConcernObj);
}
- virtual void createIndexes(StringData ns, const std::vector<const IndexSpec*>& descriptor);
+ virtual void createIndexes(StringData ns,
+ const std::vector<const IndexSpec*>& descriptor,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
/**
* Creates indexes on the collection 'ns' as described by 'specs'.
*
* Failure to construct the indexes is reported by throwing an AssertionException.
*/
- virtual void createIndexes(StringData ns, const std::vector<BSONObj>& specs);
+ virtual void createIndexes(StringData ns,
+ const std::vector<BSONObj>& specs,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
/**
* Lists indexes on the collection 'nsOrUuid'.
@@ -523,13 +541,18 @@ public:
virtual std::list<BSONObj> getReadyIndexSpecs(const NamespaceStringOrUUID& nsOrUuid,
int options = 0);
- virtual void dropIndex(const std::string& ns, BSONObj keys);
- virtual void dropIndex(const std::string& ns, const std::string& indexName);
+ virtual void dropIndex(const std::string& ns,
+ BSONObj keys,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
+ virtual void dropIndex(const std::string& ns,
+ const std::string& indexName,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
/**
drops all indexes for the collection
*/
- virtual void dropIndexes(const std::string& ns);
+ virtual void dropIndexes(const std::string& ns,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
virtual void reIndex(const std::string& ns);
@@ -600,13 +623,15 @@ public:
@return cursor. 0 if error (connection failure)
@throws AssertionException
*/
- std::unique_ptr<DBClientCursor> query(const NamespaceStringOrUUID& nsOrUuid,
- Query query,
- int nToReturn = 0,
- int nToSkip = 0,
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0) override;
+ std::unique_ptr<DBClientCursor> query(
+ const NamespaceStringOrUUID& nsOrUuid,
+ Query query,
+ int nToReturn = 0,
+ int nToSkip = 0,
+ const BSONObj* fieldsToReturn = nullptr,
+ int queryOptions = 0,
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) override;
/** Uses QueryOption_Exhaust, when available and specified in 'queryOptions'.
@@ -628,14 +653,16 @@ public:
Query query,
const BSONObj* fieldsToReturn = nullptr,
int queryOptions = QueryOption_Exhaust,
- int batchSize = 0) final;
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) final;
unsigned long long query(std::function<void(DBClientCursorBatchIterator&)> f,
const NamespaceStringOrUUID& nsOrUuid,
Query query,
const BSONObj* fieldsToReturn = nullptr,
int queryOptions = QueryOption_Exhaust,
- int batchSize = 0) override;
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) override;
/** don't use this - called automatically by DBClientCursor for you
@@ -651,22 +678,39 @@ public:
/**
insert an object into the database
*/
- virtual void insert(const std::string& ns, BSONObj obj, int flags = 0);
+ virtual void insert(const std::string& ns,
+ BSONObj obj,
+ int flags = 0,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
/**
insert a vector of objects into the database
*/
- virtual void insert(const std::string& ns, const std::vector<BSONObj>& v, int flags = 0);
+ virtual void insert(const std::string& ns,
+ const std::vector<BSONObj>& v,
+ int flags = 0,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
/**
updates objects matching query
*/
- virtual void update(
- const std::string& ns, Query query, BSONObj obj, bool upsert = false, bool multi = false);
+ virtual void update(const std::string& ns,
+ Query query,
+ BSONObj obj,
+ bool upsert = false,
+ bool multi = false,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
- virtual void update(const std::string& ns, Query query, BSONObj obj, int flags);
+ virtual void update(const std::string& ns,
+ Query query,
+ BSONObj obj,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
- virtual void remove(const std::string& ns, Query query, int flags = 0);
+ virtual void remove(const std::string& ns,
+ Query query,
+ int flags = 0,
+ boost::optional<BSONObj> writeConcernObj = boost::none);
virtual bool isFailed() const = 0;
@@ -716,7 +760,8 @@ protected:
const BSONObj& query,
int options,
int limit,
- int skip);
+ int skip,
+ boost::optional<BSONObj> readConcernObj);
/**
* Look up the options available on this client. Caches the answer from
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index c7170733b1b..a5a6f3005ce 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -625,16 +625,18 @@ unsigned long long DBClientConnection::query(std::function<void(DBClientCursorBa
Query query,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize) {
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
if (!(queryOptions & QueryOption_Exhaust) || !(availableOptions() & QueryOption_Exhaust)) {
- return DBClientBase::query(f, nsOrUuid, query, fieldsToReturn, queryOptions, batchSize);
+ return DBClientBase::query(
+ f, nsOrUuid, query, fieldsToReturn, queryOptions, batchSize, readConcernObj);
}
// mask options
queryOptions &= (int)(QueryOption_NoCursorTimeout | QueryOption_SlaveOk | QueryOption_Exhaust);
- unique_ptr<DBClientCursor> c(
- this->query(nsOrUuid, query, 0, 0, fieldsToReturn, queryOptions, batchSize));
+ unique_ptr<DBClientCursor> c(this->query(
+ nsOrUuid, query, 0, 0, fieldsToReturn, queryOptions, batchSize, readConcernObj));
// Note that this->query will throw for network errors, so it is OK to return a numeric
// error code here.
uassert(13386, "socket error for mapping query", c.get());
diff --git a/src/mongo/client/dbclient_connection.h b/src/mongo/client/dbclient_connection.h
index 5d76701bc59..6c7a59b4995 100644
--- a/src/mongo/client/dbclient_connection.h
+++ b/src/mongo/client/dbclient_connection.h
@@ -151,16 +151,24 @@ public:
*/
void logout(const std::string& dbname, BSONObj& info) override;
- std::unique_ptr<DBClientCursor> query(const NamespaceStringOrUUID& nsOrUuid,
- Query query = Query(),
- int nToReturn = 0,
- int nToSkip = 0,
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0) override {
+ std::unique_ptr<DBClientCursor> query(
+ const NamespaceStringOrUUID& nsOrUuid,
+ Query query = Query(),
+ int nToReturn = 0,
+ int nToSkip = 0,
+ const BSONObj* fieldsToReturn = nullptr,
+ int queryOptions = 0,
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) override {
checkConnection();
- return DBClientBase::query(
- nsOrUuid, query, nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize);
+ return DBClientBase::query(nsOrUuid,
+ query,
+ nToReturn,
+ nToSkip,
+ fieldsToReturn,
+ queryOptions,
+ batchSize,
+ readConcernObj);
}
unsigned long long query(std::function<void(DBClientCursorBatchIterator&)> f,
@@ -168,7 +176,8 @@ public:
Query query,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize = 0) override;
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) override;
using DBClientBase::runCommandWithTarget;
std::pair<rpc::UniqueReply, DBClientBase*> runCommandWithTarget(OpMsgRequest request) override;
diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp
index 77d5c802eb7..60034c05514 100644
--- a/src/mongo/client/dbclient_cursor.cpp
+++ b/src/mongo/client/dbclient_cursor.cpp
@@ -153,6 +153,12 @@ Message DBClientCursor::_assembleInit() {
// QueryRequest doesn't handle $readPreference.
cmd = BSONObjBuilder(std::move(cmd)).append(readPref).obj();
}
+ if (!cmd.hasField(repl::ReadConcernArgs::kReadConcernFieldName) && _readConcernObj) {
+ cmd = BSONObjBuilder(std::move(cmd))
+ .append(repl::ReadConcernArgs::kReadConcernFieldName, *_readConcernObj)
+ .obj();
+ }
+
return assembleCommandRequest(_client, ns.db(), opts, std::move(cmd));
}
// else use legacy OP_QUERY request.
@@ -516,7 +522,8 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize)
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj)
: DBClientCursor(client,
nsOrUuid,
query,
@@ -526,7 +533,8 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
fieldsToReturn,
queryOptions,
batchSize,
- {}) {}
+ {},
+ readConcernObj) {}
DBClientCursor::DBClientCursor(DBClientBase* client,
const NamespaceStringOrUUID& nsOrUuid,
@@ -543,7 +551,8 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
nullptr, // fieldsToReturn
queryOptions,
0,
- std::move(initialBatch)) {} // batchSize
+ std::move(initialBatch), // batchSize
+ boost::none) {}
DBClientCursor::DBClientCursor(DBClientBase* client,
const NamespaceStringOrUUID& nsOrUuid,
@@ -554,7 +563,8 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
const BSONObj* fieldsToReturn,
int queryOptions,
int batchSize,
- std::vector<BSONObj> initialBatch)
+ std::vector<BSONObj> initialBatch,
+ boost::optional<BSONObj> readConcernObj)
: batch{std::move(initialBatch)},
_client(client),
_originalHost(_client->getServerAddress()),
@@ -572,7 +582,8 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
cursorId(cursorId),
_ownCursor(true),
wasError(false),
- _enabledBSONVersion(Validator<BSONObj>::enabledBSONVersion()) {
+ _enabledBSONVersion(Validator<BSONObj>::enabledBSONVersion()),
+ _readConcernObj(readConcernObj) {
if (queryOptions & QueryOptionLocal_forceOpQuery) {
// Legacy OP_QUERY does not support UUIDs.
invariant(!_nsOrUuid.uuid());
diff --git a/src/mongo/client/dbclient_cursor.h b/src/mongo/client/dbclient_cursor.h
index b0e1551802b..33fb9f2e3c9 100644
--- a/src/mongo/client/dbclient_cursor.h
+++ b/src/mongo/client/dbclient_cursor.h
@@ -152,7 +152,8 @@ public:
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
- int bs);
+ int bs,
+ boost::optional<BSONObj> readConcernObj = boost::none);
DBClientCursor(DBClientBase* client,
const NamespaceStringOrUUID& nsOrUuid,
@@ -273,7 +274,8 @@ private:
const BSONObj* fieldsToReturn,
int queryOptions,
int bs,
- std::vector<BSONObj> initialBatch);
+ std::vector<BSONObj> initialBatch,
+ boost::optional<BSONObj> readConcernObj);
int nextBatchSize();
@@ -307,6 +309,7 @@ private:
boost::optional<long long> _term;
boost::optional<repl::OpTime> _lastKnownCommittedOpTime;
boost::optional<BSONObj> _postBatchResumeToken;
+ boost::optional<BSONObj> _readConcernObj;
void dataReceived(const Message& reply) {
bool retry;
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index 126b7d5f03b..d7744d2c841 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -522,20 +522,33 @@ void DBClientReplicaSet::logout(const string& dbname, BSONObj& info) {
// ------------- simple functions -----------------
-void DBClientReplicaSet::insert(const string& ns, BSONObj obj, int flags) {
- checkMaster()->insert(ns, obj, flags);
+void DBClientReplicaSet::insert(const string& ns,
+ BSONObj obj,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
+ checkMaster()->insert(ns, obj, flags, writeConcernObj);
}
-void DBClientReplicaSet::insert(const string& ns, const vector<BSONObj>& v, int flags) {
- checkMaster()->insert(ns, v, flags);
+void DBClientReplicaSet::insert(const string& ns,
+ const vector<BSONObj>& v,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
+ checkMaster()->insert(ns, v, flags, writeConcernObj);
}
-void DBClientReplicaSet::remove(const string& ns, Query obj, int flags) {
- checkMaster()->remove(ns, obj, flags);
+void DBClientReplicaSet::remove(const string& ns,
+ Query obj,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
+ checkMaster()->remove(ns, obj, flags, writeConcernObj);
}
-void DBClientReplicaSet::update(const string& ns, Query query, BSONObj obj, int flags) {
- return checkMaster()->update(ns, query, obj, flags);
+void DBClientReplicaSet::update(const string& ns,
+ Query query,
+ BSONObj obj,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
+ return checkMaster()->update(ns, query, obj, flags, writeConcernObj);
}
unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID& nsOrUuid,
@@ -544,7 +557,8 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize) {
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(query.obj, queryOptions));
invariant(nsOrUuid.nss());
const string ns = nsOrUuid.nss()->ns();
@@ -573,8 +587,14 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID
break;
}
- unique_ptr<DBClientCursor> cursor = conn->query(
- nsOrUuid, query, nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize);
+ unique_ptr<DBClientCursor> cursor = conn->query(nsOrUuid,
+ query,
+ nToReturn,
+ nToSkip,
+ fieldsToReturn,
+ queryOptions,
+ batchSize,
+ readConcernObj);
return checkSlaveQueryResult(std::move(cursor));
} catch (const DBException& ex) {
@@ -599,14 +619,21 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID
"dbclient_rs query to primary node in {getMonitor_getName}",
"getMonitor_getName"_attr = _getMonitor()->getName());
- return checkMaster()->query(
- nsOrUuid, query, nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize);
+ return checkMaster()->query(nsOrUuid,
+ query,
+ nToReturn,
+ nToSkip,
+ fieldsToReturn,
+ queryOptions,
+ batchSize,
+ readConcernObj);
}
BSONObj DBClientReplicaSet::findOne(const string& ns,
const Query& query,
const BSONObj* fieldsToReturn,
- int queryOptions) {
+ int queryOptions,
+ boost::optional<BSONObj> readConcernObj) {
shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(query.obj, queryOptions));
if (_isSecondaryQuery(ns, query.obj, *readPref)) {
LOGV2_DEBUG(20135,
@@ -633,7 +660,7 @@ BSONObj DBClientReplicaSet::findOne(const string& ns,
break;
}
- return conn->findOne(ns, query, fieldsToReturn, queryOptions);
+ return conn->findOne(ns, query, fieldsToReturn, queryOptions, readConcernObj);
} catch (const DBException& ex) {
const Status status = ex.toStatus(str::stream() << "can't findone replica set node "
<< _lastSlaveOkHost.toString());
@@ -656,7 +683,7 @@ BSONObj DBClientReplicaSet::findOne(const string& ns,
"dbclient_rs findOne to primary node in {getMonitor_getName}",
"getMonitor_getName"_attr = _getMonitor()->getName());
- return checkMaster()->findOne(ns, query, fieldsToReturn, queryOptions);
+ return checkMaster()->findOne(ns, query, fieldsToReturn, queryOptions, readConcernObj);
}
void DBClientReplicaSet::killCursor(const NamespaceString& ns, long long cursorID) {
diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h
index 48bb2c01c7e..9603842f7d9 100644
--- a/src/mongo/client/dbclient_rs.h
+++ b/src/mongo/client/dbclient_rs.h
@@ -89,29 +89,45 @@ public:
// ----------- simple functions --------------
/** throws userassertion "no master found" */
- std::unique_ptr<DBClientCursor> query(const NamespaceStringOrUUID& nsOrUuid,
- Query query,
- int nToReturn = 0,
- int nToSkip = 0,
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0) override;
+ std::unique_ptr<DBClientCursor> query(
+ const NamespaceStringOrUUID& nsOrUuid,
+ Query query,
+ int nToReturn = 0,
+ int nToSkip = 0,
+ const BSONObj* fieldsToReturn = nullptr,
+ int queryOptions = 0,
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) override;
/** throws userassertion "no master found" */
BSONObj findOne(const std::string& ns,
const Query& query,
const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0) override;
+ int queryOptions = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) override;
- void insert(const std::string& ns, BSONObj obj, int flags = 0) override;
+ void insert(const std::string& ns,
+ BSONObj obj,
+ int flags = 0,
+ boost::optional<BSONObj> writeConcernObj = boost::none) override;
/** insert multiple objects. Note that single object insert is asynchronous, so this version
is only nominally faster and not worth a special effort to try to use. */
- void insert(const std::string& ns, const std::vector<BSONObj>& v, int flags = 0) override;
-
- void remove(const std::string& ns, Query obj, int flags) override;
-
- void update(const std::string& ns, Query query, BSONObj obj, int flags) override;
+ void insert(const std::string& ns,
+ const std::vector<BSONObj>& v,
+ int flags = 0,
+ boost::optional<BSONObj> writeConcernObj = boost::none) override;
+
+ void remove(const std::string& ns,
+ Query obj,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj = boost::none) override;
+
+ void update(const std::string& ns,
+ Query query,
+ BSONObj obj,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj = boost::none) override;
void killCursor(const NamespaceString& ns, long long cursorID) override;
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 4f444b9f755..c76f86163b3 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -58,6 +58,7 @@
#include "mongo/db/op_observer.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/repl/isself.h"
+#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/durable_catalog.h"
@@ -311,7 +312,9 @@ void Cloner::copy(OperationContext* opCtx,
from_collection,
query,
nullptr,
- options);
+ options,
+ 0 /* batchSize */,
+ repl::ReadConcernArgs::kImplicitDefault);
}
uassert(ErrorCodes::PrimarySteppedDown,
diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp
index 84818dcf8c4..6c8a8d3d43e 100644
--- a/src/mongo/db/dbdirectclient.cpp
+++ b/src/mongo/db/dbdirectclient.cpp
@@ -166,15 +166,22 @@ unique_ptr<DBClientCursor> DBDirectClient::query(const NamespaceStringOrUUID& ns
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize) {
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
+ invariant(!readConcernObj, "passing readConcern to DBDirectClient functions is not supported");
return DBClientBase::query(
nsOrUuid, query, nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize);
}
-long long DBDirectClient::count(
- const NamespaceStringOrUUID nsOrUuid, const BSONObj& query, int options, int limit, int skip) {
+long long DBDirectClient::count(const NamespaceStringOrUUID nsOrUuid,
+ const BSONObj& query,
+ int options,
+ int limit,
+ int skip,
+ boost::optional<BSONObj> readConcernObj) {
+ invariant(!readConcernObj, "passing readConcern to DBDirectClient functions is not supported");
DirectClientScope directClientScope(_opCtx);
- BSONObj cmdObj = _countCmd(nsOrUuid, query, options, limit, skip);
+ BSONObj cmdObj = _countCmd(nsOrUuid, query, options, limit, skip, boost::none);
auto dbName = (nsOrUuid.uuid() ? nsOrUuid.dbname() : (*nsOrUuid.nss()).db().toString());
diff --git a/src/mongo/db/dbdirectclient.h b/src/mongo/db/dbdirectclient.h
index 9ff0dc9cd80..0554334d6fd 100644
--- a/src/mongo/db/dbdirectclient.h
+++ b/src/mongo/db/dbdirectclient.h
@@ -58,13 +58,15 @@ public:
// XXX: is this valid or useful?
void setOpCtx(OperationContext* opCtx);
- virtual std::unique_ptr<DBClientCursor> query(const NamespaceStringOrUUID& nsOrUuid,
- Query query,
- int nToReturn = 0,
- int nToSkip = 0,
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0);
+ virtual std::unique_ptr<DBClientCursor> query(
+ const NamespaceStringOrUUID& nsOrUuid,
+ Query query,
+ int nToReturn = 0,
+ int nToSkip = 0,
+ const BSONObj* fieldsToReturn = nullptr,
+ int queryOptions = 0,
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none);
virtual bool isFailed() const;
@@ -85,7 +87,8 @@ public:
const BSONObj& query = BSONObj(),
int options = 0,
int limit = 0,
- int skip = 0);
+ int skip = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none);
virtual ConnectionString::ConnectionType type() const;
diff --git a/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp b/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp
index 009168fd88d..70720b4442b 100644
--- a/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp
@@ -210,10 +210,7 @@ StatusWith<BSONObj> ReplicaSetNodeProcessInterface::_executeCommandOnPrimary(
void ReplicaSetNodeProcessInterface::_attachGenericCommandArgs(OperationContext* opCtx,
BSONObjBuilder* cmd) const {
- auto writeConcern = opCtx->getWriteConcern();
- if (!writeConcern.usedDefault) {
- cmd->append(WriteConcernOptions::kWriteConcernField, writeConcern.toBSON());
- }
+ cmd->append(WriteConcernOptions::kWriteConcernField, opCtx->getWriteConcern().toBSON());
logical_session_id_helpers::serializeLsidAndTxnNumber(opCtx, cmd);
}
diff --git a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
index 9fa7b4fc59d..dcca4ab8dd6 100644
--- a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
@@ -59,18 +59,6 @@ using write_ops::Insert;
using write_ops::Update;
using write_ops::UpdateOpEntry;
-namespace {
-
-// Attaches the write concern to the given batch request. If it looks like 'writeConcern' has
-// been default initialized to {w: 0, wtimeout: 0} then we do not bother attaching it.
-void attachWriteConcern(const WriteConcernOptions& writeConcern, BatchedCommandRequest* request) {
- if (!writeConcern.wMode.empty() || writeConcern.wNumNodes > 0) {
- request->setWriteConcern(writeConcern.toBSON());
- }
-}
-
-} // namespace
-
bool ShardServerProcessInterface::isSharded(OperationContext* opCtx, const NamespaceString& nss) {
Lock::DBLock dbLock(opCtx, nss.db(), MODE_IS);
Lock::CollectionLock collLock(opCtx, nss, MODE_IS);
@@ -123,8 +111,7 @@ Status ShardServerProcessInterface::insert(const boost::intrusive_ptr<Expression
BatchedCommandRequest insertCommand(
buildInsertOp(ns, std::move(objs), expCtx->bypassDocumentValidation));
- // If applicable, attach a write concern to the batched command request.
- attachWriteConcern(wc, &insertCommand);
+ insertCommand.setWriteConcern(wc.toBSON());
ClusterWriter::write(expCtx->opCtx, insertCommand, &stats, &response, targetEpoch);
@@ -144,8 +131,7 @@ StatusWith<MongoProcessInterface::UpdateResult> ShardServerProcessInterface::upd
BatchedCommandRequest updateCommand(buildUpdateOp(expCtx, ns, std::move(batch), upsert, multi));
- // If applicable, attach a write concern to the batched command request.
- attachWriteConcern(wc, &updateCommand);
+ updateCommand.setWriteConcern(wc.toBSON());
ClusterWriter::write(expCtx->opCtx, updateCommand, &stats, &response, targetEpoch);
@@ -176,10 +162,8 @@ void ShardServerProcessInterface::renameIfOptionsAndIndexesHaveNotChanged(
auto newCmdObj = CommonMongodProcessInterface::_convertRenameToInternalRename(
opCtx, renameCommandObj, originalCollectionOptions, originalIndexes);
BSONObjBuilder newCmdWithWriteConcernBuilder(std::move(newCmdObj));
- if (!opCtx->getWriteConcern().usedDefault) {
- newCmdWithWriteConcernBuilder.append(WriteConcernOptions::kWriteConcernField,
- opCtx->getWriteConcern().toBSON());
- }
+ newCmdWithWriteConcernBuilder.append(WriteConcernOptions::kWriteConcernField,
+ opCtx->getWriteConcern().toBSON());
newCmdObj = newCmdWithWriteConcernBuilder.done();
auto cachedDbInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, destinationNs.db()));
@@ -226,12 +210,10 @@ void ShardServerProcessInterface::createCollection(OperationContext* opCtx,
const BSONObj& cmdObj) {
auto cachedDbInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
- BSONObj finalCmdObj = cmdObj;
- if (!opCtx->getWriteConcern().usedDefault) {
- auto writeObj =
- BSON(WriteConcernOptions::kWriteConcernField << opCtx->getWriteConcern().toBSON());
- finalCmdObj = cmdObj.addField(writeObj.getField(WriteConcernOptions::kWriteConcernField));
- }
+ BSONObjBuilder finalCmdBuilder(cmdObj);
+ finalCmdBuilder.append(WriteConcernOptions::kWriteConcernField,
+ opCtx->getWriteConcern().toBSON());
+ BSONObj finalCmdObj = finalCmdBuilder.obj();
auto response =
executeCommandAgainstDatabasePrimary(opCtx,
dbName,
@@ -256,10 +238,8 @@ void ShardServerProcessInterface::createIndexesOnEmptyCollection(
BSONObjBuilder newCmdBuilder;
newCmdBuilder.append("createIndexes", ns.coll());
newCmdBuilder.append("indexes", indexSpecs);
- if (!opCtx->getWriteConcern().usedDefault) {
- newCmdBuilder.append(WriteConcernOptions::kWriteConcernField,
- opCtx->getWriteConcern().toBSON());
- }
+ newCmdBuilder.append(WriteConcernOptions::kWriteConcernField,
+ opCtx->getWriteConcern().toBSON());
auto cmdObj = newCmdBuilder.done();
auto response =
executeCommandAgainstDatabasePrimary(opCtx,
@@ -285,10 +265,8 @@ void ShardServerProcessInterface::dropCollection(OperationContext* opCtx,
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, ns.db()));
BSONObjBuilder newCmdBuilder;
newCmdBuilder.append("drop", ns.coll());
- if (!opCtx->getWriteConcern().usedDefault) {
- newCmdBuilder.append(WriteConcernOptions::kWriteConcernField,
- opCtx->getWriteConcern().toBSON());
- }
+ newCmdBuilder.append(WriteConcernOptions::kWriteConcernField,
+ opCtx->getWriteConcern().toBSON());
auto cmdObj = newCmdBuilder.done();
auto response =
executeCommandAgainstDatabasePrimary(opCtx,
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index 5b3556b6089..eeb614813ef 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -108,13 +108,16 @@ RemoteCursor openChangeStreamNewShardMonitor(const boost::intrusive_ptr<Expressi
aggReq.setFromMongos(true);
aggReq.setNeedsMerge(true);
aggReq.setBatchSize(0);
- auto configCursor =
- establishCursors(expCtx->opCtx,
- expCtx->mongoProcessInterface->taskExecutor,
- aggReq.getNamespaceString(),
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- {{configShard->getId(), aggReq.serializeToCommandObj().toBson()}},
- false);
+ auto cmdObjWithRWC = applyReadWriteConcern(expCtx->opCtx,
+ true, /* appendRC */
+ !expCtx->explain, /* appendWC */
+ aggReq.serializeToCommandObj().toBson());
+ auto configCursor = establishCursors(expCtx->opCtx,
+ expCtx->mongoProcessInterface->taskExecutor,
+ aggReq.getNamespaceString(),
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ {{configShard->getId(), cmdObjWithRWC}},
+ false);
invariant(configCursor.size() == 1);
return std::move(*configCursor.begin());
}
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index cd308aae77a..dd80516535c 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -835,6 +835,10 @@ Status QueryRequest::init(int ntoskip,
} else {
_filter = queryObj.getOwned();
}
+ // It's not possible to specify readConcern in a legacy query message, so initialize it to
+ // an empty readConcern object, i.e., equivalent to `readConcern: {}`. This ensures that
+ // mongos passes this empty readConcern to shards.
+ _readConcern = BSONObj();
} else {
// This is the debugging code path.
_filter = queryObj.getOwned();
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 54588856e58..dbe4ba193cf 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -180,7 +180,9 @@ Status makeNoopWriteIfNeeded(OperationContext* opCtx, LogicalTime clusterTime) {
"admin",
BSON("appendOplogNote"
<< 1 << "maxClusterTime" << clusterTime.asTimestamp() << "data"
- << BSON("noop write for afterClusterTime read concern" << 1)),
+ << BSON("noop write for afterClusterTime read concern" << 1)
+ << WriteConcernOptions::kWriteConcernField
+ << WriteConcernOptions::kImplicitDefault),
Shard::RetryPolicy::kIdempotent);
status = swRes.getStatus();
std::get<1>(myWriteRequest)->set(status);
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index e983f6acbd4..4425209c8db 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -895,6 +895,7 @@ env.Library(
LIBDEPS=[
'oplog_entry',
'optime',
+ 'read_concern_args',
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/client/fetcher',
'$BUILD_DIR/mongo/db/namespace_string',
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index b4c54057e31..c98cb47d034 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -132,7 +132,12 @@ BaseCloner::AfterStageBehavior CollectionCloner::CollectionClonerStage::run() {
}
BaseCloner::AfterStageBehavior CollectionCloner::countStage() {
- auto count = getClient()->count(_sourceDbAndUuid, {} /* Query */, QueryOption_SlaveOk);
+ auto count = getClient()->count(_sourceDbAndUuid,
+ {} /* Query */,
+ QueryOption_SlaveOk,
+ 0 /* limit */,
+ 0 /* skip */,
+ ReadConcernArgs::kImplicitDefault);
// The count command may return a negative value after an unclean shutdown,
// so we set it to zero here to avoid aborting the collection clone.
@@ -231,7 +236,8 @@ void CollectionCloner::runQuery() {
nullptr /* fieldsToReturn */,
QueryOption_NoCursorTimeout | QueryOption_SlaveOk |
(collectionClonerUsesExhaust ? QueryOption_Exhaust : 0),
- _collectionClonerBatchSize);
+ _collectionClonerBatchSize,
+ ReadConcernArgs::kImplicitDefault);
} catch (...) {
auto status = exceptionToStatus();
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 6bc07e723b7..39ad438a288 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -1736,7 +1736,8 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
Status InitialSyncer::_scheduleLastOplogEntryFetcher_inlock(
Fetcher::CallbackFn callback, LastOplogEntryFetcherRetryStrategy retryStrategy) {
BSONObj query = BSON("find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1)
- << "limit" << 1);
+ << "limit" << 1 << ReadConcernArgs::kReadConcernFieldName
+ << ReadConcernArgs::kImplicitDefault);
_lastOplogEntryFetcher = std::make_unique<Fetcher>(
_exec,
diff --git a/src/mongo/db/repl/oplog_interface_remote.cpp b/src/mongo/db/repl/oplog_interface_remote.cpp
index a71aae7cb15..883596318b9 100644
--- a/src/mongo/db/repl/oplog_interface_remote.cpp
+++ b/src/mongo/db/repl/oplog_interface_remote.cpp
@@ -34,6 +34,7 @@
#include "mongo/client/dbclient_base.h"
#include "mongo/client/dbclient_cursor.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/repl/read_concern_args.h"
#include "mongo/util/str.h"
namespace mongo {
@@ -83,8 +84,14 @@ std::unique_ptr<OplogInterface::Iterator> OplogInterfaceRemote::makeIterator() c
const Query query = Query().sort(BSON("$natural" << -1));
const BSONObj fields = BSON("ts" << 1 << "t" << 1);
return std::unique_ptr<OplogInterface::Iterator>(
- new OplogIteratorRemote(_getConnection()->query(
- NamespaceString(_collectionName), query, 0, 0, &fields, 0, _batchSize)));
+ new OplogIteratorRemote(_getConnection()->query(NamespaceString(_collectionName),
+ query,
+ 0,
+ 0,
+ &fields,
+ 0,
+ _batchSize,
+ ReadConcernArgs::kImplicitDefault)));
}
std::unique_ptr<TransactionHistoryIteratorBase>
diff --git a/src/mongo/db/repl/read_concern_args.cpp b/src/mongo/db/repl/read_concern_args.cpp
index 2ef071a38cc..0c446ce4780 100644
--- a/src/mongo/db/repl/read_concern_args.cpp
+++ b/src/mongo/db/repl/read_concern_args.cpp
@@ -57,6 +57,11 @@ const ReadConcernArgs& ReadConcernArgs::get(const OperationContext* opCtx) {
}
+// The "kImplicitDefault" read concern, used by internal operations, is deliberately empty (no
+// 'level' specified). This allows internal operations to specify a read concern, while still
+// allowing it to be either local or available on sharded secondaries.
+const BSONObj ReadConcernArgs::kImplicitDefault;
+
ReadConcernArgs::ReadConcernArgs() : _specified(false) {}
ReadConcernArgs::ReadConcernArgs(boost::optional<ReadConcernLevel> level)
diff --git a/src/mongo/db/repl/read_concern_args.h b/src/mongo/db/repl/read_concern_args.h
index 1adba9777fc..46353040528 100644
--- a/src/mongo/db/repl/read_concern_args.h
+++ b/src/mongo/db/repl/read_concern_args.h
@@ -55,6 +55,8 @@ public:
static constexpr StringData kAtClusterTimeFieldName = "atClusterTime"_sd;
static constexpr StringData kLevelFieldName = "level"_sd;
+ static const BSONObj kImplicitDefault;
+
/**
* Represents the internal mechanism an operation uses to satisfy 'majority' read concern.
*/
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index fd8ce22e8d0..bf33e4334d7 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -185,9 +185,15 @@ TopologyVersion appendReplicationInfo(OperationContext* opCtx,
DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
if (cliConn && replAuthenticate(cliConn).isOK()) {
BSONObj first = conn->findOne((string) "local.oplog.$" + sourcename,
- Query().sort(BSON("$natural" << 1)));
+ Query().sort(BSON("$natural" << 1)),
+ nullptr /* fieldsToReturn */,
+ 0 /* queryOptions */,
+ ReadConcernArgs::kImplicitDefault);
BSONObj last = conn->findOne((string) "local.oplog.$" + sourcename,
- Query().sort(BSON("$natural" << -1)));
+ Query().sort(BSON("$natural" << -1)),
+ nullptr /* fieldsToReturn */,
+ 0 /* queryOptions */,
+ ReadConcernArgs::kImplicitDefault);
bb.appendDate("masterFirst", first["ts"].timestampTime());
bb.appendDate("masterLast", last["ts"].timestampTime());
const auto lag = (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp
index f51e5404d4b..6862dbf20bb 100644
--- a/src/mongo/db/repl/roll_back_local_operations_test.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp
@@ -326,7 +326,8 @@ public:
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize) override {
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) override {
if (_initFailuresLeft > 0) {
_initFailuresLeft--;
LOGV2(21657,
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index 8d06503b597..bc111c75397 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/cloner.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/replication_auth.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/str.h"
@@ -68,19 +69,21 @@ int RollbackSourceImpl::getRollbackId() const {
BSONObj RollbackSourceImpl::getLastOperation() const {
const Query query = Query().sort(BSON("$natural" << -1));
- return _getConnection()->findOne(_collectionName, query, nullptr, QueryOption_SlaveOk);
+ return _getConnection()->findOne(
+ _collectionName, query, nullptr, QueryOption_SlaveOk, ReadConcernArgs::kImplicitDefault);
}
BSONObj RollbackSourceImpl::findOne(const NamespaceString& nss, const BSONObj& filter) const {
return _getConnection()
- ->findOne(nss.toString(), filter, nullptr, QueryOption_SlaveOk)
+ ->findOne(
+ nss.toString(), filter, nullptr, QueryOption_SlaveOk, ReadConcernArgs::kImplicitDefault)
.getOwned();
}
std::pair<BSONObj, NamespaceString> RollbackSourceImpl::findOneByUUID(const std::string& db,
UUID uuid,
const BSONObj& filter) const {
- return _getConnection()->findOneByUUID(db, uuid, filter);
+ return _getConnection()->findOneByUUID(db, uuid, filter, ReadConcernArgs::kImplicitDefault);
}
void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* opCtx,
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index a7e289e3996..e75c1782192 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -173,9 +173,7 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeFirstOplogEntryFetcher(
<< "projection"
<< BSON(OplogEntryBase::kTimestampFieldName
<< 1 << OplogEntryBase::kTermFieldName << 1)
- << "readConcern"
- << BSON("level"
- << "local")),
+ << ReadConcernArgs::kReadConcernFieldName << ReadConcernArgs::kImplicitDefault),
[=](const StatusWith<Fetcher::QueryResponse>& response,
Fetcher::NextAction*,
BSONObjBuilder*) {
@@ -198,9 +196,7 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeRequiredOpTimeFetcher(HostAndP
BSON("find" << kLocalOplogNss.coll() << "oplogReplay" << true << "filter"
<< BSON("ts" << BSON("$gte" << _requiredOpTime.getTimestamp() << "$lte"
<< _requiredOpTime.getTimestamp()))
- << "readConcern"
- << BSON("level"
- << "local")),
+ << ReadConcernArgs::kReadConcernFieldName << ReadConcernArgs::kImplicitDefault),
[=](const StatusWith<Fetcher::QueryResponse>& response,
Fetcher::NextAction*,
BSONObjBuilder*) {
diff --git a/src/mongo/db/s/add_shard_cmd.cpp b/src/mongo/db/s/add_shard_cmd.cpp
index c2e8e3684a2..aab1f4d7b79 100644
--- a/src/mongo/db/s/add_shard_cmd.cpp
+++ b/src/mongo/db/s/add_shard_cmd.cpp
@@ -79,7 +79,7 @@ public:
private:
bool supportsWriteConcern() const override {
- return true;
+ return false;
}
// The command parameter happens to be string so it's historically been interpreted
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 4a1d614d34d..100855905b7 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -469,6 +469,17 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
waitForDelete,
migrateInfo.forceJumbo);
+ // In 4.4, commands sent to shards that accept writeConcern must always include a
+ // writeConcern. So if the MoveChunkRequest didn't add writeConcern (from
+ // secondaryThrottle), then we add the implicit server default writeConcern.
+ if (!builder.hasField(WriteConcernOptions::kWriteConcernField) &&
+ serverGlobalParams.featureCompatibility.isVersionInitialized() &&
+ serverGlobalParams.featureCompatibility.getVersion() ==
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ builder.append(WriteConcernOptions::kWriteConcernField,
+ WriteConcernOptions::kImplicitDefault);
+ }
+
stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kEnabled && _state != State::kRecovering) {
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index a3941cfde46..98766413fac 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -296,6 +296,17 @@ Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx,
_shardKeyPattern.toBSON(),
_args.getSecondaryThrottle());
+ if (serverGlobalParams.featureCompatibility.isVersion(
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44)) {
+ // In 4.4, commands sent to shards that accept writeConcern must always include a
+ // writeConcern. So if the StartChunkCloneRequest didn't add writeConcern (from
+ // secondaryThrottle), then we add the implicit server default writeConcern.
+ if (!cmdBuilder.hasField(WriteConcernOptions::kWriteConcernField)) {
+ cmdBuilder.append(WriteConcernOptions::kWriteConcernField,
+ WriteConcernOptions::kImplicitDefault);
+ }
+ }
+
auto startChunkCloneResponseStatus = _callRecipient(cmdBuilder.obj());
if (!startChunkCloneResponseStatus.isOK()) {
return startChunkCloneResponseStatus.getStatus();
@@ -367,6 +378,17 @@ Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx) {
_shardKeyPattern.toBSON(),
_args.getSecondaryThrottle());
+ if (serverGlobalParams.featureCompatibility.isVersion(
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44)) {
+ // In 4.4, commands sent to shards that accept writeConcern must always include a
+ // writeConcern. So if the StartChunkCloneRequest didn't add writeConcern (from
+ // secondaryThrottle), then we add the implicit server default writeConcern.
+ if (!cmdBuilder.hasField(WriteConcernOptions::kWriteConcernField)) {
+ cmdBuilder.append(WriteConcernOptions::kWriteConcernField,
+ WriteConcernOptions::kImplicitDefault);
+ }
+ }
+
auto startChunkCloneResponseStatus = _callRecipient(cmdBuilder.obj());
if (!startChunkCloneResponseStatus.isOK()) {
return startChunkCloneResponseStatus.getStatus();
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index ead3d722055..02d7e5bcc31 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -41,6 +41,7 @@
#include "mongo/client/query.h"
#include "mongo/db/catalog/collection_catalog_helper.h"
#include "mongo/db/catalog_raii.h"
+#include "mongo/db/commands.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/logical_session_cache.h"
#include "mongo/db/namespace_string.h"
@@ -693,7 +694,8 @@ void ensureChunkVersionIsGreaterThan(OperationContext* opCtx,
newOpCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
- ensureChunkVersionIsGreaterThanRequestBSON,
+ CommandHelpers::appendMajorityWriteConcern(
+ ensureChunkVersionIsGreaterThanRequestBSON),
Shard::RetryPolicy::kIdempotent);
const auto ensureChunkVersionIsGreaterThanStatus =
Shard::CommandResponse::getEffectiveStatus(ensureChunkVersionIsGreaterThanResponse);
diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp
index 906757d353c..62c67614e2e 100644
--- a/src/mongo/db/s/shard_key_util.cpp
+++ b/src/mongo/db/s/shard_key_util.cpp
@@ -167,7 +167,8 @@ void validateShardKeyAgainstExistingIndexes(OperationContext* opCtx,
}
}
- auto countCmd = BSON("count" << nss.coll());
+ auto countCmd = BSON("count" << nss.coll() << repl::ReadConcernArgs::kReadConcernFieldName
+ << repl::ReadConcernArgs::kImplicitDefault);
auto countRes =
uassertStatusOK(primaryShard->runCommand(opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
diff --git a/src/mongo/db/sessions_collection.cpp b/src/mongo/db/sessions_collection.cpp
index 4e0e121abaf..b883bebf76c 100644
--- a/src/mongo/db/sessions_collection.cpp
+++ b/src/mongo/db/sessions_collection.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/create_indexes_gen.h"
#include "mongo/db/logical_session_id.h"
#include "mongo/db/ops/write_ops.h"
+#include "mongo/db/repl/read_concern_args.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/write_ops/batched_command_response.h"
@@ -234,7 +235,10 @@ LogicalSessionIdSet SessionsCollection::_doFindRemoved(
LogicalSessionIdSet removed{sessions.begin(), sessions.end()};
auto wrappedSend = [&](BSONObj batch) {
- auto swBatchResult = send(batch);
+ BSONObjBuilder batchWithReadConcernLocal(batch);
+ batchWithReadConcernLocal.append(repl::ReadConcernArgs::kReadConcernFieldName,
+ repl::ReadConcernArgs::kImplicitDefault);
+ auto swBatchResult = send(batchWithReadConcernLocal.obj());
auto result =
SessionsCollectionFetchResult::parse("SessionsCollectionFetchResult"_sd, swBatchResult);
@@ -279,7 +283,9 @@ BSONObj SessionsCollection::generateCreateIndexesCmd() {
createIndexes.setCreateIndexes(NamespaceString::kLogicalSessionsNamespace.coll());
createIndexes.setIndexes(std::move(indexes));
- return createIndexes.toBSON();
+ return BSONObjBuilder(createIndexes.toBSON())
+ .append(WriteConcernOptions::kWriteConcernField, WriteConcernOptions::kImplicitDefault)
+ .obj();
}
BSONObj SessionsCollection::generateCollModCmd() {
@@ -292,6 +298,8 @@ BSONObj SessionsCollection::generateCollModCmd() {
indexBuilder << "expireAfterSeconds" << localLogicalSessionTimeoutMinutes * 60;
indexBuilder.done();
+ collModCmdBuilder.append(WriteConcernOptions::kWriteConcernField,
+ WriteConcernOptions::kImplicitDefault);
collModCmdBuilder.done();
return collModCmdBuilder.obj();
diff --git a/src/mongo/db/write_concern_options.cpp b/src/mongo/db/write_concern_options.cpp
index 298941fbdef..7ff06c9d224 100644
--- a/src/mongo/db/write_concern_options.cpp
+++ b/src/mongo/db/write_concern_options.cpp
@@ -72,6 +72,12 @@ const BSONObj WriteConcernOptions::Default = BSONObj();
const BSONObj WriteConcernOptions::Acknowledged(BSON("w" << W_NORMAL));
const BSONObj WriteConcernOptions::Unacknowledged(BSON("w" << W_NONE));
const BSONObj WriteConcernOptions::Majority(BSON("w" << WriteConcernOptions::kMajority));
+
+// The "kImplicitDefault" write concern, used by internal operations, is deliberately empty (no
+// 'w' or 'wtimeout' specified). This allows internal operations to specify a write concern, while
+// still allowing it to be either w:1 or automatically upconverted to w:majority on configsvrs.
+const BSONObj WriteConcernOptions::kImplicitDefault;
+
constexpr Seconds WriteConcernOptions::kWriteConcernTimeoutSystem;
constexpr Seconds WriteConcernOptions::kWriteConcernTimeoutMigration;
constexpr Seconds WriteConcernOptions::kWriteConcernTimeoutSharding;
diff --git a/src/mongo/db/write_concern_options.h b/src/mongo/db/write_concern_options.h
index 07cd6a6e1d7..0d9cca66889 100644
--- a/src/mongo/db/write_concern_options.h
+++ b/src/mongo/db/write_concern_options.h
@@ -54,6 +54,7 @@ public:
static const BSONObj Unacknowledged;
static const BSONObj Majority;
static const BSONObj ConfigMajority;
+ static const BSONObj kImplicitDefault;
static constexpr StringData kWriteConcernField = "writeConcern"_sd;
static const char kMajority[]; // = "majority"
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
index 3bb3a845d3e..b360a283e2d 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
@@ -105,7 +105,8 @@ std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query(
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize) {
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
checkConnection();
try {
@@ -117,7 +118,8 @@ std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query(
nToSkip,
fieldsToReturn,
queryOptions,
- batchSize));
+ batchSize,
+ readConcernObj));
BSONArray resultsInCursor;
@@ -189,26 +191,37 @@ unsigned long long MockDBClientConnection::query(
mongo::Query query,
const mongo::BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize) {
- return DBClientBase::query(f, nsOrUuid, query, fieldsToReturn, queryOptions, batchSize);
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
+ return DBClientBase::query(
+ f, nsOrUuid, query, fieldsToReturn, queryOptions, batchSize, readConcernObj);
}
uint64_t MockDBClientConnection::getSockCreationMicroSec() const {
return _sockCreationTime;
}
-void MockDBClientConnection::insert(const string& ns, BSONObj obj, int flags) {
+void MockDBClientConnection::insert(const string& ns,
+ BSONObj obj,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
invariant(_remoteServer);
_remoteServer->insert(ns, obj, flags);
}
-void MockDBClientConnection::insert(const string& ns, const vector<BSONObj>& objList, int flags) {
+void MockDBClientConnection::insert(const string& ns,
+ const vector<BSONObj>& objList,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
for (vector<BSONObj>::const_iterator iter = objList.begin(); iter != objList.end(); ++iter) {
insert(ns, *iter, flags);
}
}
-void MockDBClientConnection::remove(const string& ns, Query query, int flags) {
+void MockDBClientConnection::remove(const string& ns,
+ Query query,
+ int flags,
+ boost::optional<BSONObj> writeConcernObj) {
invariant(_remoteServer);
_remoteServer->remove(ns, query, flags);
}
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h
index 5a8d9fa727e..4a7c4891db6 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.h
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h
@@ -118,21 +118,32 @@ public:
using DBClientBase::runCommandWithTarget;
std::pair<rpc::UniqueReply, DBClientBase*> runCommandWithTarget(OpMsgRequest request) override;
- std::unique_ptr<mongo::DBClientCursor> query(const NamespaceStringOrUUID& nsOrUuid,
- mongo::Query query = mongo::Query(),
- int nToReturn = 0,
- int nToSkip = 0,
- const mongo::BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0) override;
+ std::unique_ptr<mongo::DBClientCursor> query(
+ const NamespaceStringOrUUID& nsOrUuid,
+ mongo::Query query = mongo::Query(),
+ int nToReturn = 0,
+ int nToSkip = 0,
+ const mongo::BSONObj* fieldsToReturn = nullptr,
+ int queryOptions = 0,
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) override;
uint64_t getSockCreationMicroSec() const override;
- void insert(const std::string& ns, BSONObj obj, int flags = 0) override;
+ void insert(const std::string& ns,
+ BSONObj obj,
+ int flags = 0,
+ boost::optional<BSONObj> writeConcernObj = boost::none) override;
- void insert(const std::string& ns, const std::vector<BSONObj>& objList, int flags = 0) override;
+ void insert(const std::string& ns,
+ const std::vector<BSONObj>& objList,
+ int flags = 0,
+ boost::optional<BSONObj> writeConcernObj = boost::none) override;
- void remove(const std::string& ns, Query query, int flags = 0) override;
+ void remove(const std::string& ns,
+ Query query,
+ int flags = 0,
+ boost::optional<BSONObj> writeConcernObj = boost::none) override;
bool call(mongo::Message& toSend,
mongo::Message& response,
@@ -175,7 +186,8 @@ public:
mongo::Query query,
const mongo::BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
- int batchSize = 0) override;
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none) override;
//
// Unsupported methods (these are pure virtuals in the base class)
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
index 0f8c0f69d4d..9e37c54c23e 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
@@ -181,7 +181,8 @@ mongo::BSONArray MockRemoteDBServer::query(MockRemoteDBServer::InstanceID id,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
- int batchSize) {
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
checkIfUp(id);
if (_delayMilliSec > 0) {
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.h b/src/mongo/dbtests/mock/mock_remote_db_server.h
index db6361ac915..e346923acf2 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.h
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.h
@@ -168,7 +168,8 @@ public:
int nToSkip = 0,
const mongo::BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
- int batchSize = 0);
+ int batchSize = 0,
+ boost::optional<BSONObj> readConcernObj = boost::none);
//
// Getters
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index ff5c2d8ef4a..4e2bdcbac76 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -69,7 +69,7 @@ std::unique_ptr<QueryRequest> parseCmdObjectToQueryRequest(OperationContext* opC
// operation in a transaction, or not running in a transaction, then use the readConcern
// from the opCtx (which may be a cluster-wide default).
const auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
- qr->setReadConcern(readConcernArgs.toBSON()["readConcern"].Obj());
+ qr->setReadConcern(readConcernArgs.toBSONInner());
}
}
uassert(
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index 823360ac898..d7fc3224347 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -467,6 +467,30 @@ private:
BSONObjBuilder& result) const {
BatchWriteExecStats stats;
BatchedCommandResponse response;
+
+ // The batched request will only have WC if it was supplied by the client. Otherwise, the
+ // batched request should use the WC from the opCtx.
+ if (!batchedRequest.hasWriteConcern()) {
+ if (opCtx->getWriteConcern().usedDefault) {
+ // Pass writeConcern: {}, rather than {w: 1, wtimeout: 0}, so as to not override the
+ // configsvr w:majority upconvert.
+ batchedRequest.setWriteConcern(BSONObj());
+ } else {
+ batchedRequest.setWriteConcern(opCtx->getWriteConcern().toBSON());
+ }
+ }
+
+ // Write ops are never allowed to have writeConcern inside transactions. Normally
+ // disallowing WC on non-terminal commands in a transaction is handled earlier, during
+ // command dispatch. However, if this is a regular write operation being automatically
+ // retried inside a transaction (such as changing a document's shard key across shards),
+ // then batchedRequest will have a writeConcern (added by the if() above) from when it was
+ // initially run outside a transaction. Thus it's necessary to unconditionally clear the
+ // writeConcern when in a transaction.
+ if (TransactionRouter::get(opCtx)) {
+ batchedRequest.unsetWriteConcern();
+ }
+
ClusterWriter::write(opCtx, batchedRequest, &stats, &response);
bool updatedShardKey = false;
diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp
index 0988e6fff7f..aeeeb965b33 100644
--- a/src/mongo/s/query/cluster_aggregation_planner.cpp
+++ b/src/mongo/s/query/cluster_aggregation_planner.cpp
@@ -146,7 +146,10 @@ BSONObj createCommandForMergingShard(Document serializedCommand,
mergeCmd.remove("readConcern");
}
- return mergeCmd.freeze().toBson();
+ return applyReadWriteConcern(mergeCtx->opCtx,
+ !(txnRouter && mergingShardContributesData), /* appendRC */
+ !mergeCtx->explain, /* appendWC */
+ mergeCmd.freeze().toBson());
}
Status dispatchMergingPipeline(const boost::intrusive_ptr<ExpressionContext>& expCtx,
@@ -389,7 +392,10 @@ DispatchShardPipelineResults dispatchExchangeConsumerPipeline(
expCtx, serializedCommand, consumerPipelines.back(), boost::none, false);
requests.emplace_back(shardDispatchResults->exchangeSpec->consumerShards[idx],
- consumerCmdObj);
+ applyReadWriteConcern(opCtx,
+ true, /* appendRC */
+ !expCtx->explain, /* appendWC */
+ consumerCmdObj));
}
auto cursors = establishCursors(opCtx,
Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
diff --git a/src/mongo/s/request_types/merge_chunk_request_test.cpp b/src/mongo/s/request_types/merge_chunk_request_test.cpp
index 94a7bf68511..88d5d165148 100644
--- a/src/mongo/s/request_types/merge_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/merge_chunk_request_test.cpp
@@ -61,13 +61,13 @@ TEST(MergeChunkRequest, ConfigCommandtoBSON) {
<< BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000"
<< "validAfter" << Timestamp{100});
- BSONObj writeConcernObj = BSON("writeConcern" << BSON("w"
- << "majority"));
+ BSONObj writeConcernObj = BSON("w"
+ << "majority");
BSONObjBuilder cmdBuilder;
{
cmdBuilder.appendElements(serializedRequest);
- cmdBuilder.appendElements(writeConcernObj);
+ cmdBuilder.append("writeConcern", writeConcernObj);
}
auto request = assertGet(MergeChunkRequest::parseFromConfigCommand(serializedRequest));
diff --git a/src/mongo/s/request_types/merge_chunk_request_type.cpp b/src/mongo/s/request_types/merge_chunk_request_type.cpp
index 4bd9844578b..387bacc16b0 100644
--- a/src/mongo/s/request_types/merge_chunk_request_type.cpp
+++ b/src/mongo/s/request_types/merge_chunk_request_type.cpp
@@ -33,6 +33,7 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/util/bson_extract.h"
+#include "mongo/db/write_concern_options.h"
namespace mongo {
namespace {
@@ -131,7 +132,7 @@ BSONObj MergeChunkRequest::toConfigCommandBSON(const BSONObj& writeConcern) {
appendAsConfigCommand(&cmdBuilder);
// Tack on passed-in writeConcern
- cmdBuilder.appendElements(writeConcern);
+ cmdBuilder.append(WriteConcernOptions::kWriteConcernField, writeConcern);
return cmdBuilder.obj();
}
diff --git a/src/mongo/s/request_types/migration_secondary_throttle_options.cpp b/src/mongo/s/request_types/migration_secondary_throttle_options.cpp
index d739671b1e3..c143fc9ea60 100644
--- a/src/mongo/s/request_types/migration_secondary_throttle_options.cpp
+++ b/src/mongo/s/request_types/migration_secondary_throttle_options.cpp
@@ -98,8 +98,12 @@ StatusWith<MigrationSecondaryThrottleOptions> MigrationSecondaryThrottleOptions:
}
if (secondaryThrottle != kOn) {
- return Status(ErrorCodes::UnsupportedFormat,
- "Cannot specify write concern when secondaryThrottle is not set");
+ // Ignore the specified writeConcern, since it won't be used. This is necessary
+        // to normalize the otherwise non-standard way that moveChunk uses writeConcern (i.e.
+ // only using it when secondaryThrottle: true), so that shardsvrs can enforce always
+ // receiving writeConcern on internalClient connections (at the ServiceEntryPoint
+ // layer).
+ return MigrationSecondaryThrottleOptions(secondaryThrottle, boost::none);
}
writeConcernBSON = writeConcernElem.Obj().getOwned();
diff --git a/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp b/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
index b295e3f0b3d..59c3fb130bd 100644
--- a/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
+++ b/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
@@ -168,20 +168,24 @@ TEST(MigrationSecondaryThrottleOptions, DisabledInBalancerConfig) {
ASSERT_EQ(MigrationSecondaryThrottleOptions::kOff, options.getSecondaryThrottle());
}
-TEST(MigrationSecondaryThrottleOptions, ParseFailsDisabledInCommandBSONWriteConcernSpecified) {
- auto status = MigrationSecondaryThrottleOptions::createFromCommand(
- BSON("someOtherField" << 1 << "secondaryThrottle" << false << "writeConcern"
- << BSON("w"
- << "majority")));
- ASSERT_EQ(ErrorCodes::UnsupportedFormat, status.getStatus().code());
-}
-
-TEST(MigrationSecondaryThrottleOptions, ParseFailsNotSpecifiedInCommandBSONWriteConcernSpecified) {
- auto status = MigrationSecondaryThrottleOptions::createFromCommand(
- BSON("someOtherField" << 1 << "writeConcern"
- << BSON("w"
- << "majority")));
- ASSERT_EQ(ErrorCodes::UnsupportedFormat, status.getStatus().code());
+TEST(MigrationSecondaryThrottleOptions, IgnoreWriteConcernWhenSecondaryThrottleOff) {
+ MigrationSecondaryThrottleOptions options =
+ assertGet(MigrationSecondaryThrottleOptions::createFromCommand(
+            BSON("someOtherField" << 1 << "secondaryThrottle" << false << "writeConcern"
+ << BSON("w"
+ << "majority"))));
+ ASSERT_EQ(MigrationSecondaryThrottleOptions::kOff, options.getSecondaryThrottle());
+ ASSERT(!options.isWriteConcernSpecified());
+}
+
+TEST(MigrationSecondaryThrottleOptions, IgnoreWriteConcernWhenSecondaryThrottleAbsent) {
+ MigrationSecondaryThrottleOptions options =
+ assertGet(MigrationSecondaryThrottleOptions::createFromCommand(
+ BSON("someOtherField" << 1 << "writeConcern"
+ << BSON("w"
+ << "majority"))));
+ ASSERT_EQ(MigrationSecondaryThrottleOptions::kDefault, options.getSecondaryThrottle());
+ ASSERT(!options.isWriteConcernSpecified());
}
TEST(MigrationSecondaryThrottleOptions, EqualityOperatorSameValue) {
diff --git a/src/mongo/s/request_types/split_chunk_request_test.cpp b/src/mongo/s/request_types/split_chunk_request_test.cpp
index 1727c3aa792..5759519a2b4 100644
--- a/src/mongo/s/request_types/split_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/split_chunk_request_test.cpp
@@ -77,13 +77,13 @@ TEST(SplitChunkRequest, ConfigCommandtoBSON) {
<< "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
<< BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
<< "shard0000");
- BSONObj writeConcernObj = BSON("writeConcern" << BSON("w"
- << "majority"));
+ BSONObj writeConcernObj = BSON("w"
+ << "majority");
BSONObjBuilder cmdBuilder;
{
cmdBuilder.appendElements(serializedRequest);
- cmdBuilder.appendElements(writeConcernObj);
+ cmdBuilder.append("writeConcern", writeConcernObj);
}
auto request = assertGet(SplitChunkRequest::parseFromConfigCommand(serializedRequest));
diff --git a/src/mongo/s/request_types/split_chunk_request_type.cpp b/src/mongo/s/request_types/split_chunk_request_type.cpp
index 6773e413197..20e826c1400 100644
--- a/src/mongo/s/request_types/split_chunk_request_type.cpp
+++ b/src/mongo/s/request_types/split_chunk_request_type.cpp
@@ -33,6 +33,7 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/util/bson_extract.h"
+#include "mongo/db/write_concern_options.h"
namespace mongo {
@@ -120,7 +121,7 @@ BSONObj SplitChunkRequest::toConfigCommandBSON(const BSONObj& writeConcern) {
appendAsConfigCommand(&cmdBuilder);
// Tack on passed-in writeConcern
- cmdBuilder.appendElements(writeConcern);
+ cmdBuilder.append(WriteConcernOptions::kWriteConcernField, writeConcern);
return cmdBuilder.obj();
}
diff --git a/src/mongo/s/transaction_router.cpp b/src/mongo/s/transaction_router.cpp
index 5d5b2ee0417..a37ccd808b8 100644
--- a/src/mongo/s/transaction_router.cpp
+++ b/src/mongo/s/transaction_router.cpp
@@ -129,9 +129,9 @@ BSONObjBuilder appendFieldsForStartTransaction(BSONObj cmd,
repl::ReadConcernArgs readConcernArgs,
boost::optional<LogicalTime> atClusterTime,
bool doAppendStartTransaction) {
- auto cmdWithReadConcern = !readConcernArgs.isEmpty()
- ? appendReadConcernForTxn(std::move(cmd), readConcernArgs, atClusterTime)
- : std::move(cmd);
+ // startTransaction: true always requires readConcern, even if it's empty.
+ auto cmdWithReadConcern =
+ appendReadConcernForTxn(std::move(cmd), readConcernArgs, atClusterTime);
BSONObjBuilder bob(std::move(cmdWithReadConcern));
@@ -668,8 +668,8 @@ void TransactionRouter::Router::_assertAbortStatusIsOkOrNoSuchTransaction(
<< " from shard: " << response.shardId,
status.isOK() || status.code() == ErrorCodes::NoSuchTransaction);
- // abortTransaction is sent with no write concern, so there's no need to check for a write
- // concern error.
+ // abortTransaction is sent with "local" write concern (w: 1), so there's no need to check for a
+ // write concern error.
}
std::vector<ShardId> TransactionRouter::Router::_getPendingParticipants() const {
@@ -689,7 +689,10 @@ void TransactionRouter::Router::_clearPendingParticipants(OperationContext* opCt
// transactions will be left open if the retry does not re-target any of these shards.
std::vector<AsyncRequestsSender::Request> abortRequests;
for (const auto& participant : pendingParticipants) {
- abortRequests.emplace_back(participant, BSON("abortTransaction" << 1));
+ abortRequests.emplace_back(participant,
+ BSON("abortTransaction"
+ << 1 << WriteConcernOptions::kWriteConcernField
+ << WriteConcernOptions().toBSON()));
}
auto responses = gatherResponses(opCtx,
NamespaceString::kAdminDb,
@@ -1224,7 +1227,8 @@ void TransactionRouter::Router::implicitlyAbortTransaction(OperationContext* opC
p().terminationInitiated = true;
- auto abortCmd = BSON("abortTransaction" << 1);
+ auto abortCmd = BSON("abortTransaction" << 1 << WriteConcernOptions::kWriteConcernField
+ << WriteConcernOptions().toBSON());
std::vector<AsyncRequestsSender::Request> abortRequests;
for (const auto& participantEntry : o().participants) {
abortRequests.emplace_back(ShardId(participantEntry.first), abortCmd);
diff --git a/src/mongo/s/transaction_router_test.cpp b/src/mongo/s/transaction_router_test.cpp
index 18bc6833570..b5c313c4299 100644
--- a/src/mongo/s/transaction_router_test.cpp
+++ b/src/mongo/s/transaction_router_test.cpp
@@ -733,7 +733,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, CannotSpecifyReadConcernAfterFir
ErrorCodes::InvalidOptions);
}
-TEST_F(TransactionRouterTestWithDefaultSession, PassesThroughNoReadConcernToParticipants) {
+TEST_F(TransactionRouterTestWithDefaultSession, PassesThroughEmptyReadConcernToParticipants) {
repl::ReadConcernArgs::get(operationContext()) = repl::ReadConcernArgs();
TxnNumber txnNum{3};
@@ -745,8 +745,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, PassesThroughNoReadConcernToPart
BSONObj expectedNewObj = BSON("insert"
<< "test"
- << "startTransaction" << true << "coordinator" << true
- << "autocommit" << false << "txnNumber" << txnNum);
+ << "readConcern" << BSONObj() << "startTransaction" << true
+ << "coordinator" << true << "autocommit" << false << "txnNumber"
+ << txnNum);
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
diff --git a/src/mongo/shell/encrypted_dbclient_base.cpp b/src/mongo/shell/encrypted_dbclient_base.cpp
index 883453e11db..f08af53e152 100644
--- a/src/mongo/shell/encrypted_dbclient_base.cpp
+++ b/src/mongo/shell/encrypted_dbclient_base.cpp
@@ -500,15 +500,23 @@ JS::Value EncryptedDBClientBase::getCollection() const {
}
-std::unique_ptr<DBClientCursor> EncryptedDBClientBase::query(const NamespaceStringOrUUID& nsOrUuid,
- Query query,
- int nToReturn,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize) {
- return _conn->query(
- nsOrUuid, query, nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize);
+std::unique_ptr<DBClientCursor> EncryptedDBClientBase::query(
+ const NamespaceStringOrUUID& nsOrUuid,
+ Query query,
+ int nToReturn,
+ int nToSkip,
+ const BSONObj* fieldsToReturn,
+ int queryOptions,
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
+ return _conn->query(nsOrUuid,
+ query,
+ nToReturn,
+ nToSkip,
+ fieldsToReturn,
+ queryOptions,
+ batchSize,
+ readConcernObj);
}
bool EncryptedDBClientBase::isFailed() const {
diff --git a/src/mongo/shell/encrypted_dbclient_base.h b/src/mongo/shell/encrypted_dbclient_base.h
index 518c3631a5f..406c5884391 100644
--- a/src/mongo/shell/encrypted_dbclient_base.h
+++ b/src/mongo/shell/encrypted_dbclient_base.h
@@ -118,13 +118,15 @@ public:
void trace(JSTracer* trc) final;
using DBClientBase::query;
- std::unique_ptr<DBClientCursor> query(const NamespaceStringOrUUID& nsOrUuid,
- Query query,
- int nToReturn,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize) final;
+ std::unique_ptr<DBClientCursor> query(
+ const NamespaceStringOrUUID& nsOrUuid,
+ Query query,
+ int nToReturn,
+ int nToSkip,
+ const BSONObj* fieldsToReturn,
+ int queryOptions,
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj = boost::none) final;
bool isFailed() const final;
diff --git a/src/mongo/shell/feature_compatibility_version.js b/src/mongo/shell/feature_compatibility_version.js
index 0894a2a0fd2..599e7a4c79f 100644
--- a/src/mongo/shell/feature_compatibility_version.js
+++ b/src/mongo/shell/feature_compatibility_version.js
@@ -25,7 +25,13 @@ function checkFCV(adminDB, version, targetVersion) {
assert.eq(res.featureCompatibilityVersion.version, version, tojson(res));
assert.eq(res.featureCompatibilityVersion.targetVersion, targetVersion, tojson(res));
- let doc = adminDB.system.version.findOne({_id: "featureCompatibilityVersion"});
+ // This query specifies an explicit readConcern because some FCV tests pass a connection that
+    // has manually run isMaster with internalClient, and mongod expects internalClients (i.e. other
+ // cluster members) to include read/write concern (on commands that accept read/write concern).
+ let doc = adminDB.system.version.find({_id: "featureCompatibilityVersion"})
+ .limit(1)
+ .readConcern("local")
+ .next();
assert.eq(doc.version, version, tojson(doc));
assert.eq(doc.targetVersion, targetVersion, tojson(doc));
}
diff --git a/src/mongo/shell/mongo.js b/src/mongo/shell/mongo.js
index 2503a989d74..1fe4a678b20 100644
--- a/src/mongo/shell/mongo.js
+++ b/src/mongo/shell/mongo.js
@@ -484,6 +484,22 @@ Mongo.prototype.readMode = function() {
return this._readMode;
};
+/**
+ * Run a function while forcing a certain readMode, and then return the readMode to its original
+ * setting afterwards. Passes this connection to the given function, and returns the function's
+ * result.
+ */
+Mongo.prototype._runWithForcedReadMode = function(forcedReadMode, fn) {
+ let origReadMode = this.readMode();
+ this.forceReadMode(forcedReadMode);
+ try {
+ var res = fn(this);
+ } finally {
+ this.forceReadMode(origReadMode);
+ }
+ return res;
+};
+
//
// Write Concern can be set at the connection level, and is used for all write operations unless
// overridden at the collection level.
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 46f1e7bbab5..e20de8ff38f 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -1672,17 +1672,6 @@ var ReplSetTest = function(opts) {
"established on " + id);
};
- function _runInCommandReadMode(conn, fn) {
- let origReadMode = conn.readMode();
- conn.forceReadMode("commands");
- try {
- var res = fn();
- } finally {
- conn.forceReadMode(origReadMode);
- }
- return res;
- }
-
// Wait until the optime of the specified type reaches the primary's last applied optime. Blocks
// on all secondary nodes or just 'slaves', if specified. The timeout will reset if any of the
// secondaries makes progress.
@@ -1747,13 +1736,13 @@ var ReplSetTest = function(opts) {
var slaveName = slave.host;
var slaveConfigVersion =
- _runInCommandReadMode(slave,
- () => slave.getDB("local")['system.replset']
- .find()
- .readConcern("local")
- .limit(1)
- .next()
- .version);
+ slave._runWithForcedReadMode("commands",
+ () => slave.getDB("local")['system.replset']
+ .find()
+ .readConcern("local")
+ .limit(1)
+ .next()
+ .version);
if (masterConfigVersion != slaveConfigVersion) {
print("ReplSetTest awaitReplication: secondary #" + secondaryCount + ", " +
@@ -1763,13 +1752,13 @@ var ReplSetTest = function(opts) {
if (slaveConfigVersion > masterConfigVersion) {
master = self.getPrimary();
masterConfigVersion =
- _runInCommandReadMode(master,
- () => master.getDB("local")['system.replset']
- .find()
- .readConcern("local")
- .limit(1)
- .next()
- .version);
+ master._runWithForcedReadMode("commands",
+ () => master.getDB("local")['system.replset']
+ .find()
+ .readConcern("local")
+ .limit(1)
+ .next()
+ .version);
masterName = master.host;
print("ReplSetTest awaitReplication: optime for primary, " + masterName +
@@ -2455,7 +2444,8 @@ var ReplSetTest = function(opts) {
}
try {
- return _runInCommandReadMode(this.mongo, () => operation(this.cursor));
+ return this.mongo._runWithForcedReadMode("commands",
+ () => operation(this.cursor));
} catch (err) {
print("Error: " + name + " threw '" + err.message + "' on " + this.mongo.host);
// Occasionally, the capped collection will get truncated while we are iterating
@@ -2491,20 +2481,21 @@ var ReplSetTest = function(opts) {
// changed "cursorTimeoutMillis" to a short time period.
this._cursorExhausted = false;
// Although this line sets the read concern, it does not need to be called via
- // _runInCommandReadMode() because it only creates the client-side cursor. It's not
- // until next()/hasNext() are called that the find command gets sent to the server.
+ // _runWithForcedReadMode() because it only creates the client-side cursor. It's
+ // not until next()/hasNext() are called that the find command gets sent to the
+ // server.
this.cursor =
coll.find(query).sort({$natural: -1}).noCursorTimeout().readConcern("local");
};
this.getFirstDoc = function() {
- return _runInCommandReadMode(this.mongo,
- () => this.getOplogColl()
- .find()
- .sort({$natural: 1})
- .readConcern("local")
- .limit(-1)
- .next());
+ return this.mongo._runWithForcedReadMode("commands",
+ () => this.getOplogColl()
+ .find()
+ .sort({$natural: 1})
+ .readConcern("local")
+ .limit(-1)
+ .next());
};
this.getOplogColl = function() {