author     Gregory Wlodarek <gregory.wlodarek@mongodb.com>   2022-04-22 19:42:03 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-04-22 20:12:32 +0000
commit     90d6271d2f663a4aaf633de5e362972e50e90c21 (patch)
tree       d24da0cee090d7d92972fa8f43fa4f0eb41ac915 /src/mongo
parent     983a7174c6d792049ef0f54e36a55f93801df89c (diff)
download   mongo-90d6271d2f663a4aaf633de5e362972e50e90c21.tar.gz
SERVER-60761 Move the global read-only flag to be an OperationContext function
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/SConscript | 1
-rw-r--r--  src/mongo/db/auth/authorization_checks.cpp | 15
-rw-r--r--  src/mongo/db/auth/authorization_checks.h | 6
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp | 2
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp | 1
-rw-r--r--  src/mongo/db/commands/create_command.cpp | 2
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp | 3
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 1
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version.cpp | 10
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 1
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 2
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 1
-rw-r--r--  src/mongo/db/commands/validate.cpp | 4
-rw-r--r--  src/mongo/db/error_labels.cpp | 6
-rw-r--r--  src/mongo/db/mongod_main.cpp | 11
-rw-r--r--  src/mongo/db/mongod_options.cpp | 2
-rw-r--r--  src/mongo/db/operation_context.h | 13
-rw-r--r--  src/mongo/db/pipeline/aggregation_request_helper.cpp | 30
-rw-r--r--  src/mongo/db/pipeline/aggregation_request_helper.h | 6
-rw-r--r--  src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_group.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/pipeline.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 5
-rw-r--r--  src/mongo/db/repl/replication_info.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_recovery.cpp | 10
-rw-r--r--  src/mongo/db/repl/replication_recovery_test.cpp | 5
-rw-r--r--  src/mongo/db/repl/storage_timestamp_test.cpp | 2
-rw-r--r--  src/mongo/db/repl/timestamp_block.cpp | 2
-rw-r--r--  src/mongo/db/s/cluster_pipeline_cmd_d.cpp | 4
-rw-r--r--  src/mongo/db/s/flush_database_cache_updates_command.cpp | 2
-rw-r--r--  src/mongo/db/s/flush_resharding_state_change_command.cpp | 2
-rw-r--r--  src/mongo/db/s/flush_routing_table_cache_updates_command.cpp | 2
-rw-r--r--  src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp | 2
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod.cpp | 8
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod_test.cpp | 84
-rw-r--r--  src/mongo/db/s/shardsvr_collmod_command.cpp | 3
-rw-r--r--  src/mongo/db/service_context.h | 21
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp | 2
-rw-r--r--  src/mongo/db/sorter/sorter.cpp | 3
-rw-r--r--  src/mongo/db/startup_recovery.cpp | 24
-rw-r--r--  src/mongo/db/startup_warnings_mongod.cpp | 7
-rw-r--r--  src/mongo/db/storage/control/storage_control.cpp | 2
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine_test.cpp | 247
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp | 2
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h | 4
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit_test.cpp | 2
-rw-r--r--  src/mongo/db/storage/record_store.cpp | 18
-rw-r--r--  src/mongo/db/storage/recovery_unit.cpp | 28
-rw-r--r--  src/mongo/db/storage/recovery_unit.h | 24
-rw-r--r--  src/mongo/db/storage/recovery_unit_noop.h | 4
-rw-r--r--  src/mongo/db/storage/recovery_unit_test_harness.cpp | 47
-rw-r--r--  src/mongo/db/storage/storage_engine.h | 13
-rw-r--r--  src/mongo/db/storage/storage_engine_impl.cpp | 4
-rw-r--r--  src/mongo/db/storage/storage_engine_init.cpp | 48
-rw-r--r--  src/mongo/db/storage/storage_init.cpp | 2
-rw-r--r--  src/mongo/db/storage/storage_options.cpp | 2
-rw-r--r--  src/mongo/db/storage/storage_options.h | 7
-rw-r--r--  src/mongo/db/storage/storage_repair_observer.cpp | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp | 28
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_index.h | 15
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp | 5
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 58
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h | 6
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp | 13
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp | 16
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h | 1
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp | 7
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp | 28
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp | 28
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp | 15
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h | 3
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp | 14
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp | 5
-rw-r--r--  src/mongo/db/storage/write_unit_of_work.cpp | 17
-rw-r--r--  src/mongo/db/system_index.cpp | 7
-rw-r--r--  src/mongo/embedded/embedded.cpp | 15
-rw-r--r--  src/mongo/embedded/embedded_ismaster.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_collection_mod_cmd.cpp | 3
-rw-r--r--  src/mongo/s/commands/cluster_count_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_create_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_distinct_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_find_cmd.h | 2
-rw-r--r--  src/mongo/s/commands/cluster_pipeline_cmd.h | 3
-rw-r--r--  src/mongo/s/commands/cluster_pipeline_cmd_s.cpp | 8
87 files changed, 556 insertions, 525 deletions
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 9f0bc23aa28..e028a244889 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -151,6 +151,7 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
+ '$BUILD_DIR/mongo/db/repl/repl_settings',
'$BUILD_DIR/mongo/util/processinfo',
'startup_warnings_common',
]
diff --git a/src/mongo/db/auth/authorization_checks.cpp b/src/mongo/db/auth/authorization_checks.cpp
index 6beb7def53a..06fa2164fbc 100644
--- a/src/mongo/db/auth/authorization_checks.cpp
+++ b/src/mongo/db/auth/authorization_checks.cpp
@@ -43,7 +43,8 @@ namespace {
// Checks if this connection has the privileges necessary to create or modify the view 'viewNs'
// to be a view on 'viewOnNs' with pipeline 'viewPipeline'. Call this function after verifying
// that the user has the 'createCollection' or 'collMod' action, respectively.
-Status checkAuthForCreateOrModifyView(AuthorizationSession* authzSession,
+Status checkAuthForCreateOrModifyView(OperationContext* opCtx,
+ AuthorizationSession* authzSession,
const NamespaceString& viewNs,
const NamespaceString& viewOnNs,
const BSONArray& viewPipeline,
@@ -54,6 +55,7 @@ Status checkAuthForCreateOrModifyView(AuthorizationSession* authzSession,
}
auto request = aggregation_request_helper::parseFromBSON(
+ opCtx,
viewNs,
BSON("aggregate" << viewOnNs.coll() << "pipeline" << viewPipeline << "cursor" << BSONObj()
<< "$db" << viewOnNs.db()),
@@ -198,7 +200,8 @@ Status checkAuthForKillCursors(AuthorizationSession* authSession,
str::stream() << "not authorized to kill cursor on " << ns.ns());
}
-Status checkAuthForCreate(AuthorizationSession* authSession,
+Status checkAuthForCreate(OperationContext* opCtx,
+ AuthorizationSession* authSession,
const CreateCommand& cmd,
bool isMongos) {
auto ns = cmd.getNamespace();
@@ -229,7 +232,7 @@ Status checkAuthForCreate(AuthorizationSession* authSession,
pipelineArray.append(stage);
}
return checkAuthForCreateOrModifyView(
- authSession, ns, viewOnNs, pipelineArray.arr(), isMongos);
+ opCtx, authSession, ns, viewOnNs, pipelineArray.arr(), isMongos);
}
// To create a regular collection, ActionType::createCollection or ActionType::insert are
@@ -242,7 +245,8 @@ Status checkAuthForCreate(AuthorizationSession* authSession,
return Status(ErrorCodes::Unauthorized, "unauthorized");
}
-Status checkAuthForCollMod(AuthorizationSession* authSession,
+Status checkAuthForCollMod(OperationContext* opCtx,
+ AuthorizationSession* authSession,
const NamespaceString& ns,
const BSONObj& cmdObj,
bool isMongos) {
@@ -264,7 +268,8 @@ Status checkAuthForCollMod(AuthorizationSession* authSession,
if (hasViewOn) {
NamespaceString viewOnNs(ns.db(), cmdObj["viewOn"].checkAndGetStringData());
auto viewPipeline = BSONArray(cmdObj["pipeline"].Obj());
- return checkAuthForCreateOrModifyView(authSession, ns, viewOnNs, viewPipeline, isMongos);
+ return checkAuthForCreateOrModifyView(
+ opCtx, authSession, ns, viewOnNs, viewPipeline, isMongos);
}
return Status::OK();
diff --git a/src/mongo/db/auth/authorization_checks.h b/src/mongo/db/auth/authorization_checks.h
index 326722c6a5b..4a211fb0ee1 100644
--- a/src/mongo/db/auth/authorization_checks.h
+++ b/src/mongo/db/auth/authorization_checks.h
@@ -90,13 +90,15 @@ StatusWith<PrivilegeVector> getPrivilegesForAggregate(AuthorizationSession* auth
// Checks if this connection has the privileges necessary to create 'ns' with the options
// supplied in 'cmdObj' either directly on mongoD or via mongoS.
-Status checkAuthForCreate(AuthorizationSession* authSession,
+Status checkAuthForCreate(OperationContext* opCtx,
+ AuthorizationSession* authSession,
const CreateCommand& cmd,
bool isMongos);
// Checks if this connection has the privileges necessary to modify 'ns' with the options
// supplied in 'cmdObj' either directly on mongoD or via mongoS.
-Status checkAuthForCollMod(AuthorizationSession* authSession,
+Status checkAuthForCollMod(OperationContext* opCtx,
+ AuthorizationSession* authSession,
const NamespaceString& ns,
const BSONObj& cmdObj,
bool isMongos);
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 85e2087a321..9445deb39d5 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -129,7 +129,7 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
uassert(ErrorCodes::IllegalOperation,
"Cannot drop a database in read-only mode",
- !storageGlobalParams.readOnly);
+ !opCtx->readOnly());
// As of SERVER-32205, dropping the admin database is prohibited.
uassert(ErrorCodes::IllegalOperation,
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index c548bf3ef1c..35b360eb9b9 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -175,6 +175,7 @@ public:
auto viewAggCmd =
OpMsgRequest::fromDBAndBody(nss.db(), viewAggregation.getValue()).body;
auto viewAggRequest = aggregation_request_helper::parseFromBSON(
+ opCtx,
nss,
viewAggCmd,
verbosity,
diff --git a/src/mongo/db/commands/create_command.cpp b/src/mongo/db/commands/create_command.cpp
index ebe478ca160..10160cdcc62 100644
--- a/src/mongo/db/commands/create_command.cpp
+++ b/src/mongo/db/commands/create_command.cpp
@@ -97,7 +97,7 @@ public:
void doCheckAuthorization(OperationContext* opCtx) const final {
uassertStatusOK(auth::checkAuthForCreate(
- AuthorizationSession::get(opCtx->getClient()), request(), false));
+ opCtx, AuthorizationSession::get(opCtx->getClient()), request(), false));
}
NamespaceString ns() const final {
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 0e88f993f93..0d219fe856e 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -542,7 +542,8 @@ public:
const std::string& dbname,
const BSONObj& cmdObj) const {
const NamespaceString nss(parseNs(dbname, cmdObj));
- return auth::checkAuthForCollMod(AuthorizationSession::get(client), nss, cmdObj, false);
+ return auth::checkAuthForCollMod(
+ client->getOperationContext(), AuthorizationSession::get(client), nss, cmdObj, false);
}
bool runWithRequestParser(OperationContext* opCtx,
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 57585a88401..da5b72c80e6 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -164,6 +164,7 @@ public:
auto viewAggCmd =
OpMsgRequest::fromDBAndBody(nss.db(), viewAggregation.getValue()).body;
auto viewAggRequest = aggregation_request_helper::parseFromBSON(
+ opCtx,
nss,
viewAggCmd,
verbosity,
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index b9af74e8847..7062c8ec1e4 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -200,10 +200,6 @@ Lock::ResourceMutex fcvLock("featureCompatibilityVersionLock");
Timestamp lastFCVUpdateTimestamp;
SimpleMutex lastFCVUpdateTimestampMutex;
-bool isWriteableStorageEngine() {
- return !storageGlobalParams.readOnly && (storageGlobalParams.engine != "devnull");
-}
-
/**
* Build update command for featureCompatibilityVersion document updates.
*/
@@ -498,8 +494,10 @@ void FeatureCompatibilityVersion::fassertInitializedAfterStartup(OperationContex
// If we are part of a replica set and are started up with no data files, we do not set the
// featureCompatibilityVersion until a primary is chosen. For this case, we expect the in-memory
- // featureCompatibilityVersion parameter to still be uninitialized until after startup.
- if (isWriteableStorageEngine() && (!usingReplication || nonLocalDatabases)) {
+ // featureCompatibilityVersion parameter to still be uninitialized until after startup. In
+ // standalone mode, FCV is initialized during startup, even in read-only mode.
+ bool isWriteableStorageEngine = storageGlobalParams.engine != "devnull";
+ if (isWriteableStorageEngine && (!usingReplication || nonLocalDatabases)) {
invariant(serverGlobalParams.featureCompatibility.isVersionInitialized());
}
}
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index fc81cb6904d..3aabc3ddf6b 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -345,6 +345,7 @@ public:
// Create the agg request equivalent of the find operation, with the explain
// verbosity included.
auto aggRequest = aggregation_request_helper::parseFromBSON(
+ opCtx,
nss,
viewAggCmd,
verbosity,
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index bc144913d4f..093ff11a439 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -239,7 +239,7 @@ BSONObj buildCollectionBson(OperationContext* opCtx,
b.append("options", options.toBSON(false));
BSONObjBuilder infoBuilder;
- infoBuilder.append("readOnly", storageGlobalParams.readOnly);
+ infoBuilder.append("readOnly", opCtx->readOnly());
if (options.uuid)
infoBuilder.appendElements(options.uuid->toBSON());
b.append("info", infoBuilder.obj());
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index eb5f74d4387..1d2e0f25059 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -74,6 +74,7 @@ public:
const OpMsgRequest& opMsgRequest,
boost::optional<ExplainOptions::Verbosity> explainVerbosity) override {
const auto aggregationRequest = aggregation_request_helper::parseFromBSON(
+ opCtx,
opMsgRequest.getDatabase().toString(),
opMsgRequest.body,
explainVerbosity,
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index c6724076b9f..d17529cdaec 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -148,7 +148,7 @@ public:
}
const bool repair = cmdObj["repair"].trueValue();
- if (storageGlobalParams.readOnly && repair) {
+ if (opCtx->readOnly() && repair) {
uasserted(ErrorCodes::InvalidOptions,
str::stream() << "Running the validate command with { repair: true } in"
<< " read-only mode is not supported.");
@@ -226,7 +226,7 @@ public:
}();
auto repairMode = [&] {
- if (storageGlobalParams.readOnly) {
+ if (opCtx->readOnly()) {
// On read-only mode we can't make any adjustments.
return CollectionValidation::RepairMode::kNone;
}
diff --git a/src/mongo/db/error_labels.cpp b/src/mongo/db/error_labels.cpp
index b66d0df2d74..23fa5994b46 100644
--- a/src/mongo/db/error_labels.cpp
+++ b/src/mongo/db/error_labels.cpp
@@ -117,10 +117,10 @@ bool ErrorLabelBuilder::isResumableChangeStreamError() const {
bool apiStrict = APIParameters::get(_opCtx).getAPIStrict().value_or(false);
// Do enough parsing to confirm that this is a well-formed pipeline with a $changeStream.
- const auto swLitePipe = [&nss, &cmdObj, apiStrict]() -> StatusWith<LiteParsedPipeline> {
+ const auto swLitePipe = [this, &nss, &cmdObj, apiStrict]() -> StatusWith<LiteParsedPipeline> {
try {
- auto aggRequest =
- aggregation_request_helper::parseFromBSON(nss, cmdObj, boost::none, apiStrict);
+ auto aggRequest = aggregation_request_helper::parseFromBSON(
+ _opCtx, nss, cmdObj, boost::none, apiStrict);
return LiteParsedPipeline(aggRequest);
} catch (const DBException& ex) {
return ex.toStatus();
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index bce1bc1815c..94aaa958448 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -389,6 +389,13 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
auto runner = makePeriodicRunner(serviceContext);
serviceContext->setPeriodicRunner(std::move(runner));
+ // When starting the server with --queryableBackupMode or --recoverFromOplogAsStandalone, we are
+ // in read-only mode and don't allow user-originating operations to perform writes
+ if (storageGlobalParams.queryableBackupMode ||
+ repl::ReplSettings::shouldRecoverFromOplogAsStandalone()) {
+ serviceContext->disallowUserWrites();
+ }
+
#ifdef MONGO_CONFIG_SSL
OCSPManager::start(serviceContext);
CertificateExpirationMonitor::get()->start(serviceContext);
@@ -650,7 +657,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
readWriteConcernDefaultsMongodStartupChecks(startupOpCtx.get());
// Perform replication recovery for queryable backup mode if needed.
- if (storageGlobalParams.readOnly) {
+ if (storageGlobalParams.queryableBackupMode) {
uassert(ErrorCodes::BadValue,
str::stream() << "Cannot specify both queryableBackupMode and "
<< "recoverFromOplogAsStandalone at the same time",
@@ -673,7 +680,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
replCoord->startup(startupOpCtx.get(), lastShutdownState);
}
- if (!storageGlobalParams.readOnly) {
+ if (!storageGlobalParams.queryableBackupMode) {
if (storageEngine->supportsCappedCollections()) {
logStartup(startupOpCtx.get());
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index d474782085c..6e3772f4083 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -438,7 +438,7 @@ Status storeMongodOptions(const moe::Environment& params) {
if (params.count("storage.queryableBackupMode") &&
params["storage.queryableBackupMode"].as<bool>()) {
- storageGlobalParams.readOnly = true;
+ storageGlobalParams.queryableBackupMode = true;
}
if (params.count("storage.groupCollections")) {
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index be234281d8b..ec3d01f4133 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -588,6 +588,19 @@ public:
*/
void restoreMaxTimeMS();
+ /**
+ * Returns whether this operation must run in read-only mode.
+ *
+ * If the read-only flag is set on the ServiceContext then:
+ * - Internal operations are allowed to perform writes.
+ * - User originating operations are not allowed to perform writes.
+ */
+ bool readOnly() const {
+ if (!getClient()->isFromUserConnection())
+ return false;
+ return !getServiceContext()->userWritesAllowed();
+ }
+
private:
StatusWith<stdx::cv_status> waitForConditionOrInterruptNoAssertUntil(
stdx::condition_variable& cv, BasicLockableAdapter m, Date_t deadline) noexcept override;
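
The new accessor composes two pieces of state: whether the client is a user connection and whether the ServiceContext still allows user writes. A minimal sketch of the call-site pattern the rest of this commit adopts (the command function below is hypothetical, not part of the commit; the uassert pattern mirrors drop_database.cpp above):

// Hypothetical write path guarded by the new per-operation check.
// Internal clients always report readOnly() == false, so startup and
// replication-recovery writes proceed even when user writes are disallowed.
Status runSomeWriteCommand(OperationContext* opCtx) {
    uassert(ErrorCodes::IllegalOperation,
            "Cannot perform writes in read-only mode",
            !opCtx->readOnly());
    // ... perform the write ...
    return Status::OK();
}
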
diff --git a/src/mongo/db/pipeline/aggregation_request_helper.cpp b/src/mongo/db/pipeline/aggregation_request_helper.cpp
index 5bfa4b7c2a3..04fcc4023e3 100644
--- a/src/mongo/db/pipeline/aggregation_request_helper.cpp
+++ b/src/mongo/db/pipeline/aggregation_request_helper.cpp
@@ -49,15 +49,17 @@ namespace aggregation_request_helper {
/**
* Validate the aggregate command object.
*/
-void validate(const BSONObj& cmdObj,
+void validate(OperationContext* opCtx,
+ const BSONObj& cmdObj,
const NamespaceString& nss,
boost::optional<ExplainOptions::Verbosity> explainVerbosity);
-AggregateCommandRequest parseFromBSON(const std::string& dbName,
+AggregateCommandRequest parseFromBSON(OperationContext* opCtx,
+ const std::string& dbName,
const BSONObj& cmdObj,
boost::optional<ExplainOptions::Verbosity> explainVerbosity,
bool apiStrict) {
- return parseFromBSON(parseNs(dbName, cmdObj), cmdObj, explainVerbosity, apiStrict);
+ return parseFromBSON(opCtx, parseNs(dbName, cmdObj), cmdObj, explainVerbosity, apiStrict);
}
StatusWith<AggregateCommandRequest> parseFromBSONForTests(
@@ -66,7 +68,7 @@ StatusWith<AggregateCommandRequest> parseFromBSONForTests(
boost::optional<ExplainOptions::Verbosity> explainVerbosity,
bool apiStrict) {
try {
- return parseFromBSON(nss, cmdObj, explainVerbosity, apiStrict);
+ return parseFromBSON(/*opCtx=*/nullptr, nss, cmdObj, explainVerbosity, apiStrict);
} catch (const AssertionException&) {
return exceptionToStatus();
}
@@ -78,13 +80,14 @@ StatusWith<AggregateCommandRequest> parseFromBSONForTests(
boost::optional<ExplainOptions::Verbosity> explainVerbosity,
bool apiStrict) {
try {
- return parseFromBSON(dbName, cmdObj, explainVerbosity, apiStrict);
+ return parseFromBSON(/*opCtx=*/nullptr, dbName, cmdObj, explainVerbosity, apiStrict);
} catch (const AssertionException&) {
return exceptionToStatus();
}
}
-AggregateCommandRequest parseFromBSON(NamespaceString nss,
+AggregateCommandRequest parseFromBSON(OperationContext* opCtx,
+ NamespaceString nss,
const BSONObj& cmdObj,
boost::optional<ExplainOptions::Verbosity> explainVerbosity,
bool apiStrict) {
@@ -111,7 +114,7 @@ AggregateCommandRequest parseFromBSON(NamespaceString nss,
request.setExplain(explainVerbosity);
}
- validate(cmdObj, nss, explainVerbosity);
+ validate(opCtx, cmdObj, nss, explainVerbosity);
return request;
}
@@ -149,7 +152,8 @@ Document serializeToCommandDoc(const AggregateCommandRequest& request) {
return Document(request.toBSON(BSONObj()).getOwned());
}
-void validate(const BSONObj& cmdObj,
+void validate(OperationContext* opCtx,
+ const BSONObj& cmdObj,
const NamespaceString& nss,
boost::optional<ExplainOptions::Verbosity> explainVerbosity) {
bool hasAllowDiskUseElem = cmdObj.hasField(AggregateCommandRequest::kAllowDiskUseFieldName);
@@ -177,10 +181,12 @@ void validate(const BSONObj& cmdObj,
<< "' without '" << AggregateCommandRequest::kFromMongosFieldName << "'",
(!hasNeedsMergeElem || hasFromMongosElem));
- uassert(ErrorCodes::IllegalOperation,
- str::stream() << "The '" << AggregateCommandRequest::kAllowDiskUseFieldName
- << "' option is not permitted in read-only mode.",
- (!hasAllowDiskUseElem || !storageGlobalParams.readOnly));
+ if (opCtx) {
+ uassert(ErrorCodes::IllegalOperation,
+ str::stream() << "The '" << AggregateCommandRequest::kAllowDiskUseFieldName
+ << "' option is not permitted in read-only mode.",
+ (!hasAllowDiskUseElem || !opCtx->readOnly()));
+ }
auto requestReshardingResumeTokenElem =
cmdObj[AggregateCommandRequest::kRequestReshardingResumeTokenFieldName];
diff --git a/src/mongo/db/pipeline/aggregation_request_helper.h b/src/mongo/db/pipeline/aggregation_request_helper.h
index bb447712f7f..a0af24777f5 100644
--- a/src/mongo/db/pipeline/aggregation_request_helper.h
+++ b/src/mongo/db/pipeline/aggregation_request_helper.h
@@ -68,7 +68,8 @@ static constexpr long long kDefaultBatchSize = 101;
* then 'explainVerbosity' contains this information. In this case, 'cmdObj' may not itself
* contain the explain specifier. Otherwise, 'explainVerbosity' should be boost::none.
*/
-AggregateCommandRequest parseFromBSON(NamespaceString nss,
+AggregateCommandRequest parseFromBSON(OperationContext* opCtx,
+ NamespaceString nss,
const BSONObj& cmdObj,
boost::optional<ExplainOptions::Verbosity> explainVerbosity,
bool apiStrict);
@@ -83,7 +84,8 @@ StatusWith<AggregateCommandRequest> parseFromBSONForTests(
* Convenience overload which constructs the request's NamespaceString from the given database
* name and command object.
*/
-AggregateCommandRequest parseFromBSON(const std::string& dbName,
+AggregateCommandRequest parseFromBSON(OperationContext* opCtx,
+ const std::string& dbName,
const BSONObj& cmdObj,
boost::optional<ExplainOptions::Verbosity> explainVerbosity,
bool apiStrict);
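
Note that the test-only overloads in the .cpp hunk forward a null OperationContext, and validate() enforces the allowDiskUse read-only restriction only when an opCtx is available. A sketch of the two call paths, assuming 'nss' and 'cmdObj' are already in hand:

// Server path: the opCtx gates the read-only allowDiskUse validation.
auto request = aggregation_request_helper::parseFromBSON(
    opCtx, nss, cmdObj, boost::none /*explainVerbosity*/, false /*apiStrict*/);

// Test path: no OperationContext exists, so nullptr is forwarded and the
// read-only check inside validate() is skipped.
auto swRequest = aggregation_request_helper::parseFromBSONForTests(
    nss, cmdObj, boost::none /*explainVerbosity*/, false /*apiStrict*/);
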
diff --git a/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp b/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp
index 0f32b20aa47..56cd64d653c 100644
--- a/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp
@@ -214,7 +214,7 @@ BSONObj DocumentSourceChangeStreamHandleTopologyChange::createUpdatedCommandForN
// Create the 'AggregateCommandRequest' object which will help in creating the parsed pipeline.
auto aggCmdRequest = aggregation_request_helper::parseFromBSON(
- pExpCtx->ns, shardCommand, boost::none, apiStrict);
+ opCtx, pExpCtx->ns, shardCommand, boost::none, apiStrict);
// Parse and optimize the pipeline.
auto pipeline = Pipeline::parse(aggCmdRequest.getPipeline(), pExpCtx);
diff --git a/src/mongo/db/pipeline/document_source_group.cpp b/src/mongo/db/pipeline/document_source_group.cpp
index 80a2dc60488..9f7197c43e2 100644
--- a/src/mongo/db/pipeline/document_source_group.cpp
+++ b/src/mongo/db/pipeline/document_source_group.cpp
@@ -610,7 +610,7 @@ MONGO_COMPILER_NOINLINE DocumentSource::GetNextResult DocumentSourceGroup::initi
}
}
- if (kDebugBuild && !storageGlobalParams.readOnly) {
+ if (kDebugBuild && !pExpCtx->opCtx->readOnly()) {
// In debug mode, spill every time we have a duplicate id to stress merge logic.
if (!inserted && // is a dup
!pExpCtx->inMongos && // can't spill to disk in mongos
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index d92a6fa3b29..b85a1e379ee 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -605,7 +605,7 @@ Status Pipeline::_pipelineCanRunOnMongoS() const {
const bool mustWriteToDisk =
(constraints.diskRequirement == DiskUseRequirement::kWritesPersistentData);
const bool mayWriteTmpDataAndDiskUseIsAllowed =
- (pCtx->allowDiskUse && !storageGlobalParams.readOnly &&
+ (pCtx->allowDiskUse && !pCtx->opCtx->readOnly() &&
constraints.diskRequirement == DiskUseRequirement::kWritesTmpData);
const bool needsDisk = (mustWriteToDisk || mayWriteTmpDataAndDiskUseIsAllowed);
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index b3faa85859f..3bbe0000e1a 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -399,8 +399,7 @@ void ReplicationCoordinatorExternalStateImpl::shutdown(OperationContext* opCtx)
// primary shutdown. Stepdown is similarly safe from holes and halts updates to and clears
// the truncate point. The other replication states do need truncation if the truncate point
// is set: e.g. interruption mid batch application can leave oplog holes.
- if (!storageGlobalParams.readOnly &&
- _replicationProcess->getConsistencyMarkers()
+ if (_replicationProcess->getConsistencyMarkers()
->isOplogTruncateAfterPointBeingUsedForPrimary()) {
_stopAsyncUpdatesOfAndClearOplogTruncateAfterPoint();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index a1296d98522..3e6dca73357 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -898,7 +898,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx,
_replicationProcess->getReplicationRecovery()->recoverFromOplogAsStandalone(opCtx);
}
- if (storageGlobalParams.readOnly && !recoverToOplogTimestamp.empty()) {
+ if (storageGlobalParams.queryableBackupMode && !recoverToOplogTimestamp.empty()) {
BSONObj recoverToTimestampObj = fromjson(recoverToOplogTimestamp);
uassert(ErrorCodes::BadValue,
str::stream() << "'recoverToOplogTimestamp' needs to have a 'timestamp' field",
@@ -917,9 +917,6 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx,
cfg.isOK());
// Need to perform replication recovery up to and including the given timestamp.
- // Temporarily turn off read-only mode for this procedure as we'll have to do writes.
- storageGlobalParams.readOnly = false;
- ON_BLOCK_EXIT([&] { storageGlobalParams.readOnly = true; });
_replicationProcess->getReplicationRecovery()->recoverFromOplogUpTo(opCtx,
recoverToTimestamp);
}
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 2172307d791..110831f1663 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -493,7 +493,7 @@ public:
wireSpec->incomingExternalClient.maxWireVersion);
}
- result.append(HelloCommandReply::kReadOnlyFieldName, storageGlobalParams.readOnly);
+ result.append(HelloCommandReply::kReadOnlyFieldName, opCtx->readOnly());
if (auto param = ServerParameterSet::getNodeParameterSet()->getIfExists(
kAutomationServiceDescriptorFieldName)) {
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index 5cd96227393..ca20262c266 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -364,16 +364,6 @@ void ReplicationRecoveryImpl::recoverFromOplogAsStandalone(OperationContext* opC
if (!_duringInitialSync) {
// Initial sync will reconstruct prepared transactions when it is completely done.
reconstructPreparedTransactions(opCtx, OplogApplication::Mode::kRecovering);
-
- // Two-phase index builds are built in the background, which may still be in-progress after
- // recovering from the oplog. To prevent crashing the server, skip enabling read-only mode.
- if (IndexBuildsCoordinator::get(opCtx)->noIndexBuildInProgress()) {
- LOGV2_WARNING(21558,
- "Setting mongod to readOnly mode as a result of specifying "
- "'recoverFromOplogAsStandalone'");
-
- storageGlobalParams.readOnly = true;
- }
}
}
diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp
index ee937d39648..fa01fb5ebb0 100644
--- a/src/mongo/db/repl/replication_recovery_test.cpp
+++ b/src/mongo/db/repl/replication_recovery_test.cpp
@@ -222,7 +222,6 @@ private:
serverGlobalParams.enableMajorityReadConcern = _stashedEnableMajorityReadConcern;
ServiceContextMongoDTest::tearDown();
- storageGlobalParams.readOnly = false;
gTakeUnstableCheckpointOnShutdown = false;
}
@@ -1478,10 +1477,6 @@ TEST_F(ReplicationRecoveryTest, RecoverFromOplogAsStandaloneRecoversOplog) {
recovery.recoverFromOplogAsStandalone(opCtx);
_assertDocsInTestCollection(opCtx, {5});
-
- // Test the node is readOnly.
- ASSERT_THROWS(getStorageInterface()->insertDocument(opCtx, testNs, {_makeInsertDocument(2)}, 1),
- AssertionException);
}
TEST_F(ReplicationRecoveryTest,
diff --git a/src/mongo/db/repl/storage_timestamp_test.cpp b/src/mongo/db/repl/storage_timestamp_test.cpp
index fead4dab5ee..e6b65302e25 100644
--- a/src/mongo/db/repl/storage_timestamp_test.cpp
+++ b/src/mongo/db/repl/storage_timestamp_test.cpp
@@ -320,7 +320,7 @@ public:
void dumpOplog() {
OneOffRead oor(_opCtx, Timestamp::min());
- _opCtx->recoveryUnit()->beginUnitOfWork(_opCtx);
+ _opCtx->recoveryUnit()->beginUnitOfWork(_opCtx->readOnly());
LOGV2(8423335, "Dumping oplog collection");
AutoGetCollectionForRead oplogRaii(_opCtx, NamespaceString::kRsOplogNamespace);
const CollectionPtr& oplogColl = oplogRaii.getCollection();
diff --git a/src/mongo/db/repl/timestamp_block.cpp b/src/mongo/db/repl/timestamp_block.cpp
index 41992c8ed58..211d61d3f9c 100644
--- a/src/mongo/db/repl/timestamp_block.cpp
+++ b/src/mongo/db/repl/timestamp_block.cpp
@@ -40,7 +40,7 @@ namespace mongo {
TimestampBlock::TimestampBlock(OperationContext* opCtx, Timestamp ts) : _opCtx(opCtx), _ts(ts) {
uassert(ErrorCodes::IllegalOperation,
"Cannot timestamp a write operation in read-only mode",
- !storageGlobalParams.readOnly);
+ !_opCtx->readOnly());
if (!_ts.isNull()) {
_opCtx->recoveryUnit()->setCommitTimestamp(_ts);
}
diff --git a/src/mongo/db/s/cluster_pipeline_cmd_d.cpp b/src/mongo/db/s/cluster_pipeline_cmd_d.cpp
index cd78fa52e18..8526a411f67 100644
--- a/src/mongo/db/s/cluster_pipeline_cmd_d.cpp
+++ b/src/mongo/db/s/cluster_pipeline_cmd_d.cpp
@@ -62,13 +62,15 @@ struct ClusterPipelineCommandD {
}
static AggregateCommandRequest parseAggregationRequest(
+ OperationContext* opCtx,
const OpMsgRequest& opMsgRequest,
boost::optional<ExplainOptions::Verbosity> explainVerbosity,
bool apiStrict) {
// Replace clusterAggregate in the request body because the parser doesn't recognize it.
auto modifiedRequestBody =
opMsgRequest.body.replaceFieldNames(BSON(AggregateCommandRequest::kCommandName << 1));
- return aggregation_request_helper::parseFromBSON(opMsgRequest.getDatabase().toString(),
+ return aggregation_request_helper::parseFromBSON(opCtx,
+ opMsgRequest.getDatabase().toString(),
modifiedRequestBody,
explainVerbosity,
apiStrict);
diff --git a/src/mongo/db/s/flush_database_cache_updates_command.cpp b/src/mongo/db/s/flush_database_cache_updates_command.cpp
index de95a293333..e56515581f7 100644
--- a/src/mongo/db/s/flush_database_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_database_cache_updates_command.cpp
@@ -115,7 +115,7 @@ public:
uassert(ErrorCodes::IllegalOperation,
"Can't call _flushDatabaseCacheUpdates if in read-only mode",
- !storageGlobalParams.readOnly);
+ !opCtx->readOnly());
boost::optional<SharedSemiFuture<void>> criticalSectionSignal;
diff --git a/src/mongo/db/s/flush_resharding_state_change_command.cpp b/src/mongo/db/s/flush_resharding_state_change_command.cpp
index c1049660307..9fb897dc90b 100644
--- a/src/mongo/db/s/flush_resharding_state_change_command.cpp
+++ b/src/mongo/db/s/flush_resharding_state_change_command.cpp
@@ -108,7 +108,7 @@ public:
uassert(ErrorCodes::IllegalOperation,
"Can't call _flushReshardingStateChange if in read-only mode",
- !storageGlobalParams.readOnly);
+ !opCtx->readOnly());
ExecutorFuture<void>(Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor())
.then([svcCtx = opCtx->getServiceContext(), nss = ns()] {
diff --git a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
index 943b78ece6d..901a8834517 100644
--- a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
@@ -110,7 +110,7 @@ public:
uassert(ErrorCodes::IllegalOperation,
str::stream() << "Can't call " << Derived::Request::kCommandName
<< " if in read-only mode",
- !storageGlobalParams.readOnly);
+ !opCtx->readOnly());
boost::optional<SharedSemiFuture<void>> criticalSectionSignal;
diff --git a/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp b/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp
index 16df6e4e20c..6145ddc0079 100644
--- a/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp
+++ b/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp
@@ -138,7 +138,7 @@ void PeriodicShardedIndexConsistencyChecker::_launchShardedIndexConsistencyCheck
}
auto request = aggregation_request_helper::parseFromBSON(
- nss, aggRequestBSON, boost::none, false);
+ opCtx, nss, aggRequestBSON, boost::none, false);
auto catalogCache = Grid::get(opCtx)->catalogCache();
shardVersionRetry(
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 9d57fd74933..6b0cba8ed9b 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -289,9 +289,9 @@ void ShardingInitializationMongoD::shutDown(OperationContext* opCtx) {
bool ShardingInitializationMongoD::initializeShardingAwarenessIfNeeded(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
- // In sharded readOnly mode, we ignore the shardIdentity document on disk and instead *require*
- // a shardIdentity document to be passed through --overrideShardIdentity
- if (storageGlobalParams.readOnly) {
+ // In sharded queryableBackupMode, we ignore the shardIdentity document on disk and instead
+ // *require* a shardIdentity document to be passed through --overrideShardIdentity
+ if (storageGlobalParams.queryableBackupMode) {
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
uassert(ErrorCodes::InvalidOptions,
"If started with --shardsvr in queryableBackupMode, a shardIdentity document "
@@ -516,7 +516,7 @@ void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
auto const service = opCtx->getServiceContext();
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
- if (storageGlobalParams.readOnly) {
+ if (storageGlobalParams.queryableBackupMode) {
CatalogCacheLoader::set(service, std::make_unique<ReadOnlyCatalogCacheLoader>());
} else {
CatalogCacheLoader::set(service,
diff --git a/src/mongo/db/s/sharding_initialization_mongod_test.cpp b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
index ab2bffb2ff7..da9d238f92a 100644
--- a/src/mongo/db/s/sharding_initialization_mongod_test.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
@@ -94,7 +94,7 @@ protected:
_dbDirectClient.reset();
// Restore the defaults before calling tearDown
- storageGlobalParams.readOnly = false;
+ storageGlobalParams.queryableBackupMode = false;
serverGlobalParams.overrideShardIdentity = BSONObj();
CatalogCacheLoader::clearForTests(getServiceContext());
@@ -271,14 +271,15 @@ TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingReplSetNameS
}
// The tests below check for different combinations of the compatible startup parameters for
-// --shardsvr, --overrideShardIdentity, and queryableBackup (readOnly) mode
+// --shardsvr, --overrideShardIdentity, and queryableBackup mode
/**
- * readOnly and --shardsvr
+ * queryableBackupMode and --shardsvr
*/
-TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededReadOnlyAndShardServerAndNoOverrideShardIdentity) {
- storageGlobalParams.readOnly = true;
+TEST_F(
+ ShardingInitializationMongoDTest,
+ InitializeShardingAwarenessIfNeededQueryableBackupModeAndShardServerAndNoOverrideShardIdentity) {
+ storageGlobalParams.queryableBackupMode = true;
ASSERT_THROWS_CODE(
shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()),
@@ -286,9 +287,10 @@ TEST_F(ShardingInitializationMongoDTest,
ErrorCodes::InvalidOptions);
}
-TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededReadOnlyAndShardServerAndInvalidOverrideShardIdentity) {
- storageGlobalParams.readOnly = true;
+TEST_F(
+ ShardingInitializationMongoDTest,
+ InitializeShardingAwarenessIfNeededQueryableBackupModeAndShardServerAndInvalidOverrideShardIdentity) {
+ storageGlobalParams.queryableBackupMode = true;
serverGlobalParams.overrideShardIdentity =
BSON("_id"
<< "shardIdentity" << ShardIdentity::kShardNameFieldName << kShardName
@@ -301,9 +303,10 @@ TEST_F(ShardingInitializationMongoDTest,
ErrorCodes::UnsupportedFormat);
}
-TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededReadOnlyAndShardServerAndValidOverrideShardIdentity) {
- storageGlobalParams.readOnly = true;
+TEST_F(
+ ShardingInitializationMongoDTest,
+ InitializeShardingAwarenessIfNeededQueryableBackupModeAndShardServerAndValidOverrideShardIdentity) {
+ storageGlobalParams.queryableBackupMode = true;
serverGlobalParams.clusterRole = ClusterRole::ShardServer;
serverGlobalParams.overrideShardIdentity = [] {
ShardIdentityType shardIdentity;
@@ -319,11 +322,12 @@ TEST_F(ShardingInitializationMongoDTest,
}
/**
- * readOnly and not --shardsvr
+ * queryableBackupMode and not --shardsvr
*/
-TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededReadOnlyAndNotShardServerAndNoOverrideShardIdentity) {
- storageGlobalParams.readOnly = true;
+TEST_F(
+ ShardingInitializationMongoDTest,
+ InitializeShardingAwarenessIfNeededQueryableBackupModeAndNotShardServerAndNoOverrideShardIdentity) {
+ storageGlobalParams.queryableBackupMode = true;
serverGlobalParams.clusterRole = ClusterRole::None;
ASSERT(!shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
@@ -331,8 +335,8 @@ TEST_F(ShardingInitializationMongoDTest,
TEST_F(
ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededReadOnlyAndNotShardServerAndInvalidOverrideShardIdentity) {
- storageGlobalParams.readOnly = true;
+ InitializeShardingAwarenessIfNeededQueryableBackupModeAndNotShardServerAndInvalidOverrideShardIdentity) {
+ storageGlobalParams.queryableBackupMode = true;
serverGlobalParams.clusterRole = ClusterRole::None;
serverGlobalParams.overrideShardIdentity = BSON("_id"
<< "shardIdentity"
@@ -345,9 +349,10 @@ TEST_F(
ErrorCodes::InvalidOptions);
}
-TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededReadOnlyAndNotShardServerAndValidOverrideShardIdentity) {
- storageGlobalParams.readOnly = true;
+TEST_F(
+ ShardingInitializationMongoDTest,
+ InitializeShardingAwarenessIfNeededQueryableBackupModeAndNotShardServerAndValidOverrideShardIdentity) {
+ storageGlobalParams.queryableBackupMode = true;
serverGlobalParams.clusterRole = ClusterRole::None;
serverGlobalParams.overrideShardIdentity = [] {
ShardIdentityType shardIdentity;
@@ -366,10 +371,10 @@ TEST_F(ShardingInitializationMongoDTest,
}
/**
- * not readOnly and --overrideShardIdentity
+ * not queryableBackupMode and --overrideShardIdentity
*/
TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededNotReadOnlyAndInvalidOverrideShardIdentity) {
+ InitializeShardingAwarenessIfNeededNotQueryableBackupModeAndInvalidOverrideShardIdentity) {
serverGlobalParams.clusterRole = ClusterRole::ShardServer;
serverGlobalParams.overrideShardIdentity = BSON("_id"
<< "shardIdentity"
@@ -390,7 +395,7 @@ TEST_F(ShardingInitializationMongoDTest,
}
TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededNotReadOnlyAndValidOverrideShardIdentity) {
+ InitializeShardingAwarenessIfNeededNotQueryableBackupModeAndValidOverrideShardIdentity) {
serverGlobalParams.clusterRole = ClusterRole::ShardServer;
serverGlobalParams.overrideShardIdentity = [] {
ShardIdentityType shardIdentity;
@@ -416,15 +421,16 @@ TEST_F(ShardingInitializationMongoDTest,
}
/**
- * not readOnly and --shardsvr
+ * not queryableBackupMode and --shardsvr
*/
TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndNoShardIdentity) {
+ InitializeShardingAwarenessIfNeededNotQueryableBackupModeAndShardServerAndNoShardIdentity) {
ASSERT(!shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
}
-TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndInvalidShardIdentity) {
+TEST_F(
+ ShardingInitializationMongoDTest,
+ InitializeShardingAwarenessIfNeededNotQueryableBackupModeAndShardServerAndInvalidShardIdentity) {
// Insert the shardIdentity doc to disk while pretending that we are in "standalone" mode,
// otherwise OpObserver for inserts will prevent the insert from occurring because the
// shardIdentity doc is invalid
@@ -448,8 +454,9 @@ TEST_F(ShardingInitializationMongoDTest,
ErrorCodes::UnsupportedFormat);
}
-TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndValidShardIdentity) {
+TEST_F(
+ ShardingInitializationMongoDTest,
+ InitializeShardingAwarenessIfNeededNotQueryableBackupModeAndShardServerAndValidShardIdentity) {
// Insert the shardIdentity doc to disk while pretending that we are in "standalone" mode,
// otherwise OpObserver for inserts will prevent the insert from occurring because the
// shardIdentity doc is invalid
@@ -474,17 +481,19 @@ TEST_F(ShardingInitializationMongoDTest,
}
/**
- * not readOnly and not --shardsvr
+ * not queryableBackupMode and not --shardsvr
*/
-TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededNotReadOnlyAndNotShardServerAndNoShardIdentity) {
+TEST_F(
+ ShardingInitializationMongoDTest,
+ InitializeShardingAwarenessIfNeededNotQueryableBackupModeAndNotShardServerAndNoShardIdentity) {
ScopedSetStandaloneMode standalone(getServiceContext());
ASSERT(!shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
}
-TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededNotReadOnlyAndNotShardServerAndInvalidShardIdentity) {
+TEST_F(
+ ShardingInitializationMongoDTest,
+ InitializeShardingAwarenessIfNeededNotQueryableBackupModeAndNotShardServerAndInvalidShardIdentity) {
ScopedSetStandaloneMode standalone(getServiceContext());
_dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
@@ -498,8 +507,9 @@ TEST_F(ShardingInitializationMongoDTest,
ASSERT(!shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
}
-TEST_F(ShardingInitializationMongoDTest,
- InitializeShardingAwarenessIfNeededNotReadOnlyAndNotShardServerAndValidShardIdentity) {
+TEST_F(
+ ShardingInitializationMongoDTest,
+ InitializeShardingAwarenessIfNeededNotQueryableBackupModeAndNotShardServerAndValidShardIdentity) {
ScopedSetStandaloneMode standalone(getServiceContext());
BSONObj validShardIdentity = [&] {
diff --git a/src/mongo/db/s/shardsvr_collmod_command.cpp b/src/mongo/db/s/shardsvr_collmod_command.cpp
index 97f47538531..f1736ec156c 100644
--- a/src/mongo/db/s/shardsvr_collmod_command.cpp
+++ b/src/mongo/db/s/shardsvr_collmod_command.cpp
@@ -82,7 +82,8 @@ public:
const std::string& dbname,
const BSONObj& cmdObj) const override {
const NamespaceString nss(parseNs(dbname, cmdObj));
- return auth::checkAuthForCollMod(AuthorizationSession::get(client), nss, cmdObj, false);
+ return auth::checkAuthForCollMod(
+ client->getOperationContext(), AuthorizationSession::get(client), nss, cmdObj, false);
}
bool skipApiVersionCheck() const override {
diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h
index 44e12901cea..10dd5e33203 100644
--- a/src/mongo/db/service_context.h
+++ b/src/mongo/db/service_context.h
@@ -603,6 +603,24 @@ public:
_catalogGeneration.fetchAndAdd(1);
}
+ void disallowUserWrites() {
+ _userWritesAllowed.store(false);
+ }
+
+ /**
+ * Returns true if user writes are allowed.
+ *
+ * User writes are disallowed when starting with queryableBackupMode or
+ * recoverFromOplogAsStandalone to prevent users from writing to replicated collections in
+ * standalone mode.
+ *
+ * To determine whether an operation must run in read-only mode, use
+ * OperationContext::readOnly().
+ */
+ bool userWritesAllowed() const {
+ return _userWritesAllowed.load();
+ }
+
LockedClient getLockedClient(OperationId id);
private:
@@ -749,6 +767,9 @@ private:
// When the catalog is restarted, the generation goes up by one each time.
AtomicWord<uint64_t> _catalogGeneration{0};
+ // Server-wide flag indicating whether users' writes are allowed.
+ AtomicWord<bool> _userWritesAllowed{true};
+
bool _startupComplete = false;
stdx::condition_variable _startupCompleteCondVar;
};
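
Combined with the mongod_main.cpp hunk above, the flag is flipped once at startup and consulted per operation thereafter. A condensed sketch ('svcCtx' stands in for the global ServiceContext; the startup condition is copied from this commit):

// At startup: disallow user writes in queryableBackupMode or when
// recovering from the oplog as a standalone.
if (storageGlobalParams.queryableBackupMode ||
    repl::ReplSettings::shouldRecoverFromOplogAsStandalone()) {
    svcCtx->disallowUserWrites();  // clears the _userWritesAllowed atomic
}

// Per operation thereafter:
//   user connection    -> opCtx->readOnly() == !svcCtx->userWritesAllowed()
//   internal operation -> opCtx->readOnly() == false; writes still allowed
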
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 43894e827c5..aaf3d228440 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -2184,7 +2184,7 @@ void HandleRequest::completeOperation(DbResponse& response) {
// TODO SERVER-26825: Fix race condition where fsyncLock is acquired post
// lockedForWriting() call but prior to profile collection lock acquisition.
LOGV2_DEBUG(21972, 1, "Note: not profiling because doing fsync+lock");
- } else if (storageGlobalParams.readOnly) {
+ } else if (opCtx->readOnly()) {
LOGV2_DEBUG(21973, 1, "Note: not profiling because server is read-only");
} else {
invariant(!opCtx->lockState()->inAWriteUnitOfWork());
diff --git a/src/mongo/db/sorter/sorter.cpp b/src/mongo/db/sorter/sorter.cpp
index 3fa9ebcab32..c3a846772d0 100644
--- a/src/mongo/db/sorter/sorter.cpp
+++ b/src/mongo/db/sorter/sorter.cpp
@@ -1075,9 +1075,6 @@ private:
<< " Pass allowDiskUse:true to opt in.");
}
- // We should check readOnly before getting here.
- invariant(!storageGlobalParams.readOnly);
-
sort();
updateCutoff();
diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp
index ca1b6fb94e0..f6dfd5352c6 100644
--- a/src/mongo/db/startup_recovery.cpp
+++ b/src/mongo/db/startup_recovery.cpp
@@ -74,7 +74,7 @@ MONGO_FAIL_POINT_DEFINE(exitBeforeRepairInvalidatesConfig);
// Returns true if storage engine is writable.
bool isWriteableStorageEngine() {
- return !storageGlobalParams.readOnly && (storageGlobalParams.engine != "devnull");
+ return storageGlobalParams.engine != "devnull";
}
// Attempt to restore the featureCompatibilityVersion document if it is missing.
@@ -452,7 +452,7 @@ void setReplSetMemberInStandaloneMode(OperationContext* opCtx, StartupRecoveryMo
// Perform startup procedures for --repair mode.
void startupRepair(OperationContext* opCtx, StorageEngine* storageEngine) {
- invariant(!storageGlobalParams.readOnly);
+ invariant(!storageGlobalParams.queryableBackupMode);
if (MONGO_unlikely(exitBeforeDataRepair.shouldFail())) {
LOGV2(21006, "Exiting because 'exitBeforeDataRepair' fail point was set.");
@@ -539,26 +539,12 @@ void startupRepair(OperationContext* opCtx, StorageEngine* storageEngine) {
}
}
-// Perform startup procedures for read-only mode.
-void startupRecoveryReadOnly(OperationContext* opCtx, StorageEngine* storageEngine) {
- invariant(!storageGlobalParams.repair);
-
- setReplSetMemberInStandaloneMode(opCtx, StartupRecoveryMode::kAuto);
-
- FeatureCompatibilityVersion::initializeForStartup(opCtx);
-
- openDatabases(opCtx, storageEngine, [&](auto db) {
- // Ensures all collections meet requirements such as having _id indexes.
- uassertStatusOK(ensureCollectionProperties(opCtx, db, EnsureIndexPolicy::kError));
- });
-}
-
// Perform routine startup recovery procedure.
void startupRecovery(OperationContext* opCtx,
StorageEngine* storageEngine,
StorageEngine::LastShutdownState lastShutdownState,
StartupRecoveryMode mode) {
- invariant(!storageGlobalParams.readOnly && !storageGlobalParams.repair);
+ invariant(!storageGlobalParams.repair);
// Determine whether this is a replica set node running in standalone mode. This must be set
// before determining whether to restart index builds.
@@ -630,8 +616,6 @@ void repairAndRecoverDatabases(OperationContext* opCtx,
if (storageGlobalParams.repair) {
startupRepair(opCtx, storageEngine);
- } else if (storageGlobalParams.readOnly) {
- startupRecoveryReadOnly(opCtx, storageEngine);
} else {
startupRecovery(opCtx, storageEngine, lastShutdownState, StartupRecoveryMode::kAuto);
}
@@ -648,7 +632,7 @@ void runStartupRecoveryInMode(OperationContext* opCtx,
auto const storageEngine = opCtx->getServiceContext()->getStorageEngine();
Lock::GlobalWrite lk(opCtx);
- invariant(isWriteableStorageEngine() && !storageGlobalParams.readOnly);
+ invariant(isWriteableStorageEngine());
invariant(!storageGlobalParams.repair);
const bool usingReplication = repl::ReplicationCoordinator::get(opCtx)->isReplEnabled();
invariant(usingReplication);
diff --git a/src/mongo/db/startup_warnings_mongod.cpp b/src/mongo/db/startup_warnings_mongod.cpp
index ce7bdd92c6c..60e5bc1f134 100644
--- a/src/mongo/db/startup_warnings_mongod.cpp
+++ b/src/mongo/db/startup_warnings_mongod.cpp
@@ -388,5 +388,12 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
{logv2::LogTag::kStartupWarnings},
"Running with --restore. This should only be used when restoring from a backup");
}
+
+ if (repl::ReplSettings::shouldRecoverFromOplogAsStandalone()) {
+ LOGV2_WARNING_OPTIONS(21558,
+ {logv2::LogTag::kStartupWarnings},
+ "Setting mongod to readOnly mode as a result of specifying "
+ "'recoverFromOplogAsStandalone'");
+ }
}
} // namespace mongo
diff --git a/src/mongo/db/storage/control/storage_control.cpp b/src/mongo/db/storage/control/storage_control.cpp
index 11222448eba..8b384cbc72f 100644
--- a/src/mongo/db/storage/control/storage_control.cpp
+++ b/src/mongo/db/storage/control/storage_control.cpp
@@ -83,7 +83,7 @@ void startStorageControls(ServiceContext* serviceContext, bool forTestOnly) {
JournalFlusher::set(serviceContext, std::move(journalFlusher));
}
- if (!storageEngine->isEphemeral() && !storageGlobalParams.readOnly) {
+ if (!storageEngine->isEphemeral() && !storageGlobalParams.queryableBackupMode) {
std::unique_ptr<Checkpointer> checkpointer =
std::make_unique<Checkpointer>(storageEngine->getEngine());
checkpointer->go();
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine_test.cpp
index 88a76d7cab6..f129c04baa2 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine_test.cpp
@@ -77,19 +77,40 @@ class EphemeralForTestKVEngineTest : public ServiceContextTest {
public:
EphemeralForTestKVEngineTest() : _helper(getServiceContext()), _engine(_helper.getEngine()) {}
+ ServiceContext::UniqueOperationContext makeOpCtx() {
+ auto opCtx = makeOperationContext();
+ opCtx->setRecoveryUnit(std::unique_ptr<RecoveryUnit>(_engine->newRecoveryUnit()),
+ WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
+ opCtx->swapLockState(std::make_unique<LockerNoop>(), WithLock::withoutLock());
+ return opCtx;
+ }
+
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ makeOpCtxs(unsigned num) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ opCtxs;
+ opCtxs.reserve(num);
+
+ for (unsigned i = 0; i < num; ++i) {
+ auto client = getServiceContext()->makeClient(std::to_string(i));
+
+ auto opCtx = client->makeOperationContext();
+ opCtx->setRecoveryUnit(std::unique_ptr<RecoveryUnit>(_engine->newRecoveryUnit()),
+ WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
+ opCtx->swapLockState(std::make_unique<LockerNoop>(), WithLock::withoutLock());
+
+ opCtxs.emplace_back(std::move(client), std::move(opCtx));
+ }
+
+ return opCtxs;
+ }
+
protected:
std::unique_ptr<KVHarnessHelper> helper;
KVHarnessHelper _helper;
KVEngine* _engine;
};
-class OperationContextFromKVEngine : public OperationContextNoop {
-public:
- OperationContextFromKVEngine(KVEngine* engine)
- : OperationContextNoop(engine->newRecoveryUnit()) {}
-};
-
-
TEST_F(EphemeralForTestKVEngineTest, AvailableHistoryUpdate) {
NamespaceString nss("a.b");
std::string ident = "collection-1234";
@@ -98,9 +119,9 @@ TEST_F(EphemeralForTestKVEngineTest, AvailableHistoryUpdate) {
std::unique_ptr<mongo::RecordStore> rs;
{
- OperationContextFromKVEngine opCtx(_engine);
- ASSERT_OK(_engine->createRecordStore(&opCtx, nss, ident, defaultCollectionOptions));
- rs = _engine->getRecordStore(&opCtx, nss, ident, defaultCollectionOptions);
+ auto opCtx = makeOpCtx();
+ ASSERT_OK(_engine->createRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions));
+ rs = _engine->getRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions);
ASSERT(rs);
}
@@ -112,10 +133,10 @@ TEST_F(EphemeralForTestKVEngineTest, AvailableHistoryUpdate) {
ASSERT_EQ(_engine->getOldestTimestamp(), currentMaster);
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = makeOpCtx();
+ WriteUnitOfWork uow(opCtx.get());
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx.get(), record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
uow.commit();
}
@@ -135,9 +156,9 @@ TEST_F(EphemeralForTestKVEngineTest, PinningOldestTimestampWithReadTransaction)
std::unique_ptr<mongo::RecordStore> rs;
{
- OperationContextFromKVEngine opCtx(_engine);
- ASSERT_OK(_engine->createRecordStore(&opCtx, nss, ident, defaultCollectionOptions));
- rs = _engine->getRecordStore(&opCtx, nss, ident, defaultCollectionOptions);
+ auto opCtx = makeOpCtx();
+ ASSERT_OK(_engine->createRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions));
+ rs = _engine->getRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions);
ASSERT(rs);
}
@@ -146,24 +167,26 @@ TEST_F(EphemeralForTestKVEngineTest, PinningOldestTimestampWithReadTransaction)
RecordId loc;
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = makeOpCtx();
+ WriteUnitOfWork uow(opCtx.get());
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx.get(), record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
loc = res.getValue();
uow.commit();
}
- OperationContextFromKVEngine opCtxRead(_engine);
+ auto opCtxs = makeOpCtxs(2);
+
+ auto opCtxRead = opCtxs[0].second.get();
RecordData rd;
- ASSERT(rs->findRecord(&opCtxRead, loc, &rd));
+ ASSERT(rs->findRecord(opCtxRead, loc, &rd));
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = opCtxs[1].second.get();
+ WriteUnitOfWork uow(opCtx);
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx, record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
uow.commit();
}
@@ -181,9 +204,9 @@ TEST_F(EphemeralForTestKVEngineTest, SettingOldestTimestampClearsHistory) {
std::unique_ptr<mongo::RecordStore> rs;
{
- OperationContextFromKVEngine opCtx(_engine);
- ASSERT_OK(_engine->createRecordStore(&opCtx, nss, ident, defaultCollectionOptions));
- rs = _engine->getRecordStore(&opCtx, nss, ident, defaultCollectionOptions);
+ auto opCtx = makeOpCtx();
+ ASSERT_OK(_engine->createRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions));
+ rs = _engine->getRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions);
ASSERT(rs);
}
@@ -192,24 +215,26 @@ TEST_F(EphemeralForTestKVEngineTest, SettingOldestTimestampClearsHistory) {
RecordId loc;
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = makeOpCtx();
+ WriteUnitOfWork uow(opCtx.get());
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx.get(), record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
loc = res.getValue();
uow.commit();
}
- OperationContextFromKVEngine opCtxRead(_engine);
+ auto opCtxs = makeOpCtxs(2);
+
+ auto opCtxRead = opCtxs[0].second.get();
RecordData rd;
- ASSERT(rs->findRecord(&opCtxRead, loc, &rd));
+ ASSERT(rs->findRecord(opCtxRead, loc, &rd));
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = opCtxs[1].second.get();
+ WriteUnitOfWork uow(opCtx);
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx, record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
uow.commit();
}
@@ -227,17 +252,17 @@ TEST_F(EphemeralForTestKVEngineTest, SettingOldestTimestampToMax) {
std::unique_ptr<mongo::RecordStore> rs;
{
- OperationContextFromKVEngine opCtx(_engine);
- ASSERT_OK(_engine->createRecordStore(&opCtx, nss, ident, defaultCollectionOptions));
- rs = _engine->getRecordStore(&opCtx, nss, ident, defaultCollectionOptions);
+ auto opCtx = makeOpCtx();
+ ASSERT_OK(_engine->createRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions));
+ rs = _engine->getRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions);
ASSERT(rs);
}
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = makeOpCtx();
+ WriteUnitOfWork uow(opCtx.get());
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx.get(), record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
uow.commit();
}
@@ -258,9 +283,9 @@ TEST_F(EphemeralForTestKVEngineTest, CleanHistoryWithOpenTransaction) {
std::unique_ptr<mongo::RecordStore> rs;
{
- OperationContextFromKVEngine opCtx(_engine);
- ASSERT_OK(_engine->createRecordStore(&opCtx, nss, ident, defaultCollectionOptions));
- rs = _engine->getRecordStore(&opCtx, nss, ident, defaultCollectionOptions);
+ auto opCtx = makeOpCtx();
+ ASSERT_OK(_engine->createRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions));
+ rs = _engine->getRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions);
ASSERT(rs);
}
@@ -269,25 +294,27 @@ TEST_F(EphemeralForTestKVEngineTest, CleanHistoryWithOpenTransaction) {
RecordId loc;
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = makeOpCtx();
+ WriteUnitOfWork uow(opCtx.get());
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx.get(), record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
loc = res.getValue();
uow.commit();
}
- OperationContextFromKVEngine opCtxRead(_engine);
+ auto opCtxs = makeOpCtxs(2);
+
+ auto opCtxRead = opCtxs[0].second.get();
Timestamp readTime1 = _engine->getHistory_forTest().rbegin()->first;
RecordData rd;
- ASSERT(rs->findRecord(&opCtxRead, loc, &rd));
+ ASSERT(rs->findRecord(opCtxRead, loc, &rd));
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = opCtxs[1].second.get();
+ WriteUnitOfWork uow(opCtx);
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx, record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
uow.commit();
}
@@ -295,14 +322,17 @@ TEST_F(EphemeralForTestKVEngineTest, CleanHistoryWithOpenTransaction) {
Timestamp readTime2 = _engine->getHistory_forTest().rbegin()->first;
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = opCtxs[1].second.get();
+ WriteUnitOfWork uow(opCtx);
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx, record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
uow.commit();
}
+    // Destroy the client used for writes before checking use_count().
+ opCtxs.pop_back();
+
Timestamp readTime3 = _engine->getHistory_forTest().rbegin()->first;
_engine->cleanHistory();
@@ -321,44 +351,46 @@ TEST_F(EphemeralForTestKVEngineTest, ReadOlderSnapshotsSimple) {
std::unique_ptr<mongo::RecordStore> rs;
{
- OperationContextFromKVEngine opCtx(_engine);
- ASSERT_OK(_engine->createRecordStore(&opCtx, nss, ident, defaultCollectionOptions));
- rs = _engine->getRecordStore(&opCtx, nss, ident, defaultCollectionOptions);
+ auto opCtx = makeOpCtx();
+ ASSERT_OK(_engine->createRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions));
+ rs = _engine->getRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions);
ASSERT(rs);
}
+ auto opCtxs = makeOpCtxs(2);
+
// Pin oldest timestamp with a read transaction.
- OperationContextFromKVEngine pinningOldest(_engine);
- ASSERT(!rs->findRecord(&pinningOldest, RecordId(1), nullptr));
+ auto pinningOldest = opCtxs[0].second.get();
+ ASSERT(!rs->findRecord(pinningOldest, RecordId(1), nullptr));
// Set readFrom to a timestamp with no committed transactions.
Timestamp readFrom = _engine->getHistory_forTest().rbegin()->first;
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow1(&opCtx);
+ auto opCtx = opCtxs[1].second.get();
+ WriteUnitOfWork uow1(opCtx);
StatusWith<RecordId> res1 =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx, record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res1.getStatus());
RecordId loc1 = res1.getValue();
uow1.commit();
- WriteUnitOfWork uow2(&opCtx);
+ WriteUnitOfWork uow2(opCtx);
StatusWith<RecordId> res2 =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx, record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res2.getStatus());
RecordId loc2 = res2.getValue();
uow2.commit();
RecordData rd;
- opCtx.recoveryUnit()->abandonSnapshot();
- opCtx.recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, readFrom);
- ASSERT(!rs->findRecord(&opCtx, loc1, &rd));
- ASSERT(!rs->findRecord(&opCtx, loc2, &rd));
-
- opCtx.recoveryUnit()->abandonSnapshot();
- opCtx.recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kNoTimestamp);
- ASSERT(rs->findRecord(&opCtx, loc1, &rd));
- ASSERT(rs->findRecord(&opCtx, loc2, &rd));
+ opCtx->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, readFrom);
+ ASSERT(!rs->findRecord(opCtx, loc1, &rd));
+ ASSERT(!rs->findRecord(opCtx, loc2, &rd));
+
+ opCtx->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kNoTimestamp);
+ ASSERT(rs->findRecord(opCtx, loc1, &rd));
+ ASSERT(rs->findRecord(opCtx, loc2, &rd));
}
TEST_F(EphemeralForTestKVEngineTest, ReadOutdatedSnapshot) {
@@ -369,44 +401,50 @@ TEST_F(EphemeralForTestKVEngineTest, ReadOutdatedSnapshot) {
std::unique_ptr<mongo::RecordStore> rs;
{
- OperationContextFromKVEngine opCtx(_engine);
- ASSERT_OK(_engine->createRecordStore(&opCtx, nss, ident, defaultCollectionOptions));
- rs = _engine->getRecordStore(&opCtx, nss, ident, defaultCollectionOptions);
+ auto opCtx = makeOpCtx();
+ ASSERT_OK(_engine->createRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions));
+ rs = _engine->getRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions);
ASSERT(rs);
}
RecordId loc1;
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = makeOpCtx();
+ WriteUnitOfWork uow(opCtx.get());
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx.get(), record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
loc1 = res.getValue();
uow.commit();
}
- OperationContextFromKVEngine opCtxRead(_engine);
+ auto opCtxs = makeOpCtxs(2);
+
+ auto opCtxRead = opCtxs[0].second.get();
RecordData rd;
- ASSERT(rs->findRecord(&opCtxRead, loc1, &rd));
+ ASSERT(rs->findRecord(opCtxRead, loc1, &rd));
Timestamp readFrom = _engine->getHistory_forTest().rbegin()->first;
RecordId loc2;
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = opCtxs[1].second.get();
+ WriteUnitOfWork uow(opCtx);
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx, record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
loc2 = res.getValue();
uow.commit();
}
- ASSERT(rs->findRecord(&opCtxRead, loc1, &rd));
- opCtxRead.recoveryUnit()->abandonSnapshot();
- opCtxRead.recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, readFrom);
+    // Destroy the client used for writes before attempting a read that should
+    // fail with SnapshotTooOld.
+ opCtxs.pop_back();
+
+ ASSERT(rs->findRecord(opCtxRead, loc1, &rd));
+ opCtxRead->recoveryUnit()->abandonSnapshot();
+ opCtxRead->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided,
+ readFrom);
ASSERT_THROWS_CODE(
- rs->findRecord(&opCtxRead, loc1, &rd), DBException, ErrorCodes::SnapshotTooOld);
+ rs->findRecord(opCtxRead, loc1, &rd), DBException, ErrorCodes::SnapshotTooOld);
}
TEST_F(EphemeralForTestKVEngineTest, SetReadTimestampBehindOldestTimestamp) {
@@ -417,18 +455,18 @@ TEST_F(EphemeralForTestKVEngineTest, SetReadTimestampBehindOldestTimestamp) {
std::unique_ptr<mongo::RecordStore> rs;
{
- OperationContextFromKVEngine opCtx(_engine);
- ASSERT_OK(_engine->createRecordStore(&opCtx, nss, ident, defaultCollectionOptions));
- rs = _engine->getRecordStore(&opCtx, nss, ident, defaultCollectionOptions);
+ auto opCtx = makeOpCtx();
+ ASSERT_OK(_engine->createRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions));
+ rs = _engine->getRecordStore(opCtx.get(), nss, ident, defaultCollectionOptions);
ASSERT(rs);
}
RecordId loc1;
{
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = makeOpCtx();
+ WriteUnitOfWork uow(opCtx.get());
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx.get(), record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
loc1 = res.getValue();
uow.commit();
@@ -436,22 +474,23 @@ TEST_F(EphemeralForTestKVEngineTest, SetReadTimestampBehindOldestTimestamp) {
RecordData rd;
Timestamp readFrom = _engine->getHistory_forTest().begin()->first;
- OperationContextFromKVEngine opCtx(_engine);
- WriteUnitOfWork uow(&opCtx);
+ auto opCtx = makeOpCtx();
+ WriteUnitOfWork uow(opCtx.get());
StatusWith<RecordId> res =
- rs->insertRecord(&opCtx, record.c_str(), record.length() + 1, Timestamp());
+ rs->insertRecord(opCtx.get(), record.c_str(), record.length() + 1, Timestamp());
ASSERT_OK(res.getStatus());
RecordId loc2 = res.getValue();
uow.commit();
- opCtx.recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, readFrom);
+ opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, readFrom);
_engine->setOldestTimestamp(Timestamp::max(), true);
- ASSERT_THROWS_CODE(rs->findRecord(&opCtx, loc2, &rd), DBException, ErrorCodes::SnapshotTooOld);
+ ASSERT_THROWS_CODE(
+ rs->findRecord(opCtx.get(), loc2, &rd), DBException, ErrorCodes::SnapshotTooOld);
- opCtx.recoveryUnit()->abandonSnapshot();
- opCtx.recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kNoTimestamp);
- ASSERT(rs->findRecord(&opCtx, loc1, &rd));
- ASSERT(rs->findRecord(&opCtx, loc2, &rd));
+ opCtx->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kNoTimestamp);
+ ASSERT(rs->findRecord(opCtx.get(), loc1, &rd));
+ ASSERT(rs->findRecord(opCtx.get(), loc2, &rd));
}
} // namespace ephemeral_for_test
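The helpers above replace the removed OperationContextFromKVEngine shim with real operation contexts. A minimal sketch of the pattern, mirroring makeOpCtxs() above — each concurrent OperationContext must hang off its own Client, since a Client can only have one active OperationContext at a time:

    // Assumes a ServiceContextTest-style fixture exposing _engine (KVEngine*),
    // as in the test above.
    auto client = getServiceContext()->makeClient("reader");
    auto opCtx = client->makeOperationContext();
    // Attach an engine-specific RecoveryUnit so storage calls get a real snapshot.
    opCtx->setRecoveryUnit(std::unique_ptr<RecoveryUnit>(_engine->newRecoveryUnit()),
                           WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
    // LockerNoop skips real lock acquisition in these storage-level unit tests.
    opCtx->swapLockState(std::make_unique<LockerNoop>(), WithLock::withoutLock());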
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
index 252b3a0616e..e5b3752381d 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
@@ -53,7 +53,7 @@ RecoveryUnit::~RecoveryUnit() {
_abort();
}
-void RecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {
+void RecoveryUnit::doBeginUnitOfWork() {
invariant(!_inUnitOfWork(), toString(_getState()));
_setState(_isActive() ? State::kActive : State::kInactiveInUnitOfWork);
}
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h
index c130c4f348f..3be769ef99b 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h
@@ -45,8 +45,6 @@ public:
RecoveryUnit(KVEngine* parentKVEngine, std::function<void()> cb = nullptr);
~RecoveryUnit();
- void beginUnitOfWork(OperationContext* opCtx) override final;
-
virtual bool waitUntilDurable(OperationContext* opCtx) override;
virtual void setOrderedCommit(bool orderedCommit) override;
@@ -109,6 +107,8 @@ public:
static RecoveryUnit* get(OperationContext* opCtx);
private:
+ void doBeginUnitOfWork() override final;
+
void doCommitUnitOfWork() override final;
void doAbortUnitOfWork() override final;
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit_test.cpp
index 93781b84ed7..2b638c82c3e 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit_test.cpp
@@ -91,7 +91,7 @@ TEST_F(EphemeralForTestRecoveryUnitTestHarness, AbandonSnapshotAbortMode) {
const auto rs = harnessHelper->createRecordStore(opCtx.get(), "table1");
opCtx->lockState()->beginWriteUnitOfWork();
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
StatusWith<RecordId> rid1 = rs->insertRecord(opCtx.get(), "ABC", 3, Timestamp());
StatusWith<RecordId> rid2 = rs->insertRecord(opCtx.get(), "123", 3, Timestamp());
ASSERT_TRUE(rid1.isOK());
diff --git a/src/mongo/db/storage/record_store.cpp b/src/mongo/db/storage/record_store.cpp
index 8dd22c869f6..194bf1f1641 100644
--- a/src/mongo/db/storage/record_store.cpp
+++ b/src/mongo/db/storage/record_store.cpp
@@ -35,22 +35,22 @@
namespace mongo {
namespace {
-void validateWriteAllowed() {
+void validateWriteAllowed(OperationContext* opCtx) {
uassert(ErrorCodes::IllegalOperation,
"Cannot execute a write operation in read-only mode",
- !storageGlobalParams.readOnly);
+ !opCtx->readOnly());
}
} // namespace
void RecordStore::deleteRecord(OperationContext* opCtx, const RecordId& dl) {
- validateWriteAllowed();
+ validateWriteAllowed(opCtx);
doDeleteRecord(opCtx, dl);
}
Status RecordStore::insertRecords(OperationContext* opCtx,
std::vector<Record>* inOutRecords,
const std::vector<Timestamp>& timestamps) {
- validateWriteAllowed();
+ validateWriteAllowed(opCtx);
return doInsertRecords(opCtx, inOutRecords, timestamps);
}
@@ -58,7 +58,7 @@ Status RecordStore::updateRecord(OperationContext* opCtx,
const RecordId& recordId,
const char* data,
int len) {
- validateWriteAllowed();
+ validateWriteAllowed(opCtx);
return doUpdateRecord(opCtx, recordId, data, len);
}
@@ -67,22 +67,22 @@ StatusWith<RecordData> RecordStore::updateWithDamages(OperationContext* opCtx,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages) {
- validateWriteAllowed();
+ validateWriteAllowed(opCtx);
return doUpdateWithDamages(opCtx, loc, oldRec, damageSource, damages);
}
Status RecordStore::truncate(OperationContext* opCtx) {
- validateWriteAllowed();
+ validateWriteAllowed(opCtx);
return doTruncate(opCtx);
}
void RecordStore::cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
- validateWriteAllowed();
+ validateWriteAllowed(opCtx);
doCappedTruncateAfter(opCtx, end, inclusive);
}
Status RecordStore::compact(OperationContext* opCtx) {
- validateWriteAllowed();
+ validateWriteAllowed(opCtx);
return doCompact(opCtx);
}
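The hunk above routes every public write entry point through validateWriteAllowed(opCtx) before dispatching to its do* implementation. A hedged sketch of how a hypothetical additional write method would follow the same convention (frobnicate/doFrobnicate are illustrative names, not real RecordStore methods):

    Status RecordStore::frobnicate(OperationContext* opCtx) {
        // uasserts with IllegalOperation when opCtx->readOnly() is true.
        validateWriteAllowed(opCtx);
        return doFrobnicate(opCtx);
    }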
diff --git a/src/mongo/db/storage/recovery_unit.cpp b/src/mongo/db/storage/recovery_unit.cpp
index 7edc0197999..4727d564daf 100644
--- a/src/mongo/db/storage/recovery_unit.cpp
+++ b/src/mongo/db/storage/recovery_unit.cpp
@@ -92,6 +92,29 @@ void RecoveryUnit::commitRegisteredChanges(boost::optional<Timestamp> commitTime
_executeCommitHandlers(commitTimestamp);
}
+void RecoveryUnit::beginUnitOfWork(bool readOnly) {
+ _readOnly = readOnly;
+ if (!_readOnly) {
+ doBeginUnitOfWork();
+ }
+}
+
+void RecoveryUnit::commitUnitOfWork() {
+ invariant(!_readOnly);
+ doCommitUnitOfWork();
+ assignNextSnapshotId();
+}
+
+void RecoveryUnit::abortUnitOfWork() {
+ invariant(!_readOnly);
+ doAbortUnitOfWork();
+ assignNextSnapshotId();
+}
+
+void RecoveryUnit::endReadOnlyUnitOfWork() {
+ _readOnly = false;
+}
+
void RecoveryUnit::_executeCommitHandlers(boost::optional<Timestamp> commitTimestamp) {
for (auto& change : _changes) {
try {
@@ -158,9 +181,8 @@ void RecoveryUnit::_executeRollbackHandlers() {
}
void RecoveryUnit::validateInUnitOfWork() const {
- invariant(_inUnitOfWork() || storageGlobalParams.readOnly,
- fmt::format(
- "state: {}, readOnly: {}", toString(_getState()), storageGlobalParams.readOnly));
+ invariant(_inUnitOfWork() || _readOnly,
+ fmt::format("state: {}, readOnly: {}", toString(_getState()), _readOnly));
}
} // namespace mongo
diff --git a/src/mongo/db/storage/recovery_unit.h b/src/mongo/db/storage/recovery_unit.h
index a67d1f090f5..04101ad66dc 100644
--- a/src/mongo/db/storage/recovery_unit.h
+++ b/src/mongo/db/storage/recovery_unit.h
@@ -129,9 +129,12 @@ public:
* Marks the beginning of a unit of work. Each call must be matched with exactly one call to
* either commitUnitOfWork or abortUnitOfWork.
*
+     * When called with readOnly=true, no unit of work is started. Calling commitUnitOfWork or
+     * abortUnitOfWork afterwards will trigger an invariant failure.
+ *
* Should be called through WriteUnitOfWork rather than directly.
*/
- virtual void beginUnitOfWork(OperationContext* opCtx) = 0;
+ void beginUnitOfWork(bool readOnly);
/**
* Marks the end of a unit of work and commits all changes registered by calls to onCommit or
@@ -139,10 +142,7 @@ public:
*
* Should be called through WriteUnitOfWork rather than directly.
*/
- void commitUnitOfWork() {
- doCommitUnitOfWork();
- assignNextSnapshotId();
- }
+ void commitUnitOfWork();
/**
* Marks the end of a unit of work and rolls back all changes registered by calls to onRollback
@@ -151,10 +151,14 @@ public:
*
* Should be called through WriteUnitOfWork rather than directly.
*/
- void abortUnitOfWork() {
- doAbortUnitOfWork();
- assignNextSnapshotId();
- }
+ void abortUnitOfWork();
+
+ /**
+     * Marks the end of a read-only unit of work and cleans up any state it set.
+ *
+ * Should be called through WriteUnitOfWork rather than directly.
+ */
+ void endReadOnlyUnitOfWork();
/**
* Transitions the active unit of work to the "prepared" state. Must be called after
@@ -803,6 +807,7 @@ private:
// Sets the snapshot associated with this RecoveryUnit to a new globally unique id number.
void assignNextSnapshotId();
+ virtual void doBeginUnitOfWork() = 0;
virtual void doAbandonSnapshot() = 0;
virtual void doCommitUnitOfWork() = 0;
virtual void doAbortUnitOfWork() = 0;
@@ -816,6 +821,7 @@ private:
std::unique_ptr<Change> _changeForCatalogVisibility;
State _state = State::kInactive;
uint64_t _mySnapshotId;
+ bool _readOnly = false;
};
/**
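Taken together, the contract is: begin with the operation's read-only flag, then end with exactly one of commitUnitOfWork, abortUnitOfWork, or endReadOnlyUnitOfWork, matching how the unit began. A minimal caller-side sketch (in production code this sequencing is driven by WriteUnitOfWork; direct calls are shown only for illustration):

    RecoveryUnit* ru = opCtx->recoveryUnit();
    if (opCtx->readOnly()) {
        ru->beginUnitOfWork(/*readOnly=*/true);   // no unit of work actually starts
        // ... perform reads; commitUnitOfWork/abortUnitOfWork would invariant here ...
        ru->endReadOnlyUnitOfWork();
    } else {
        ru->beginUnitOfWork(/*readOnly=*/false);
        // ... perform writes ...
        ru->commitUnitOfWork();                   // or abortUnitOfWork() on failure
    }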
diff --git a/src/mongo/db/storage/recovery_unit_noop.h b/src/mongo/db/storage/recovery_unit_noop.h
index 4c06eeb62b2..dcc35495b02 100644
--- a/src/mongo/db/storage/recovery_unit_noop.h
+++ b/src/mongo/db/storage/recovery_unit_noop.h
@@ -40,8 +40,6 @@ class OperationContext;
class RecoveryUnitNoop : public RecoveryUnit {
public:
- void beginUnitOfWork(OperationContext* opCtx) final {}
-
bool waitUntilDurable(OperationContext* opCtx) override {
return true;
}
@@ -59,6 +57,8 @@ public:
}
private:
+ void doBeginUnitOfWork() final {}
+
void doCommitUnitOfWork() final {
_executeCommitHandlers(boost::none);
}
diff --git a/src/mongo/db/storage/recovery_unit_test_harness.cpp b/src/mongo/db/storage/recovery_unit_test_harness.cpp
index 0c4cd3b1739..ca9c0ca5f25 100644
--- a/src/mongo/db/storage/recovery_unit_test_harness.cpp
+++ b/src/mongo/db/storage/recovery_unit_test_harness.cpp
@@ -88,7 +88,7 @@ TEST_F(RecoveryUnitTestHarness, CommitUnitOfWork) {
Lock::GlobalLock globalLk(opCtx.get(), MODE_IX);
const auto rs = harnessHelper->createRecordStore(opCtx.get(), "table1");
opCtx->lockState()->beginWriteUnitOfWork();
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
StatusWith<RecordId> s = rs->insertRecord(opCtx.get(), "data", 4, Timestamp());
ASSERT_TRUE(s.isOK());
ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
@@ -102,7 +102,7 @@ TEST_F(RecoveryUnitTestHarness, AbortUnitOfWork) {
Lock::GlobalLock globalLk(opCtx.get(), MODE_IX);
const auto rs = harnessHelper->createRecordStore(opCtx.get(), "table1");
opCtx->lockState()->beginWriteUnitOfWork();
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
StatusWith<RecordId> s = rs->insertRecord(opCtx.get(), "data", 4, Timestamp());
ASSERT_TRUE(s.isOK());
ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
@@ -115,13 +115,13 @@ TEST_F(RecoveryUnitTestHarness, CommitAndRollbackChanges) {
int count = 0;
const auto rs = harnessHelper->createRecordStore(opCtx.get(), "table1");
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
ru->registerChange(std::make_unique<TestChange>(&count));
ASSERT_EQUALS(count, 0);
ru->commitUnitOfWork();
ASSERT_EQUALS(count, 1);
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
ru->registerChange(std::make_unique<TestChange>(&count));
ASSERT_EQUALS(count, 1);
ru->abortUnitOfWork();
@@ -132,7 +132,7 @@ TEST_F(RecoveryUnitTestHarness, CheckIsActiveWithCommit) {
Lock::GlobalLock globalLk(opCtx.get(), MODE_IX);
const auto rs = harnessHelper->createRecordStore(opCtx.get(), "table1");
opCtx->lockState()->beginWriteUnitOfWork();
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
// TODO SERVER-51787: re-enable this.
// ASSERT_TRUE(ru->isActive());
StatusWith<RecordId> s = rs->insertRecord(opCtx.get(), "data", 4, Timestamp());
@@ -145,7 +145,7 @@ TEST_F(RecoveryUnitTestHarness, CheckIsActiveWithAbort) {
Lock::GlobalLock globalLk(opCtx.get(), MODE_IX);
const auto rs = harnessHelper->createRecordStore(opCtx.get(), "table1");
opCtx->lockState()->beginWriteUnitOfWork();
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
// TODO SERVER-51787: re-enable this.
// ASSERT_TRUE(ru->isActive());
StatusWith<RecordId> s = rs->insertRecord(opCtx.get(), "data", 4, Timestamp());
@@ -156,7 +156,7 @@ TEST_F(RecoveryUnitTestHarness, CheckIsActiveWithAbort) {
TEST_F(RecoveryUnitTestHarness, BeginningUnitOfWorkDoesNotIncrementSnapshotId) {
auto snapshotIdBefore = ru->getSnapshotId();
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
ASSERT_EQ(snapshotIdBefore, ru->getSnapshotId());
ru->abortUnitOfWork();
}
@@ -174,14 +174,14 @@ TEST_F(RecoveryUnitTestHarness, AbandonSnapshotIncrementsSnapshotId) {
TEST_F(RecoveryUnitTestHarness, CommitUnitOfWorkIncrementsSnapshotId) {
auto snapshotIdBefore = ru->getSnapshotId();
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
ru->commitUnitOfWork();
ASSERT_NE(snapshotIdBefore, ru->getSnapshotId());
}
TEST_F(RecoveryUnitTestHarness, AbortUnitOfWorkIncrementsSnapshotId) {
auto snapshotIdBefore = ru->getSnapshotId();
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
ru->abortUnitOfWork();
ASSERT_NE(snapshotIdBefore, ru->getSnapshotId());
}
@@ -195,7 +195,7 @@ TEST_F(RecoveryUnitTestHarness, AbandonSnapshotCommitMode) {
const auto rs = harnessHelper->createRecordStore(opCtx.get(), "table1");
opCtx->lockState()->beginWriteUnitOfWork();
- ru->beginUnitOfWork(opCtx.get());
+ ru->beginUnitOfWork(opCtx->readOnly());
StatusWith<RecordId> rid1 = rs->insertRecord(opCtx.get(), "ABC", 3, Timestamp());
StatusWith<RecordId> rid2 = rs->insertRecord(opCtx.get(), "123", 3, Timestamp());
ASSERT_TRUE(rid1.isOK());
@@ -231,6 +231,17 @@ TEST_F(RecoveryUnitTestHarness, AbandonSnapshotCommitMode) {
ASSERT_EQ(strncmp(recordAfterAbandon->data.data(), "123", 3), 0);
}
+TEST_F(RecoveryUnitTestHarness, FlipReadOnly) {
+ ru->beginUnitOfWork(/*readOnly=*/true);
+ ru->endReadOnlyUnitOfWork();
+
+ ru->beginUnitOfWork(/*readOnly=*/false);
+ ru->commitUnitOfWork();
+
+ ru->beginUnitOfWork(/*readOnly=*/false);
+ ru->abortUnitOfWork();
+}
+
DEATH_TEST_F(RecoveryUnitTestHarness, RegisterChangeMustBeInUnitOfWork, "invariant") {
int count = 0;
opCtx->recoveryUnit()->registerChange(std::make_unique<TestChange>(&count));
@@ -245,7 +256,7 @@ DEATH_TEST_F(RecoveryUnitTestHarness, AbortMustBeInUnitOfWork, "invariant") {
}
DEATH_TEST_F(RecoveryUnitTestHarness, CannotHaveUnfinishedUnitOfWorkOnExit, "invariant") {
- opCtx->recoveryUnit()->beginUnitOfWork(opCtx.get());
+ opCtx->recoveryUnit()->beginUnitOfWork(opCtx->readOnly());
}
DEATH_TEST_F(RecoveryUnitTestHarness, PrepareMustBeInUnitOfWork, "invariant") {
@@ -258,14 +269,24 @@ DEATH_TEST_F(RecoveryUnitTestHarness, PrepareMustBeInUnitOfWork, "invariant") {
}
DEATH_TEST_F(RecoveryUnitTestHarness, WaitUntilDurableMustBeOutOfUnitOfWork, "invariant") {
- opCtx->recoveryUnit()->beginUnitOfWork(opCtx.get());
+ opCtx->recoveryUnit()->beginUnitOfWork(opCtx->readOnly());
opCtx->recoveryUnit()->waitUntilDurable(opCtx.get());
}
DEATH_TEST_F(RecoveryUnitTestHarness, AbandonSnapshotMustBeOutOfUnitOfWork, "invariant") {
- opCtx->recoveryUnit()->beginUnitOfWork(opCtx.get());
+ opCtx->recoveryUnit()->beginUnitOfWork(opCtx->readOnly());
opCtx->recoveryUnit()->abandonSnapshot();
}
+DEATH_TEST_F(RecoveryUnitTestHarness, CommitInReadOnly, "invariant") {
+ opCtx->recoveryUnit()->beginUnitOfWork(/*readOnly=*/true);
+ opCtx->recoveryUnit()->commitUnitOfWork();
+}
+
+DEATH_TEST_F(RecoveryUnitTestHarness, AbortInReadOnly, "invariant") {
+ opCtx->recoveryUnit()->beginUnitOfWork(/*readOnly=*/true);
+ opCtx->recoveryUnit()->abortUnitOfWork();
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index 7a973d9883f..313dcb8c439 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -100,8 +100,7 @@ public:
virtual ~Factory() {}
/**
- * Return a new instance of the StorageEngine. The lockFile parameter may be null if
- * params.readOnly is set. Caller owns the returned pointer.
+     * Return a new instance of the StorageEngine; ownership transfers to the caller
+     * through the returned unique_ptr.
*/
virtual std::unique_ptr<StorageEngine> create(
OperationContext* opCtx,
@@ -161,13 +160,11 @@ public:
virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const = 0;
/**
- * Returns whether the engine supports read-only mode. If read-only mode is enabled, the
- * engine may be started on a read-only filesystem (either mounted read-only or with
- * read-only permissions). If readOnly mode is enabled, it is undefined behavior to call
- * methods that write data (e.g. insertRecord). This method is provided on the Factory
- * because it must be called before the storageEngine is instantiated.
+ * Returns whether the engine supports queryable backup mode. If queryable backup mode is
+ * enabled, user writes are not permitted but internally generated writes are still
+ * permitted.
*/
- virtual bool supportsReadOnly() const {
+ virtual bool supportsQueryableBackupMode() const {
return false;
}
};
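A storage engine opts in by overriding the renamed Factory hook; a sketch, assuming a hypothetical engine factory (WiredTiger's real override appears later in this diff):

    class MyEngineFactory : public StorageEngine::Factory {
    public:
        // Hypothetical engine that can serve reads from a queryable backup.
        bool supportsQueryableBackupMode() const final {
            return true;
        }
        // ... create(), getCanonicalName(), validateMetadata(), etc. ...
    };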
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index 5f5e0de5a9d..a70e6039c23 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -797,10 +797,6 @@ void StorageEngineImpl::cleanShutdown() {
StorageEngineImpl::~StorageEngineImpl() {}
void StorageEngineImpl::startTimestampMonitor() {
- if (storageGlobalParams.readOnly) {
- return;
- }
-
// Unless explicitly disabled, all storage engines should create a TimestampMonitor for
// drop-pending internal idents, even if they do not support pending drops for collections
// and indexes.
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index 7c345ddcfe6..eb6f1e7dc40 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -85,19 +85,16 @@ StorageEngine::LastShutdownState initializeStorageEngine(OperationContext* opCtx
const std::string dbpath = storageGlobalParams.dbpath;
- if (!storageGlobalParams.readOnly) {
- StorageRepairObserver::set(service, std::make_unique<StorageRepairObserver>(dbpath));
- auto repairObserver = StorageRepairObserver::get(service);
-
- if (storageGlobalParams.repair) {
- repairObserver->onRepairStarted();
- } else if (repairObserver->isIncomplete()) {
- LOGV2_FATAL_NOTRACE(
- 50922,
- "An incomplete repair has been detected! This is likely because a repair "
- "operation unexpectedly failed before completing. MongoDB will not start up "
- "again without --repair.");
- }
+ StorageRepairObserver::set(service, std::make_unique<StorageRepairObserver>(dbpath));
+ auto repairObserver = StorageRepairObserver::get(service);
+
+ if (storageGlobalParams.repair) {
+ repairObserver->onRepairStarted();
+ } else if (repairObserver->isIncomplete()) {
+ LOGV2_FATAL_NOTRACE(50922,
+ "An incomplete repair has been detected! This is likely because a "
+ "repair operation unexpectedly failed before completing. MongoDB will "
+ "not start up again without --repair.");
}
if (auto existingStorageEngine = StorageEngineMetadata::getStorageEngineForPath(dbpath)) {
@@ -134,12 +131,12 @@ StorageEngine::LastShutdownState initializeStorageEngine(OperationContext* opCtx
<< storageGlobalParams.engine,
factory);
- if (storageGlobalParams.readOnly) {
+ if (storageGlobalParams.queryableBackupMode) {
uassert(34368,
- str::stream()
- << "Server was started in read-only mode, but the configured storage engine, "
- << storageGlobalParams.engine << ", does not support read-only operation",
- factory->supportsReadOnly());
+ str::stream() << "Server was started in queryable backup mode, but the configured "
+ << "storage engine, " << storageGlobalParams.engine
+ << ", does not support queryable backup mode",
+ factory->supportsQueryableBackupMode());
}
std::unique_ptr<StorageEngineMetadata> metadata;
@@ -147,13 +144,6 @@ StorageEngine::LastShutdownState initializeStorageEngine(OperationContext* opCtx
metadata = StorageEngineMetadata::forPath(dbpath);
}
- if (storageGlobalParams.readOnly) {
- uassert(34415,
- "Server was started in read-only mode, but the storage metadata file was not"
- " found.",
- metadata.get());
- }
-
// Validate options in metadata against current startup options.
if (metadata.get()) {
uassertStatusOK(factory->validateMetadata(*metadata, storageGlobalParams));
@@ -221,7 +211,6 @@ StorageEngine::LastShutdownState initializeStorageEngine(OperationContext* opCtx
// Write a new metadata file if it is not present.
if (!metadata.get() &&
(initFlags & StorageEngineInitFlags::kSkipMetadataFile) == StorageEngineInitFlags{}) {
- invariant(!storageGlobalParams.readOnly);
metadata.reset(new StorageEngineMetadata(storageGlobalParams.dbpath));
metadata->setStorageEngine(factory->getCanonicalName().toString());
metadata->setStorageEngineOptions(factory->createMetadataOptions(storageGlobalParams));
@@ -297,18 +286,13 @@ void createLockFile(ServiceContext* service) {
}
const bool wasUnclean = lockFile->createdByUncleanShutdown();
const auto openStatus = lockFile->open();
- if (storageGlobalParams.readOnly && openStatus == ErrorCodes::IllegalOperation) {
+ if (openStatus == ErrorCodes::IllegalOperation) {
lockFile = boost::none;
} else {
uassertStatusOK(openStatus);
}
if (wasUnclean) {
- if (storageGlobalParams.readOnly) {
- LOGV2_FATAL_NOTRACE(34416,
- "Attempted to open dbpath in readOnly mode, but the server was "
- "previously not shut down cleanly.");
- }
LOGV2_WARNING(22271,
"Detected unclean shutdown - Lock file is not empty",
"lockFile"_attr = lockFile->getFilespec());
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index d2e48977c70..18fe6c43b70 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -68,7 +68,7 @@ public:
bob.append("dropPendingIdents",
static_cast<long long>(engine->getDropPendingIdents().size()));
bob.append("supportsSnapshotReadConcern", engine->supportsReadConcernSnapshot());
- bob.append("readOnly", storageGlobalParams.readOnly);
+ bob.append("readOnly", !opCtx->getServiceContext()->userWritesAllowed());
bob.append("persistent", !engine->isEphemeral());
bob.append("backupCursorOpen", backupCursorHooks->isBackupCursorOpen());
diff --git a/src/mongo/db/storage/storage_options.cpp b/src/mongo/db/storage/storage_options.cpp
index 69a0f2aa6dc..ca2d9de84dc 100644
--- a/src/mongo/db/storage/storage_options.cpp
+++ b/src/mongo/db/storage/storage_options.cpp
@@ -55,7 +55,7 @@ void StorageGlobalParams::reset() {
noTableScan.store(false);
directoryperdb = false;
syncdelay = 60.0;
- readOnly = false;
+ queryableBackupMode = false;
groupCollections = false;
oplogMinRetentionHours.store(0.0);
allowOplogTruncation = true;
diff --git a/src/mongo/db/storage/storage_options.h b/src/mongo/db/storage/storage_options.h
index 8e95c4c934f..0455f09d245 100644
--- a/src/mongo/db/storage/storage_options.h
+++ b/src/mongo/db/storage/storage_options.h
@@ -105,10 +105,9 @@ struct StorageGlobalParams {
AtomicDouble syncdelay; // seconds between fsyncs
// --queryableBackupMode
- // Puts MongoD into "read-only" mode. MongoD will not write any data to the underlying
- // filesystem. Note that read operations may require writes. For example, a sort on a large
- // dataset may fail if it requires spilling to disk.
- bool readOnly;
+ // Prevents user-originating operations from performing writes to the server. Internally
+ // generated writes are still permitted.
+ bool queryableBackupMode;
// --groupCollections
// Dictate to the storage engine that it should attempt to create new MongoDB collections from
diff --git a/src/mongo/db/storage/storage_repair_observer.cpp b/src/mongo/db/storage/storage_repair_observer.cpp
index 22b76a6a39c..44bc890f3e8 100644
--- a/src/mongo/db/storage/storage_repair_observer.cpp
+++ b/src/mongo/db/storage/storage_repair_observer.cpp
@@ -65,8 +65,6 @@ const auto getRepairObserver =
} // namespace
StorageRepairObserver::StorageRepairObserver(const std::string& dbpath) {
- invariant(!storageGlobalParams.readOnly);
-
using boost::filesystem::path;
_repairIncompleteFilePath = path(dbpath) / path(kRepairIncompleteFileName);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index c54af529d49..6d9686f2341 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -233,10 +233,9 @@ WiredTigerIndex::WiredTigerIndex(OperationContext* ctx,
StringData ident,
KeyFormat rsKeyFormat,
const IndexDescriptor* desc,
- bool isLogged,
- bool isReadOnly)
+ bool isLogged)
: SortedDataInterface(ident,
- _handleVersionInfo(ctx, uri, desc, isLogged, isReadOnly),
+ _handleVersionInfo(ctx, uri, desc, isLogged),
Ordering::make(desc->keyPattern()),
rsKeyFormat),
_uri(uri),
@@ -633,8 +632,7 @@ StatusWith<bool> WiredTigerIndex::_checkDups(OperationContext* opCtx,
KeyString::Version WiredTigerIndex::_handleVersionInfo(OperationContext* ctx,
const std::string& uri,
const IndexDescriptor* desc,
- bool isLogged,
- bool isReadOnly) {
+ bool isLogged) {
auto version = WiredTigerUtil::checkApplicationMetadataFormatVersion(
ctx, uri, kMinimumIndexVersion, kMaximumIndexVersion);
if (!version.isOK()) {
@@ -661,9 +659,7 @@ KeyString::Version WiredTigerIndex::_handleVersionInfo(OperationContext* ctx,
fassertFailedWithStatusNoTrace(31179, versionStatus);
}
- if (!isReadOnly) {
- uassertStatusOK(WiredTigerUtil::setTableLogging(ctx, uri, isLogged));
- }
+ uassertStatusOK(WiredTigerUtil::setTableLogging(ctx, uri, isLogged));
/*
* Index data format 6, 11, and 13 correspond to KeyString version V0 and data format 8, 12, and
@@ -1484,10 +1480,8 @@ WiredTigerIndexUnique::WiredTigerIndexUnique(OperationContext* ctx,
StringData ident,
KeyFormat rsKeyFormat,
const IndexDescriptor* desc,
- bool isLogged,
- bool isReadOnly)
- : WiredTigerIndex(ctx, uri, ident, rsKeyFormat, desc, isLogged, isReadOnly),
- _partial(desc->isPartial()) {
+ bool isLogged)
+ : WiredTigerIndex(ctx, uri, ident, rsKeyFormat, desc, isLogged), _partial(desc->isPartial()) {
// _id indexes must use WiredTigerIdIndex
invariant(!isIdIndex());
// All unique indexes should be in the timestamp-safe format version as of version 4.2.
@@ -1573,9 +1567,8 @@ WiredTigerIdIndex::WiredTigerIdIndex(OperationContext* ctx,
const std::string& uri,
StringData ident,
const IndexDescriptor* desc,
- bool isLogged,
- bool isReadOnly)
- : WiredTigerIndex(ctx, uri, ident, KeyFormat::Long, desc, isLogged, isReadOnly) {
+ bool isLogged)
+ : WiredTigerIndex(ctx, uri, ident, KeyFormat::Long, desc, isLogged) {
invariant(isIdIndex());
}
@@ -1805,9 +1798,8 @@ WiredTigerIndexStandard::WiredTigerIndexStandard(OperationContext* ctx,
StringData ident,
KeyFormat rsKeyFormat,
const IndexDescriptor* desc,
- bool isLogged,
- bool isReadOnly)
- : WiredTigerIndex(ctx, uri, ident, rsKeyFormat, desc, isLogged, isReadOnly) {}
+ bool isLogged)
+ : WiredTigerIndex(ctx, uri, ident, rsKeyFormat, desc, isLogged) {}
std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexStandard::newCursor(
OperationContext* opCtx, bool forward) const {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index 5a94980c22c..44e1bb16ad9 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -126,8 +126,7 @@ public:
StringData ident,
KeyFormat rsKeyFormat,
const IndexDescriptor* desc,
- bool isLogged,
- bool readOnly);
+ bool isLogged);
virtual Status insert(OperationContext* opCtx,
const KeyString::Value& keyString,
@@ -232,8 +231,7 @@ protected:
KeyString::Version _handleVersionInfo(OperationContext* ctx,
const std::string& uri,
const IndexDescriptor* desc,
- bool isLogged,
- bool isReadOnly);
+ bool isLogged);
RecordId _decodeRecordIdAtEnd(const void* buffer, size_t size);
@@ -263,8 +261,7 @@ public:
StringData ident,
KeyFormat rsKeyFormat,
const IndexDescriptor* desc,
- bool isLogged,
- bool readOnly = false);
+ bool isLogged);
std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool forward) const override;
@@ -305,8 +302,7 @@ public:
const std::string& uri,
StringData ident,
const IndexDescriptor* desc,
- bool isLogged,
- bool readOnly = false);
+ bool isLogged);
std::unique_ptr<Cursor> newCursor(OperationContext* opCtx,
bool isForward = true) const override;
@@ -357,8 +353,7 @@ public:
StringData ident,
KeyFormat rsKeyFormat,
const IndexDescriptor* desc,
- bool isLogged,
- bool readOnly = false);
+ bool isLogged);
std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool forward) const override;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
index da82969596e..b674bd24111 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
@@ -126,8 +126,7 @@ public:
wiredTigerGlobalOptions.getMaxHistoryFileSizeMB(),
params.dur,
ephemeral,
- params.repair,
- params.readOnly);
+ params.repair);
kv->setRecordStoreExtraOptions(wiredTigerGlobalOptions.collectionConfig);
kv->setSortedDataInterfaceExtraOptions(wiredTigerGlobalOptions.indexConfig);
@@ -204,7 +203,7 @@ public:
return builder.obj();
}
- bool supportsReadOnly() const final {
+ bool supportsQueryableBackupMode() const final {
return true;
}
};
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 244a11ac071..66f419dd63e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -143,12 +143,7 @@ boost::filesystem::path getOngoingBackupPath() {
} // namespace
-bool WiredTigerFileVersion::shouldDowngrade(bool readOnly, bool hasRecoveryTimestamp) {
- if (readOnly) {
- // A read-only state must not have upgraded. Nor could it downgrade.
- return false;
- }
-
+bool WiredTigerFileVersion::shouldDowngrade(bool hasRecoveryTimestamp) {
const auto replCoord = repl::ReplicationCoordinator::get(getGlobalServiceContext());
if (replCoord && replCoord->getMemberState().arbiter()) {
// SERVER-35361: Arbiters will no longer downgrade their data files. To downgrade
@@ -304,8 +299,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
size_t maxHistoryFileSizeMB,
bool durable,
bool ephemeral,
- bool repair,
- bool readOnly)
+ bool repair)
: _clockSource(cs),
_oplogManager(std::make_unique<WiredTigerOplogManager>()),
_canonicalName(canonicalName),
@@ -314,7 +308,6 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
_durable(durable),
_ephemeral(ephemeral),
_inRepairMode(repair),
- _readOnly(readOnly),
_keepDataHistory(serverGlobalParams.enableMajorityReadConcern) {
_pinnedOplogTimestamp.store(Timestamp::max().asULL());
boost::filesystem::path journalPath = path;
@@ -359,8 +352,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
// The setting may have a later setting override it if not using the journal. We make it
// unconditional here because even nojournal may need this setting if it is a transition
// from using the journal.
- ss << "log=(enabled=true,remove=" << (_readOnly ? "false" : "true")
- << ",path=journal,compressor=";
+ ss << "log=(enabled=true,remove=true,path=journal,compressor=";
ss << wiredTigerGlobalOptions.journalCompressor << "),";
ss << "builtin_extension_config=(zstd=(compression_level="
<< wiredTigerGlobalOptions.zstdCompressorLevel << ")),";
@@ -530,7 +522,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
setOldestActiveTransactionTimestampCallback(
[](Timestamp) { return StatusWith(boost::make_optional(Timestamp::min())); });
- if (!_readOnly && !_ephemeral) {
+ if (!_ephemeral) {
if (!_recoveryTimestamp.isNull()) {
// If the oldest/initial data timestamps were unset (there was no persisted durable
// history), initialize them to the recovery timestamp.
@@ -575,7 +567,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
_sizeStorerUri = _uri("sizeStorer");
WiredTigerSession session(_conn);
- if (!_readOnly && repair && _hasUri(session.getSession(), _sizeStorerUri)) {
+ if (repair && _hasUri(session.getSession(), _sizeStorerUri)) {
LOGV2(22316, "Repairing size cache");
auto status = _salvageIfNeeded(_sizeStorerUri.c_str());
@@ -583,7 +575,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
fassertNoTrace(28577, status);
}
- _sizeStorer = std::make_unique<WiredTigerSizeStorer>(_conn, _sizeStorerUri, _readOnly);
+ _sizeStorer = std::make_unique<WiredTigerSizeStorer>(_conn, _sizeStorerUri);
_runTimeConfigParam.reset(makeServerParameter<WiredTigerEngineRuntimeConfigParameter>(
"wiredTigerEngineRuntimeConfig", ServerParameterType::kRuntimeOnly));
_runTimeConfigParam->_data.second = this;
@@ -740,9 +732,7 @@ void WiredTigerKVEngine::cleanShutdown() {
_sessionCache->shuttingDown();
- if (!_readOnly) {
- syncSizeInfo(/*syncToDisk=*/true);
- }
+ syncSizeInfo(/*syncToDisk=*/true);
// The size storer has to be destructed after the session cache has shut down. This sets the
// shutdown flag internally in the session cache. As operations get interrupted during shutdown,
@@ -782,7 +772,7 @@ void WiredTigerKVEngine::cleanShutdown() {
quickExit(EXIT_SUCCESS);
}
- if (_fileVersion.shouldDowngrade(_readOnly, !_recoveryTimestamp.isNull())) {
+ if (_fileVersion.shouldDowngrade(!_recoveryTimestamp.isNull())) {
auto startTime = Date_t::now();
LOGV2(22324,
"Closing WiredTiger in preparation for reconfiguring",
@@ -1604,7 +1594,6 @@ std::unique_ptr<RecordStore> WiredTigerKVEngine::getRecordStore(OperationContext
params.isLogged = isLogged;
params.cappedCallback = nullptr;
params.sizeStorer = _sizeStorer.get();
- params.isReadOnly = _readOnly;
params.tracksSizeAdjustments = true;
params.forceUpdateWithFullDocument = options.timeseries != boost::none;
@@ -1713,33 +1702,21 @@ std::unique_ptr<SortedDataInterface> WiredTigerKVEngine::getSortedDataInterface(
if (desc->isIdIndex()) {
invariant(!collOptions.clusteredIndex);
return std::make_unique<WiredTigerIdIndex>(
- opCtx, _uri(ident), ident, desc, WiredTigerUtil::useTableLogging(nss), _readOnly);
+ opCtx, _uri(ident), ident, desc, WiredTigerUtil::useTableLogging(nss));
}
auto keyFormat = (collOptions.clusteredIndex) ? KeyFormat::String : KeyFormat::Long;
if (desc->unique()) {
- return std::make_unique<WiredTigerIndexUnique>(opCtx,
- _uri(ident),
- ident,
- keyFormat,
- desc,
- WiredTigerUtil::useTableLogging(nss),
- _readOnly);
+ return std::make_unique<WiredTigerIndexUnique>(
+ opCtx, _uri(ident), ident, keyFormat, desc, WiredTigerUtil::useTableLogging(nss));
}
- return std::make_unique<WiredTigerIndexStandard>(opCtx,
- _uri(ident),
- ident,
- keyFormat,
- desc,
- WiredTigerUtil::useTableLogging(nss),
- _readOnly);
+ return std::make_unique<WiredTigerIndexStandard>(
+ opCtx, _uri(ident), ident, keyFormat, desc, WiredTigerUtil::useTableLogging(nss));
}
std::unique_ptr<RecordStore> WiredTigerKVEngine::makeTemporaryRecordStore(OperationContext* opCtx,
StringData ident,
KeyFormat keyFormat) {
- invariant(!_readOnly || !recoverToOplogTimestamp.empty());
-
_ensureIdentPath(ident);
WiredTigerSession wtSession(_conn);
@@ -1780,7 +1757,6 @@ std::unique_ptr<RecordStore> WiredTigerKVEngine::makeTemporaryRecordStore(Operat
params.sizeStorer = nullptr;
// Temporary collections do not need to reconcile collection size/counts.
params.tracksSizeAdjustments = false;
- params.isReadOnly = false;
params.forceUpdateWithFullDocument = false;
std::unique_ptr<WiredTigerRecordStore> rs;
@@ -1937,7 +1913,7 @@ bool WiredTigerKVEngine::haveDropsQueued() const {
Date_t now = _clockSource->now();
Milliseconds delta = now - Date_t::fromMillisSinceEpoch(_previousCheckedDropsQueued.load());
- if (!_readOnly && _sizeStorerSyncTracker.intervalHasElapsed()) {
+ if (_sizeStorerSyncTracker.intervalHasElapsed()) {
_sizeStorerSyncTracker.resetLastTime();
syncSizeInfo(false);
}
@@ -2453,7 +2429,7 @@ StatusWith<Timestamp> WiredTigerKVEngine::recoverToStableTimestamp(OperationCont
_highestSeenDurableTimestamp = stableTimestamp.asULL();
}
- _sizeStorer = std::make_unique<WiredTigerSizeStorer>(_conn, _sizeStorerUri, _readOnly);
+ _sizeStorer = std::make_unique<WiredTigerSizeStorer>(_conn, _sizeStorerUri);
return {stableTimestamp};
}
@@ -2538,10 +2514,6 @@ boost::optional<Timestamp> WiredTigerKVEngine::getOplogNeededForCrashRecovery()
return boost::none;
}
- if (_readOnly) {
- return boost::none;
- }
-
return Timestamp(_oplogNeededForCrashRecovery.load());
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 55f3155d338..c4cecf51f04 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -71,7 +71,7 @@ struct WiredTigerFileVersion {
inline static const std::string kLatestWTRelease = "compatibility=(release=10.0)";
StartupVersion _startupVersion;
- bool shouldDowngrade(bool readOnly, bool hasRecoveryTimestamp);
+ bool shouldDowngrade(bool hasRecoveryTimestamp);
std::string getDowngradeString();
};
@@ -109,8 +109,7 @@ public:
size_t maxHistoryFileSizeMB,
bool durable,
bool ephemeral,
- bool repair,
- bool readOnly);
+ bool repair);
~WiredTigerKVEngine();
@@ -472,7 +471,6 @@ private:
bool _durable;
bool _ephemeral; // whether we are using the in-memory mode of the WT engine
const bool _inRepairMode;
- bool _readOnly;
// If _keepDataHistory is true, then the storage engine keeps all history after the stable
// timestamp, and WiredTigerKVEngine is responsible for advancing the oldest timestamp. If
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
index 9917d95e0f2..c587811c169 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
@@ -99,8 +99,7 @@ private:
0,
true,
false,
- _forRepair,
- false);
+ _forRepair);
}
const std::unique_ptr<ClockSource> _cs = std::make_unique<ClockSourceMock>();
@@ -514,29 +513,29 @@ TEST_F(WiredTigerKVEngineTest, WiredTigerDowngrade) {
// (Generic FCV reference): When FCV is kLatest, no downgrade is necessary.
serverGlobalParams.mutableFeatureCompatibility.setVersion(multiversion::GenericFCV::kLatest);
- ASSERT_FALSE(version.shouldDowngrade(/*readOnly=*/false, /*hasRecoveryTimestamp=*/false));
+ ASSERT_FALSE(version.shouldDowngrade(/*hasRecoveryTimestamp=*/false));
ASSERT_EQ(WiredTigerFileVersion::kLatestWTRelease, version.getDowngradeString());
// (Generic FCV reference): When FCV is kLastContinuous or kLastLTS, a downgrade may be needed.
serverGlobalParams.mutableFeatureCompatibility.setVersion(
multiversion::GenericFCV::kLastContinuous);
- ASSERT_TRUE(version.shouldDowngrade(/*readOnly=*/false, /*hasRecoveryTimestamp=*/false));
+ ASSERT_TRUE(version.shouldDowngrade(/*hasRecoveryTimestamp=*/false));
ASSERT_EQ(WiredTigerFileVersion::kLastContinuousWTRelease, version.getDowngradeString());
serverGlobalParams.mutableFeatureCompatibility.setVersion(multiversion::GenericFCV::kLastLTS);
- ASSERT_TRUE(version.shouldDowngrade(/*readOnly=*/false, /*hasRecoveryTimestamp=*/false));
+ ASSERT_TRUE(version.shouldDowngrade(/*hasRecoveryTimestamp=*/false));
ASSERT_EQ(WiredTigerFileVersion::kLastLTSWTRelease, version.getDowngradeString());
// (Generic FCV reference): While we're in a semi-downgraded state, we shouldn't try downgrading
// the WiredTiger compatibility version.
serverGlobalParams.mutableFeatureCompatibility.setVersion(
multiversion::GenericFCV::kDowngradingFromLatestToLastContinuous);
- ASSERT_FALSE(version.shouldDowngrade(/*readOnly=*/false, /*hasRecoveryTimestamp=*/false));
+ ASSERT_FALSE(version.shouldDowngrade(/*hasRecoveryTimestamp=*/false));
ASSERT_EQ(WiredTigerFileVersion::kLatestWTRelease, version.getDowngradeString());
serverGlobalParams.mutableFeatureCompatibility.setVersion(
multiversion::GenericFCV::kDowngradingFromLatestToLastLTS);
- ASSERT_FALSE(version.shouldDowngrade(/*readOnly=*/false, /*hasRecoveryTimestamp=*/false));
+ ASSERT_FALSE(version.shouldDowngrade(/*hasRecoveryTimestamp=*/false));
ASSERT_EQ(WiredTigerFileVersion::kLatestWTRelease, version.getDowngradeString());
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index b59a4bdaef5..014c7e6e508 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -927,9 +927,7 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine,
}
}
- if (!params.isReadOnly) {
- uassertStatusOK(WiredTigerUtil::setTableLogging(ctx, _uri, _isLogged));
- }
+ uassertStatusOK(WiredTigerUtil::setTableLogging(ctx, _uri, _isLogged));
if (_isOplog) {
invariant(_keyFormat == KeyFormat::Long);
@@ -1006,14 +1004,10 @@ void WiredTigerRecordStore::checkSize(OperationContext* opCtx) {
}
void WiredTigerRecordStore::postConstructorInit(OperationContext* opCtx) {
- // When starting up with recoverFromOplogAsStandalone=true, the readOnly flag is initially set
- // to false to allow oplog recovery to run and perform its necessary writes. After recovery is
- // complete, the readOnly flag gets flipped to true. Because of this subtlety, we avoid
- // calculating the oplog stones when recoverFromOplogAsStandalone=true as the RecordStore
- // construction for the oplog happens before the readOnly flag gets flipped to true.
- if (NamespaceString::oplog(ns()) &&
- !(storageGlobalParams.repair || storageGlobalParams.readOnly ||
- repl::ReplSettings::shouldRecoverFromOplogAsStandalone())) {
+    // If the server was started in read-only mode, skip calculating the oplog stones; the
+    // OplogCapMaintainerThread is not started in that case.
+ if (NamespaceString::oplog(ns()) && opCtx->getServiceContext()->userWritesAllowed() &&
+ !storageGlobalParams.repair) {
_oplogStones = std::make_shared<OplogStones>(opCtx, this);
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 7630cc0900b..e00ae177f95 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -114,7 +114,6 @@ public:
boost::optional<int64_t> oplogMaxSize;
CappedCallback* cappedCallback;
WiredTigerSizeStorer* sizeStorer;
- bool isReadOnly;
bool tracksSizeAdjustments;
bool forceUpdateWithFullDocument;
};
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index a2d9c5f7e99..32e0257fc4a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -952,7 +952,7 @@ TEST(WiredTigerRecordStoreTest, GetLatestOplogTest) {
// 1) Initialize the top of oplog to "1".
ServiceContext::UniqueOperationContext op1(harnessHelper->newOperationContext());
- op1->recoveryUnit()->beginUnitOfWork(op1.get());
+ op1->recoveryUnit()->beginUnitOfWork(op1->readOnly());
Timestamp tsOne = Timestamp(
static_cast<unsigned long long>(_oplogOrderInsertOplog(op1.get(), rs, 1).getLong()));
op1->recoveryUnit()->commitUnitOfWork();
@@ -960,7 +960,7 @@ TEST(WiredTigerRecordStoreTest, GetLatestOplogTest) {
ASSERT_EQ(tsOne, wtrs->getLatestOplogTimestamp(op1.get()));
// 2) Open a hole at time "2".
- op1->recoveryUnit()->beginUnitOfWork(op1.get());
+ op1->recoveryUnit()->beginUnitOfWork(op1->readOnly());
// Don't save the return value because the compiler complains about unused variables.
_oplogOrderInsertOplog(op1.get(), rs, 2);
// Querying with the recovery unit with a snapshot will not return the uncommitted value.
@@ -971,7 +971,7 @@ TEST(WiredTigerRecordStoreTest, GetLatestOplogTest) {
Client::initThread("client2");
ServiceContext::UniqueOperationContext op2(harnessHelper->newOperationContext());
- op2->recoveryUnit()->beginUnitOfWork(op2.get());
+ op2->recoveryUnit()->beginUnitOfWork(op2->readOnly());
Timestamp tsThree = Timestamp(
static_cast<unsigned long long>(_oplogOrderInsertOplog(op2.get(), rs, 3).getLong()));
// Before committing, the query still only sees timestamp "1".
@@ -1116,7 +1116,6 @@ TEST(WiredTigerRecordStoreTest, ClusteredRecordStore) {
params.cappedCallback = nullptr;
params.sizeStorer = nullptr;
params.tracksSizeAdjustments = true;
- params.isReadOnly = false;
params.forceUpdateWithFullDocument = false;
const auto wtKvEngine = dynamic_cast<WiredTigerKVEngine*>(harnessHelper->getEngine());
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp
index 70912221539..0b38e30c443 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp
@@ -28,8 +28,6 @@
*/
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.h"
-
-#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
namespace mongo {
@@ -53,7 +51,6 @@ WiredTigerHarnessHelper::WiredTigerHarnessHelper(StringData extraStrings)
0,
true,
false,
- false,
false) {
repl::ReplicationCoordinator::set(
serviceContext(),
@@ -63,8 +60,8 @@ WiredTigerHarnessHelper::WiredTigerHarnessHelper(StringData extraStrings)
std::unique_ptr<RecordStore> WiredTigerHarnessHelper::newRecordStore(
const std::string& ns, const CollectionOptions& collOptions, KeyFormat keyFormat) {
- WiredTigerRecoveryUnit* ru = checked_cast<WiredTigerRecoveryUnit*>(_engine.newRecoveryUnit());
- OperationContextNoop opCtx(ru);
+ ServiceContext::UniqueOperationContext opCtx(newOperationContext());
+ WiredTigerRecoveryUnit* ru = checked_cast<WiredTigerRecoveryUnit*>(opCtx->recoveryUnit());
std::string uri = WiredTigerKVEngine::kTableUriPrefix + ns;
StringData ident = ns;
NamespaceString nss(ns);
@@ -81,7 +78,7 @@ std::unique_ptr<RecordStore> WiredTigerHarnessHelper::newRecordStore(
std::string config = result.getValue();
{
- WriteUnitOfWork uow(&opCtx);
+ WriteUnitOfWork uow(opCtx.get());
WT_SESSION* s = ru->getSession()->getSession();
invariantWTOK(s->create(s, uri.c_str(), config.c_str()), s);
uow.commit();
@@ -98,26 +95,24 @@ std::unique_ptr<RecordStore> WiredTigerHarnessHelper::newRecordStore(
params.isLogged = WiredTigerUtil::useTableLogging(nss);
params.cappedCallback = nullptr;
params.sizeStorer = nullptr;
- params.isReadOnly = false;
params.tracksSizeAdjustments = true;
params.forceUpdateWithFullDocument = collOptions.timeseries != boost::none;
- auto ret = std::make_unique<StandardWiredTigerRecordStore>(&_engine, &opCtx, params);
- ret->postConstructorInit(&opCtx);
+ auto ret = std::make_unique<StandardWiredTigerRecordStore>(&_engine, opCtx.get(), params);
+ ret->postConstructorInit(opCtx.get());
return std::move(ret);
}
std::unique_ptr<RecordStore> WiredTigerHarnessHelper::newOplogRecordStore() {
auto ret = newOplogRecordStoreNoInit();
- auto* ru = _engine.newRecoveryUnit();
- OperationContextNoop opCtx(ru);
- dynamic_cast<WiredTigerRecordStore*>(ret.get())->postConstructorInit(&opCtx);
+ ServiceContext::UniqueOperationContext opCtx(newOperationContext());
+ dynamic_cast<WiredTigerRecordStore*>(ret.get())->postConstructorInit(opCtx.get());
return ret;
}
std::unique_ptr<RecordStore> WiredTigerHarnessHelper::newOplogRecordStoreNoInit() {
- WiredTigerRecoveryUnit* ru = dynamic_cast<WiredTigerRecoveryUnit*>(_engine.newRecoveryUnit());
- OperationContextNoop opCtx(ru);
+ ServiceContext::UniqueOperationContext opCtx(newOperationContext());
+ WiredTigerRecoveryUnit* ru = checked_cast<WiredTigerRecoveryUnit*>(opCtx->recoveryUnit());
std::string ident = NamespaceString::kRsOplogNamespace.ns();
std::string uri = WiredTigerKVEngine::kTableUriPrefix + ident;
@@ -137,7 +132,7 @@ std::unique_ptr<RecordStore> WiredTigerHarnessHelper::newOplogRecordStoreNoInit(
std::string config = result.getValue();
{
- WriteUnitOfWork uow(&opCtx);
+ WriteUnitOfWork uow(opCtx.get());
WT_SESSION* s = ru->getSession()->getSession();
invariantWTOK(s->create(s, uri.c_str(), config.c_str()), s);
uow.commit();
@@ -156,10 +151,9 @@ std::unique_ptr<RecordStore> WiredTigerHarnessHelper::newOplogRecordStoreNoInit(
params.oplogMaxSize = 1024 * 1024 * 1024;
params.cappedCallback = nullptr;
params.sizeStorer = nullptr;
- params.isReadOnly = false;
params.tracksSizeAdjustments = true;
params.forceUpdateWithFullDocument = false;
- return std::make_unique<StandardWiredTigerRecordStore>(&_engine, &opCtx, params);
+ return std::make_unique<StandardWiredTigerRecordStore>(&_engine, opCtx.get(), params);
}
std::unique_ptr<RecoveryUnit> WiredTigerHarnessHelper::newRecoveryUnit() {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 969db98506e..99a4bc7a195 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -228,7 +228,7 @@ void WiredTigerRecoveryUnit::_abort() {
_setState(State::kInactive);
}
-void WiredTigerRecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {
+void WiredTigerRecoveryUnit::doBeginUnitOfWork() {
invariant(!_inUnitOfWork(), toString(_getState()));
invariant(!_isCommittingOrAborting(),
str::stream() << "cannot begin unit of work while commit or rollback handlers are "
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
index 2d75b4e54f1..a1360dcdf0d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
@@ -105,7 +105,6 @@ public:
WiredTigerRecoveryUnit(WiredTigerSessionCache* sc, WiredTigerOplogManager* oplogManager);
~WiredTigerRecoveryUnit();
- void beginUnitOfWork(OperationContext* opCtx) override;
void prepareUnitOfWork() override;
bool waitUntilDurable(OperationContext* opCtx) override;
@@ -232,6 +231,7 @@ public:
void storeWriteContextForDebugging(const BSONObj& info);
private:
+ void doBeginUnitOfWork() override;
void doCommitUnitOfWork() override;
void doAbortUnitOfWork() override;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
index da700a414bb..6ed86d9fadc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
@@ -60,8 +60,7 @@ public:
0, // .maxCacheOverflowFileSizeMB
false, // .durable
false, // .ephemeral
- false, // .repair
- false // .readOnly
+ false // .repair
) {
repl::ReplicationCoordinator::set(
getGlobalServiceContext(),
@@ -113,7 +112,6 @@ public:
params.cappedCallback = nullptr;
params.sizeStorer = nullptr;
params.tracksSizeAdjustments = true;
- params.isReadOnly = false;
params.forceUpdateWithFullDocument = false;
auto ret = std::make_unique<StandardWiredTigerRecordStore>(&_engine, opCtx, params);
@@ -279,7 +277,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture, NoOverlapReadSource) {
TEST_F(WiredTigerRecoveryUnitTestFixture,
LocalReadOnADocumentBeingPreparedWithoutIgnoringPreparedTriggersPrepareConflict) {
// Prepare but don't commit a transaction
- ru1->beginUnitOfWork(clientAndCtx1.second.get());
+ ru1->beginUnitOfWork(clientAndCtx1.second->readOnly());
WT_CURSOR* cursor;
getCursor(ru1, &cursor);
cursor->set_key(cursor, "key");
@@ -289,7 +287,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture,
ru1->prepareUnitOfWork();
// The transaction read default enforces prepare conflicts and triggers a WT_PREPARE_CONFLICT.
- ru2->beginUnitOfWork(clientAndCtx2.second.get());
+ ru2->beginUnitOfWork(clientAndCtx2.second->readOnly());
getCursor(ru2, &cursor);
cursor->set_key(cursor, "key");
int ret = cursor->search(cursor);
@@ -302,7 +300,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture,
TEST_F(WiredTigerRecoveryUnitTestFixture,
LocalReadOnADocumentBeingPreparedDoesntTriggerPrepareConflict) {
// Prepare but don't commit a transaction
- ru1->beginUnitOfWork(clientAndCtx1.second.get());
+ ru1->beginUnitOfWork(clientAndCtx1.second->readOnly());
WT_CURSOR* cursor;
getCursor(ru1, &cursor);
cursor->set_key(cursor, "key");
@@ -313,7 +311,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture,
// A transaction that chooses to ignore prepare conflicts does not see the record; instead of
// triggering a prepare conflict, the read simply finds nothing.
- ru2->beginUnitOfWork(clientAndCtx2.second.get());
+ ru2->beginUnitOfWork(clientAndCtx2.second->readOnly());
ru2->setPrepareConflictBehavior(PrepareConflictBehavior::kIgnoreConflicts);
getCursor(ru2, &cursor);
cursor->set_key(cursor, "key");
@@ -326,7 +324,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture,
TEST_F(WiredTigerRecoveryUnitTestFixture, WriteAllowedWhileIgnorePrepareFalse) {
// Prepare but don't commit a transaction
- ru1->beginUnitOfWork(clientAndCtx1.second.get());
+ ru1->beginUnitOfWork(clientAndCtx1.second->readOnly());
WT_CURSOR* cursor;
getCursor(ru1, &cursor);
cursor->set_key(cursor, "key1");
@@ -337,7 +335,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture, WriteAllowedWhileIgnorePrepareFalse) {
// A transaction that chooses to ignore prepare conflicts with kIgnoreConflictsAllowWrites does
// not see the record
- ru2->beginUnitOfWork(clientAndCtx2.second.get());
+ ru2->beginUnitOfWork(clientAndCtx2.second->readOnly());
ru2->setPrepareConflictBehavior(PrepareConflictBehavior::kIgnoreConflictsAllowWrites);
// The prepared write is not visible.
@@ -358,7 +356,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture, WriteAllowedWhileIgnorePrepareFalse) {
TEST_F(WiredTigerRecoveryUnitTestFixture, WriteOnADocumentBeingPreparedTriggersWTRollback) {
// Prepare but don't commit a transaction
- ru1->beginUnitOfWork(clientAndCtx1.second.get());
+ ru1->beginUnitOfWork(clientAndCtx1.second->readOnly());
WT_CURSOR* cursor;
getCursor(ru1, &cursor);
cursor->set_key(cursor, "key");
@@ -368,7 +366,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture, WriteOnADocumentBeingPreparedTriggersW
ru1->prepareUnitOfWork();
// Another transaction with write triggers WT_ROLLBACK
- ru2->beginUnitOfWork(clientAndCtx2.second.get());
+ ru2->beginUnitOfWork(clientAndCtx2.second->readOnly());
getCursor(ru2, &cursor);
cursor->set_key(cursor, "key");
cursor->set_value(cursor, "value2");
@@ -792,7 +790,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture, MultiTimestampConstraintsInternalState
Timestamp ts2(2, 2);
OperationContext* opCtx = clientAndCtx1.second.get();
- ru1->beginUnitOfWork(opCtx);
+ ru1->beginUnitOfWork(opCtx->readOnly());
// Perform a non-timestamped write.
WT_CURSOR* cursor;
@@ -813,7 +811,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture, MultiTimestampConstraintsInternalState
// Committing the unit of work should reset the internal state for the multi timestamp
// constraint checks.
ru1->commitUnitOfWork();
- ru1->beginUnitOfWork(opCtx);
+ ru1->beginUnitOfWork(opCtx->readOnly());
// Perform a write at ts2.
cursor->set_key(cursor, "key3");
@@ -831,7 +829,7 @@ TEST_F(WiredTigerRecoveryUnitTestFixture, AbandonSnapshotAbortMode) {
const char* const key = "key";
{
- ru1->beginUnitOfWork(opCtx);
+ ru1->beginUnitOfWork(opCtx->readOnly());
WT_CURSOR* cursor;
getCursor(ru1, &cursor);
@@ -869,7 +867,7 @@ DEATH_TEST_REGEX_F(WiredTigerRecoveryUnitTestFixture,
Timestamp ts2(2, 2);
OperationContext* opCtx = clientAndCtx1.second.get();
- ru1->beginUnitOfWork(opCtx);
+ ru1->beginUnitOfWork(opCtx->readOnly());
auto writeTest = [&]() {
// Perform a non-timestamped write.
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
index 59a4e92e6a0..d326df17ae6 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
@@ -47,17 +47,8 @@
namespace mongo {
-WiredTigerSizeStorer::WiredTigerSizeStorer(WT_CONNECTION* conn,
- const std::string& storageUri,
- bool readOnly)
- : _conn(conn),
- _storageUri(storageUri),
- _tableId(WiredTigerSession::genTableId()),
- _readOnly(readOnly) {
- if (_readOnly) {
- return;
- }
-
+WiredTigerSizeStorer::WiredTigerSizeStorer(WT_CONNECTION* conn, const std::string& storageUri)
+ : _conn(conn), _storageUri(storageUri), _tableId(WiredTigerSession::genTableId()) {
std::string config = WiredTigerCustomizationHooks::get(getGlobalServiceContext())
->getTableCreateConfig(_storageUri);
@@ -69,7 +60,7 @@ WiredTigerSizeStorer::WiredTigerSizeStorer(WT_CONNECTION* conn,
void WiredTigerSizeStorer::store(StringData uri, std::shared_ptr<SizeInfo> sizeInfo) {
// If the SizeInfo is still dirty, we're done.
- if (sizeInfo->_dirty.load() || _readOnly)
+ if (sizeInfo->_dirty.load())
return;
// Ordering is important: as the entry may be flushed concurrently, set the dirty flag last.
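
The ordering comment above deserves an illustration: a concurrent flusher may clear the dirty flag and then read the counters, so a writer must publish the counters before re-setting the flag, or the flusher could persist stale values. A self-contained sketch of the idea, with invented names:

    #include <atomic>

    struct SizeInfoSketch {
        std::atomic<long long> numRecords{0};
        std::atomic<long long> dataSize{0};
        std::atomic<bool> dirty{false};
    };

    void publish(SizeInfoSketch& info, long long records, long long bytes) {
        if (info.dirty.load())
            return;  // Already queued; the pending flush sees the newest values.
        info.numRecords.store(records);
        info.dataSize.store(bytes);
        // Set the flag last: once a flusher observes dirty == true, the stores
        // above are already visible (sequentially consistent atomics).
        info.dirty.store(true);
    }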
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
index 50eb23324c0..077fbfe75e4 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
@@ -76,7 +76,7 @@ public:
AtomicWord<bool> _dirty;
};
- WiredTigerSizeStorer(WT_CONNECTION* conn, const std::string& storageUri, bool readOnly = false);
+ WiredTigerSizeStorer(WT_CONNECTION* conn, const std::string& storageUri);
~WiredTigerSizeStorer() = default;
/**
@@ -96,7 +96,6 @@ private:
WT_CONNECTION* _conn;
const std::string _storageUri;
const uint64_t _tableId; // Not persisted
- const bool _readOnly;
// Serializes flushes to disk.
Mutex _flushMutex = MONGO_MAKE_LATCH("WiredTigerSessionStorer::_flushMutex");
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
index 1b74a9ead4e..c05cc6097a5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
@@ -72,8 +72,7 @@ TEST(WiredTigerRecordStoreTest, SizeStorer1) {
string uri = checked_cast<WiredTigerRecordStore*>(rs.get())->getURI();
string indexUri = WiredTigerKVEngine::kTableUriPrefix + "myindex";
- const bool enableWtLogging = false;
- WiredTigerSizeStorer ss(harnessHelper->conn(), indexUri, enableWtLogging);
+ WiredTigerSizeStorer ss(harnessHelper->conn(), indexUri);
checked_cast<WiredTigerRecordStore*>(rs.get())->setSizeStorer(&ss);
int N = 12;
@@ -116,7 +115,6 @@ TEST(WiredTigerRecordStoreTest, SizeStorer1) {
params.isLogged = false;
params.cappedCallback = nullptr;
params.sizeStorer = &ss;
- params.isReadOnly = false;
params.tracksSizeAdjustments = true;
params.forceUpdateWithFullDocument = false;
@@ -146,8 +144,7 @@ TEST(WiredTigerRecordStoreTest, SizeStorer1) {
{
ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
- const bool enableWtLogging = false;
- WiredTigerSizeStorer ss2(harnessHelper->conn(), indexUri, enableWtLogging);
+ WiredTigerSizeStorer ss2(harnessHelper->conn(), indexUri);
auto info = ss2.load(opCtx.get(), uri);
ASSERT_EQUALS(N, info->numRecords.load());
}
@@ -159,11 +156,8 @@ class SizeStorerUpdateTest : public mongo::unittest::Test {
private:
virtual void setUp() {
harnessHelper.reset(new WiredTigerHarnessHelper());
- const bool enableWtLogging = false;
- sizeStorer.reset(
- new WiredTigerSizeStorer(harnessHelper->conn(),
- WiredTigerKVEngine::kTableUriPrefix + "sizeStorer",
- enableWtLogging));
+ sizeStorer.reset(new WiredTigerSizeStorer(
+ harnessHelper->conn(), WiredTigerKVEngine::kTableUriPrefix + "sizeStorer"));
rs = harnessHelper->newRecordStore();
WiredTigerRecordStore* wtrs = checked_cast<WiredTigerRecordStore*>(rs.get());
wtrs->setSizeStorer(sizeStorer.get());
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index de31ec10751..d4e48faafe9 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -832,9 +832,7 @@ void WiredTigerUtil::notifyStartupComplete() {
_tableLoggingInfo.isInitializing = false;
}
- if (!storageGlobalParams.readOnly) {
- removeTableChecksFile();
- }
+ removeTableChecksFile();
}
void WiredTigerUtil::resetTableLoggingInfo() {
@@ -878,7 +876,6 @@ Status WiredTigerUtil::setTableLogging(OperationContext* opCtx, const std::strin
WiredTigerSessionCache* sessionCache = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache();
sessionCache->closeAllCursors(uri);
- invariant(!storageGlobalParams.readOnly);
stdx::lock_guard<Latch> lk(_tableLoggingInfoMutex);
// Update the table logging settings regardless of whether the process is still starting up.
diff --git a/src/mongo/db/storage/write_unit_of_work.cpp b/src/mongo/db/storage/write_unit_of_work.cpp
index 3e2bb922565..564ff4098b5 100644
--- a/src/mongo/db/storage/write_unit_of_work.cpp
+++ b/src/mongo/db/storage/write_unit_of_work.cpp
@@ -58,9 +58,7 @@ WriteUnitOfWork::WriteUnitOfWork(OperationContext* opCtx, bool groupOplogEntries
_opCtx->lockState()->beginWriteUnitOfWork();
if (_toplevel) {
- if (!storageGlobalParams.readOnly) {
- _opCtx->recoveryUnit()->beginUnitOfWork(_opCtx);
- }
+ _opCtx->recoveryUnit()->beginUnitOfWork(_opCtx->readOnly());
_opCtx->_ruState = RecoveryUnitState::kActiveUnitOfWork;
}
// Make sure we don't silently proceed after a previous WriteUnitOfWork under the same parent
@@ -71,7 +69,7 @@ WriteUnitOfWork::WriteUnitOfWork(OperationContext* opCtx, bool groupOplogEntries
WriteUnitOfWork::~WriteUnitOfWork() {
if (!_released && !_committed) {
invariant(_opCtx->_ruState != RecoveryUnitState::kNotInUnitOfWork);
- if (!storageGlobalParams.readOnly) {
+ if (!_opCtx->readOnly()) {
if (_toplevel) {
// Abort unit of work and execute rollback handlers
_opCtx->recoveryUnit()->abortUnitOfWork();
@@ -80,7 +78,8 @@ WriteUnitOfWork::~WriteUnitOfWork() {
_opCtx->_ruState = RecoveryUnitState::kFailedUnitOfWork;
}
} else {
- // Just execute rollback handlers in readOnly mode
+ // Clear the readOnly state and execute rollback handlers in readOnly mode.
+ _opCtx->recoveryUnit()->endReadOnlyUnitOfWork();
_opCtx->recoveryUnit()->abortRegisteredChanges();
}
_opCtx->lockState()->endWriteUnitOfWork();
@@ -142,12 +141,12 @@ void WriteUnitOfWork::commit() {
// Execute preCommit hooks before committing the transaction. This is an opportunity to
// throw or do any last changes before committing.
_opCtx->recoveryUnit()->runPreCommitHooks(_opCtx);
- if (!storageGlobalParams.readOnly) {
- // Just execute commit handlers in readOnly mode
- _opCtx->recoveryUnit()->commitUnitOfWork();
- } else {
+ if (!_opCtx->readOnly()) {
// Commit unit of work and execute commit or rollback handlers depending on whether the
// commit was successful.
+ _opCtx->recoveryUnit()->commitUnitOfWork();
+ } else {
+ // Just execute commit handlers in readOnly mode
_opCtx->recoveryUnit()->commitRegisteredChanges(boost::none);
}
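
From the caller's side, nothing changes: the read-only branch now lives inside WriteUnitOfWork instead of behind the global flag. An illustrative usage, with every member call taken from this hunk:

    WriteUnitOfWork wuow(opCtx);  // beginUnitOfWork(opCtx->readOnly()) under the hood.
    // ... register changes; perform storage writes only when writable ...
    wuow.commit();  // commitUnitOfWork(), or commitRegisteredChanges(boost::none)
                    // when opCtx->readOnly() is true.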
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index 2283e75c9d6..8386e02710a 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -129,13 +129,6 @@ void generateSystemIndexForExistingCollection(OperationContext* opCtx,
} // namespace
Status verifySystemIndexes(OperationContext* opCtx) {
- // Do not try and generate any system indexes in read only mode.
- if (storageGlobalParams.readOnly) {
- LOGV2_WARNING(22489,
- "Running in queryable backup mode. Unable to create authorization indexes");
- return Status::OK();
- }
-
const NamespaceString& systemUsers = AuthorizationManager::usersCollectionNamespace;
const NamespaceString& systemRoles = AuthorizationManager::rolesCollectionNamespace;
diff --git a/src/mongo/embedded/embedded.cpp b/src/mongo/embedded/embedded.cpp
index 20395926254..eb2549573b5 100644
--- a/src/mongo/embedded/embedded.cpp
+++ b/src/mongo/embedded/embedded.cpp
@@ -234,6 +234,13 @@ ServiceContext* initialize(const char* yaml_config) {
serviceContext, serviceContext->getPreciseClockSource());
serviceContext->setPeriodicRunner(std::move(periodicRunner));
+ // When starting the server with --queryableBackupMode or --recoverFromOplogAsStandalone, we are
+    // in read-only mode and don't allow user-originating operations to perform writes.
+ if (storageGlobalParams.queryableBackupMode ||
+ repl::ReplSettings::shouldRecoverFromOplogAsStandalone()) {
+ serviceContext->disallowUserWrites();
+ }
+
setUpCatalog(serviceContext);
// Creating the operation context before initializing the storage engine allows the storage
@@ -278,15 +285,11 @@ ServiceContext* initialize(const char* yaml_config) {
uassert(50677, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
}
- if (!storageGlobalParams.readOnly) {
- boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
- }
+ boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
ReadWriteConcernDefaults::create(serviceContext, readWriteConcernDefaultsCacheLookupEmbedded);
- bool canCallFCVSetIfCleanStartup =
- !storageGlobalParams.readOnly && !(storageGlobalParams.engine == "devnull");
- if (canCallFCVSetIfCleanStartup) {
+ if (storageGlobalParams.engine != "devnull") {
Lock::GlobalWrite lk(startupOpCtx.get());
FeatureCompatibilityVersion::setIfCleanStartup(startupOpCtx.get(),
repl::StorageInterface::get(serviceContext));
diff --git a/src/mongo/embedded/embedded_ismaster.cpp b/src/mongo/embedded/embedded_ismaster.cpp
index 6b9903f35ca..a9f6763952e 100644
--- a/src/mongo/embedded/embedded_ismaster.cpp
+++ b/src/mongo/embedded/embedded_ismaster.cpp
@@ -87,7 +87,7 @@ public:
result.append("minWireVersion", wireSpec->incomingExternalClient.minWireVersion);
result.append("maxWireVersion", wireSpec->incomingExternalClient.maxWireVersion);
- result.append("readOnly", storageGlobalParams.readOnly);
+ result.append("readOnly", opCtx->readOnly());
return true;
}
diff --git a/src/mongo/s/commands/cluster_collection_mod_cmd.cpp b/src/mongo/s/commands/cluster_collection_mod_cmd.cpp
index d295ca18c71..73e968a618e 100644
--- a/src/mongo/s/commands/cluster_collection_mod_cmd.cpp
+++ b/src/mongo/s/commands/cluster_collection_mod_cmd.cpp
@@ -72,7 +72,8 @@ public:
const std::string& dbname,
const BSONObj& cmdObj) const override {
const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
- return auth::checkAuthForCollMod(AuthorizationSession::get(client), nss, cmdObj, true);
+ return auth::checkAuthForCollMod(
+ client->getOperationContext(), AuthorizationSession::get(client), nss, cmdObj, true);
}
bool supportsWriteConcern(const BSONObj& cmd) const override {
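
The new leading argument lines up with the auth/authorization_checks.h change in this commit's diffstat. The assumed updated declaration, inferred from this call site alone; parameter names are guesses:

    namespace auth {
    Status checkAuthForCollMod(OperationContext* opCtx,
                               AuthorizationSession* authSession,
                               const NamespaceString& nss,
                               const BSONObj& cmdObj,
                               bool isMongos);
    }  // namespace auth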
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index 48545c7c0d7..04aa145b3d7 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -139,6 +139,7 @@ public:
uassertStatusOK(countCommandAsAggregationCommand(countRequest, nss));
auto aggCmdOnViewObj = OpMsgRequest::fromDBAndBody(nss.db(), aggCmdOnView).body;
auto aggRequestOnView = aggregation_request_helper::parseFromBSON(
+ opCtx,
nss,
aggCmdOnViewObj,
boost::none,
@@ -252,6 +253,7 @@ public:
auto aggCmdOnViewObj =
OpMsgRequest::fromDBAndBody(nss.db(), aggCmdOnView.getValue()).body;
auto aggRequestOnView = aggregation_request_helper::parseFromBSON(
+ opCtx,
nss,
aggCmdOnViewObj,
verbosity,
diff --git a/src/mongo/s/commands/cluster_create_cmd.cpp b/src/mongo/s/commands/cluster_create_cmd.cpp
index b7c6d5ea88b..f6496645e22 100644
--- a/src/mongo/s/commands/cluster_create_cmd.cpp
+++ b/src/mongo/s/commands/cluster_create_cmd.cpp
@@ -108,7 +108,7 @@ public:
void doCheckAuthorization(OperationContext* opCtx) const final {
uassertStatusOK(auth::checkAuthForCreate(
- AuthorizationSession::get(opCtx->getClient()), request(), true));
+ opCtx, AuthorizationSession::get(opCtx->getClient()), request(), true));
}
CreateCommandReply typedRun(OperationContext* opCtx) final {
diff --git a/src/mongo/s/commands/cluster_distinct_cmd.cpp b/src/mongo/s/commands/cluster_distinct_cmd.cpp
index 70ae5f4b671..cfb7b572e2c 100644
--- a/src/mongo/s/commands/cluster_distinct_cmd.cpp
+++ b/src/mongo/s/commands/cluster_distinct_cmd.cpp
@@ -147,6 +147,7 @@ public:
auto viewAggCmd = OpMsgRequest::fromDBAndBody(nss.db(), aggCmdOnView.getValue()).body;
auto aggRequestOnView = aggregation_request_helper::parseFromBSON(
+ opCtx,
nss,
viewAggCmd,
verbosity,
@@ -228,6 +229,7 @@ public:
auto viewAggCmd = OpMsgRequest::fromDBAndBody(nss.db(), aggCmdOnView.getValue()).body;
auto aggRequestOnView = aggregation_request_helper::parseFromBSON(
+ opCtx,
nss,
viewAggCmd,
boost::none,
diff --git a/src/mongo/s/commands/cluster_find_cmd.h b/src/mongo/s/commands/cluster_find_cmd.h
index efbc0bd4d72..b45c8e3d837 100644
--- a/src/mongo/s/commands/cluster_find_cmd.h
+++ b/src/mongo/s/commands/cluster_find_cmd.h
@@ -174,6 +174,7 @@ public:
OpMsgRequest::fromDBAndBody(_dbName, aggCmdOnView).body;
auto aggRequestOnView = aggregation_request_helper::parseFromBSON(
+ opCtx,
ns(),
viewAggregationCommand,
verbosity,
@@ -243,6 +244,7 @@ public:
OpMsgRequest::fromDBAndBody(_dbName, aggCmdOnView).body;
auto aggRequestOnView = aggregation_request_helper::parseFromBSON(
+ opCtx,
ns(),
viewAggregationCommand,
boost::none,
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.h b/src/mongo/s/commands/cluster_pipeline_cmd.h
index 788a974803d..0554d2a97e5 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.h
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.h
@@ -74,7 +74,8 @@ public:
const OpMsgRequest& opMsgRequest,
boost::optional<ExplainOptions::Verbosity> explainVerbosity) override {
const auto aggregationRequest =
- Impl::parseAggregationRequest(opMsgRequest,
+ Impl::parseAggregationRequest(opCtx,
+ opMsgRequest,
explainVerbosity,
APIParameters::get(opCtx).getAPIStrict().value_or(false));
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd_s.cpp b/src/mongo/s/commands/cluster_pipeline_cmd_s.cpp
index 3009d9265e6..d099095fe96 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd_s.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd_s.cpp
@@ -54,11 +54,15 @@ struct ClusterPipelineCommandS {
}
static AggregateCommandRequest parseAggregationRequest(
+ OperationContext* opCtx,
const OpMsgRequest& opMsgRequest,
boost::optional<ExplainOptions::Verbosity> explainVerbosity,
bool apiStrict) {
- return aggregation_request_helper::parseFromBSON(
- opMsgRequest.getDatabase().toString(), opMsgRequest.body, explainVerbosity, apiStrict);
+ return aggregation_request_helper::parseFromBSON(opCtx,
+ opMsgRequest.getDatabase().toString(),
+ opMsgRequest.body,
+ explainVerbosity,
+ apiStrict);
}
};
ClusterPipelineCommandBase<ClusterPipelineCommandS> clusterPipelineCmdS;
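
Taken together, the cluster command hunks show aggregation_request_helper::parseFromBSON gaining a leading OperationContext* in both of its call shapes. The assumed updated overloads, reconstructed from these call sites only; parameter names and any trailing defaulted arguments are guesses:

    namespace aggregation_request_helper {
    AggregateCommandRequest parseFromBSON(
        OperationContext* opCtx,
        const NamespaceString& nss,
        const BSONObj& cmdObj,
        boost::optional<ExplainOptions::Verbosity> explainVerbosity,
        bool apiStrict);

    AggregateCommandRequest parseFromBSON(
        OperationContext* opCtx,
        const std::string& dbName,
        const BSONObj& cmdObj,
        boost::optional<ExplainOptions::Verbosity> explainVerbosity,
        bool apiStrict);
    }  // namespace aggregation_request_helper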