author     Gabriel Marks <gabriel.marks@mongodb.com>         2023-04-17 18:41:34 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-04-20 22:40:07 +0000
commit     9492eecdf7aa22b3455d802d288820e0083ebd7c (patch)
tree       8d461343a75ede1ffbb265b6536ca02778609a22
parent     e3f788a7d7c32fbaf41ef8a8e0e51d109b2247ec (diff)
download   mongo-9492eecdf7aa22b3455d802d288820e0083ebd7c.tar.gz
Revert "SERVER-75784 Stop cluster parameter refresher from running in telemetry test"
This reverts commit 726b5e1ba2662bd1039334a2477c1b295e62669b.

Revert "SERVER-74107 Use transaction in cluster parameter refresher to get up-to-date FCV"

This reverts commit 17804518dab21bd5f936787b51baf013fe16101e.
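Both reverted changes shared one piece of test boilerplate: a skipClusterParameterRefresh failpoint enabled at mongos startup so the background refresher could not interfere with the test under way. The failpoint itself is deleted below, along with every test that set it. The recurring pattern being removed looks like this (a representative sketch assembled from the hunks that follow):

    const st = new ShardingTest({
        shards: 1,
        other: {
            mongosOptions:
                {setParameter: {'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}}
        }
    });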
-rw-r--r--  jstests/auth/list_all_local_sessions.js                           |  14
-rw-r--r--  jstests/noPassthrough/cluster_server_parameter_refresher.js       |   5
-rw-r--r--  jstests/noPassthrough/ftdc_connection_reuse.js                    |  38
-rw-r--r--  jstests/noPassthrough/router_transactions_metrics.js              |  11
-rw-r--r--  jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js   |  15
-rw-r--r--  jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js    |   3
-rw-r--r--  jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js      |   2
-rw-r--r--  jstests/noPassthrough/transaction_reaper.js                       |   3
-rw-r--r--  jstests/replsets/internal_sessions_reaping_basic.js               |   2
-rw-r--r--  jstests/sharding/internal_txns/kill_sessions.js                   |   5
-rw-r--r--  jstests/sharding/internal_txns/sessions.js                        |   5
-rw-r--r--  jstests/sharding/mongos_precache_routing_info.js                  |  15
-rw-r--r--  jstests/sharding/query/aggregation_currentop.js                   |   6
-rw-r--r--  jstests/sharding/sessions_collection_auto_healing.js              |   9
-rw-r--r--  jstests/sharding/txn_commit_optimizations_for_read_only_shards.js |   5
-rw-r--r--  jstests/sharding/txn_two_phase_commit_server_status.js            |   4
-rw-r--r--  src/mongo/db/server_parameter.cpp                                 |  58
-rw-r--r--  src/mongo/db/server_parameter.h                                   |  34
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp                       |   3
-rw-r--r--  src/mongo/idl/SConscript                                          |   2
-rw-r--r--  src/mongo/idl/cluster_server_parameter_refresher.cpp              | 198
-rw-r--r--  src/mongo/idl/cluster_server_parameter_refresher.h                |   1
-rw-r--r--  src/mongo/s/commands/strategy.cpp                                 |   2
23 files changed, 147 insertions(+), 293 deletions(-)
diff --git a/jstests/auth/list_all_local_sessions.js b/jstests/auth/list_all_local_sessions.js
index 3b90d01b545..3efe32fc3e1 100644
--- a/jstests/auth/list_all_local_sessions.js
+++ b/jstests/auth/list_all_local_sessions.js
@@ -1,5 +1,5 @@
// Auth tests for the $listLocalSessions {allUsers:true} aggregation stage.
-// @tags: [requires_fcv_70, requires_sharding]
+// @tags: [requires_sharding]
(function() {
'use strict';
@@ -49,16 +49,8 @@ const mongod = MongoRunner.runMongod({auth: ""});
runListAllLocalSessionsTest(mongod);
MongoRunner.stopMongod(mongod);
-const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {
- keyFile: 'jstests/libs/key1',
- mongosOptions:
- {setParameter: {'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}}
- }
-});
+const st =
+ new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}});
runListAllLocalSessionsTest(st.s0);
st.stop();
})();
diff --git a/jstests/noPassthrough/cluster_server_parameter_refresher.js b/jstests/noPassthrough/cluster_server_parameter_refresher.js
index a0be0a5dac9..c4d5e4b8e8d 100644
--- a/jstests/noPassthrough/cluster_server_parameter_refresher.js
+++ b/jstests/noPassthrough/cluster_server_parameter_refresher.js
@@ -128,11 +128,8 @@ function runTest(st, startupRefreshIntervalMS) {
[st.configRS, ...st._rs.map(rs => rs.test)].forEach(rs => {
assert(rs.getPrimary().getDB('config').clusterParameters.drop());
});
-
- // Perform a dummy write in order to get the config shard's cluster time cached on the mongos.
- st.s.getDB('config').abc.insert({a: "hello"});
-
expectedParams = {};
+
assertParams(startupRefreshIntervalRelaxedMS);
}
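The dropped dummy write was a workaround specific to the transaction-based refresher: the snapshot-read-concern transaction needed the config shard's cluster time cached on the mongos, so the test primed it with a throwaway insert. With the transaction gone, so is the workaround; for reference, the removed line was simply:

    // Prime the mongos's cached cluster time for the config shard (pre-revert workaround).
    st.s.getDB('config').abc.insert({a: "hello"});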
diff --git a/jstests/noPassthrough/ftdc_connection_reuse.js b/jstests/noPassthrough/ftdc_connection_reuse.js
index 4d490870d67..f5e34b06af0 100644
--- a/jstests/noPassthrough/ftdc_connection_reuse.js
+++ b/jstests/noPassthrough/ftdc_connection_reuse.js
@@ -26,12 +26,24 @@ const kOperations = 5;
const testDB = st.s.getDB(kDbName);
const coll = testDB.getCollection(kCollName);
-function getDiagnosticData() {
+function getDiagnosticData(mustHavePool) {
let stats;
- assert.soon(() => {
- stats = verifyGetDiagnosticData(st.s.getDB("admin")).connPoolStats;
- return stats["pools"].hasOwnProperty('NetworkInterfaceTL-TaskExecutorPool-0');
- }, "Failed to load NetworkInterfaceTL-TaskExecutorPool-0 in FTDC within time limit");
+ try {
+ assert.soon(
+ () => {
+ stats = verifyGetDiagnosticData(st.s.getDB("admin")).connPoolStats;
+ return stats["pools"].hasOwnProperty('NetworkInterfaceTL-TaskExecutorPool-0');
+ },
+ "Failed to load NetworkInterfaceTL-TaskExecutorPool-0 in FTDC within time limit",
+ 5000,
+ undefined,
+ {runHangAnalyzer: mustHavePool});
+ } catch (e) {
+ // If the above assert.soon fails and we must have a pool, fail out. Otherwise return
+ // null so callers can treat the pool stats as absent.
+ assert(!mustHavePool, e);
+ return null;
+ }
assert(stats.hasOwnProperty('totalWasUsedOnce'));
assert(stats.hasOwnProperty('totalConnUsageTimeMillis'));
return stats["pools"]["NetworkInterfaceTL-TaskExecutorPool-0"];
@@ -78,9 +90,9 @@ function resetPools() {
// FTDC data is collected periodically. Check that the data returned reflects that the pools
// have been dropped before resuming testing.
assert.soon(() => {
- const stats = getDiagnosticData();
+ const stats = getDiagnosticData(false /* mustHavePool */);
// The shard has a single node in its replica set.
- return !stats.hasOwnProperty(allHosts[0]);
+ return !stats || !stats.hasOwnProperty(allHosts[0]);
}, "Failed to wait for pool stats to reflect dropped pools");
}
@@ -89,19 +101,19 @@ st.rs0.awaitReplication();
// Check that the amount of time connections from the pool are in-use monotonically increases with
// each operation that is run.
-let previous = getDiagnosticData()["poolConnUsageTimeMillis"];
+let previous = getDiagnosticData(true /* mustHavePool */)["poolConnUsageTimeMillis"];
let initialVal = previous;
for (let i = 0; i < kOperations; i++) {
jsTestLog("Issuing find #" + i);
assert.commandWorked(testDB.runCommand({"find": kCollName}));
assert.soon(() => {
- let poolStats = getDiagnosticData();
+ let poolStats = getDiagnosticData(true /* mustHavePool */);
return poolStats["poolConnUsageTimeMillis"] >= previous;
}, "poolConnUsageTime failed to update within time limit", 10 * 1000);
- let res = getDiagnosticData()["poolConnUsageTimeMillis"];
+ let res = getDiagnosticData(true /* mustHavePool */)["poolConnUsageTimeMillis"];
previous = res;
}
-assert.gte(getDiagnosticData()["poolConnUsageTimeMillis"],
+assert.gte(getDiagnosticData(true /* mustHavePool */)["poolConnUsageTimeMillis"],
initialVal,
"poolConnUsageTimeMillis failed to increase after issuing find operations");
@@ -118,7 +130,7 @@ jsTestLog("Launching blocked finds");
configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
launchFinds({times: 3, readPref: "primary"});
assert.soon(() => {
- let poolStats = getDiagnosticData();
+ let poolStats = getDiagnosticData(true /* mustHavePool */);
return poolStats["poolInUse"] == 3;
}, "Launched finds failed to be marked as inUse within time limit", 10 * 1000);
@@ -129,7 +141,7 @@ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
assert.commandWorked(st.s.adminCommand(
{"setParameter": 1, ShardingTaskExecutorPoolMinSize: 1, ShardingTaskExecutorPoolMaxSize: 1}));
assert.soon(() => {
- let poolStats = getDiagnosticData();
+ let poolStats = getDiagnosticData(true /* mustHavePool */);
return poolStats["poolInUse"] == 0 && poolStats["poolWasUsedOnce"] == 2;
}, "Dropped connections failed to be marked as wasUsedOnce within time limit", 20 * 1000);
diff --git a/jstests/noPassthrough/router_transactions_metrics.js b/jstests/noPassthrough/router_transactions_metrics.js
index 21677c30dfa..f74b6d596af 100644
--- a/jstests/noPassthrough/router_transactions_metrics.js
+++ b/jstests/noPassthrough/router_transactions_metrics.js
@@ -1,7 +1,6 @@
// Tests multi-statement transactions metrics in the serverStatus output from mongos in various
// basic cases.
// @tags: [
-// requires_fcv_70,
// uses_multi_shard_transaction,
// uses_transactions,
// ]
@@ -202,15 +201,7 @@ const dbName = "test";
const collName = "foo";
const ns = dbName + '.' + collName;
-const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- config: 1,
- other: {
- mongosOptions:
- {setParameter: {'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}}
- }
-});
+const st = new ShardingTest({shards: 2, mongos: 2, config: 1});
const session = st.s.startSession();
const sessionDB = session.getDatabase(dbName);
diff --git a/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js b/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js
index c1fef519d4a..9fc483d85a5 100644
--- a/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js
+++ b/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js
@@ -111,18 +111,23 @@ function runDowngradeUpgradeTestForCWSP(conn, isMongod, isStandalone, verifyStat
verifyStateCallback(sp, true);
}
- // Downgrade FCV and ensure we can't set or get.
+ // Downgrade FCV and ensure that set fails, and that get either fails (if the FCV is
+ // known by the server) or returns the default value (if it is not).
// If our downgrade takes us below the minimum FCV for
// featureFlagAuditConfigClusterParameter, we expect all cluster parameter commands to fail
// for standalone.
assert.commandWorked(admin.runCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-
- assertGetFailed(admin.runCommand({getClusterParameter: sp}));
+ if (isMongod) {
+ assertGetFailed(admin.runCommand({getClusterParameter: sp}));
+ } else {
+ const afterDowngrade =
+ assert.commandWorked(admin.runCommand({getClusterParameter: sp}));
+ assert.eq(val(afterDowngrade), initval);
+ }
assertSetFailed(admin.runCommand({setClusterParameter: {[sp]: {intData: updateVal + 1}}}));
-
if (!(isStandalone && !FeatureFlagUtil.isEnabled(admin, 'AuditConfigClusterParameter'))) {
assertParamExistenceInGetParamStar(
- assert.commandWorked(admin.runCommand({getClusterParameter: "*"})), sp, false);
+ assert.commandWorked(admin.runCommand({getClusterParameter: "*"})), sp, !isMongod);
}
if (verifyStateCallback !== undefined) {
verifyStateCallback(sp, false);
diff --git a/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js b/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js
index 5882ab529f0..e7a6ddd2332 100644
--- a/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js
+++ b/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js
@@ -1,6 +1,6 @@
/**
* Test that mongos is collecting telemetry metrics.
- * @tags: [requires_fcv_70, featureFlagTelemetry]
+ * @tags: [featureFlagTelemetry]
*/
load('jstests/libs/telemetry_utils.js');
@@ -21,7 +21,6 @@ const setup = () => {
mongosOptions: {
setParameter: {
internalQueryConfigureTelemetrySamplingRate: -1,
- 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
}
},
});
diff --git a/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js b/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js
index 6bbf55f08ea..91172afdbdb 100644
--- a/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js
+++ b/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js
@@ -1,6 +1,5 @@
/**
* Test that $telemetry properly redacts find commands, on mongod and mongos.
- * @tags: [requires_fcv_70]
*/
load("jstests/libs/telemetry_utils.js");
(function() {
@@ -56,7 +55,6 @@ const st = new ShardingTest({
setParameter: {
internalQueryConfigureTelemetrySamplingRate: -1,
featureFlagTelemetry: true,
- 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
}
},
});
diff --git a/jstests/noPassthrough/transaction_reaper.js b/jstests/noPassthrough/transaction_reaper.js
index 1240cfd1ba3..becc6a59fec 100644
--- a/jstests/noPassthrough/transaction_reaper.js
+++ b/jstests/noPassthrough/transaction_reaper.js
@@ -1,5 +1,4 @@
// @tags: [
-// requires_fcv_70,
// requires_replication,
// requires_sharding,
// ]
@@ -40,8 +39,6 @@ function Sharding(lifetime) {
rs: true,
rsOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: lifetime}},
rs0: {nodes: 1},
- mongosOptions:
- {setParameter: {'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}}
},
});
diff --git a/jstests/replsets/internal_sessions_reaping_basic.js b/jstests/replsets/internal_sessions_reaping_basic.js
index adf6ec1021e..18a15a6ae1b 100644
--- a/jstests/replsets/internal_sessions_reaping_basic.js
+++ b/jstests/replsets/internal_sessions_reaping_basic.js
@@ -3,7 +3,7 @@
* config.image_collection entries for a transaction session if the logical session that it
* corresponds to has expired and been removed from the config.system.sessions collection.
*
- * @tags: [requires_fcv_70, uses_transactions]
+ * @tags: [requires_fcv_60, uses_transactions]
*/
(function() {
diff --git a/jstests/sharding/internal_txns/kill_sessions.js b/jstests/sharding/internal_txns/kill_sessions.js
index 1240a573510..4475c3b532e 100644
--- a/jstests/sharding/internal_txns/kill_sessions.js
+++ b/jstests/sharding/internal_txns/kill_sessions.js
@@ -10,10 +10,7 @@ TestData.disableImplicitSessions = true;
const st = new ShardingTest({
shards: 1,
- mongosOptions: {
- setParameter:
- {maxSessions: 1, 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}
- },
+ mongosOptions: {setParameter: {maxSessions: 1}},
// The config server uses a session for internal operations, so raise the limit by 1 for a
// catalog shard.
shardOptions: {setParameter: {maxSessions: TestData.catalogShard ? 2 : 1}}
diff --git a/jstests/sharding/internal_txns/sessions.js b/jstests/sharding/internal_txns/sessions.js
index 538d9ffa58b..aff71480c91 100644
--- a/jstests/sharding/internal_txns/sessions.js
+++ b/jstests/sharding/internal_txns/sessions.js
@@ -10,10 +10,7 @@ TestData.disableImplicitSessions = true;
const st = new ShardingTest({
shards: 1,
- mongosOptions: {
- setParameter:
- {maxSessions: 1, 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}
- },
+ mongosOptions: {setParameter: {maxSessions: 1}},
// The config server uses a session for internal operations, so raise the limit by 1 for a
// catalog shard.
shardOptions: {setParameter: {maxSessions: TestData.catalogShard ? 2 : 1}}
diff --git a/jstests/sharding/mongos_precache_routing_info.js b/jstests/sharding/mongos_precache_routing_info.js
index 573391a3b59..6c3e59e7591 100644
--- a/jstests/sharding/mongos_precache_routing_info.js
+++ b/jstests/sharding/mongos_precache_routing_info.js
@@ -1,16 +1,8 @@
-// @tags: [requires_fcv_70]
-
(function() {
'use strict';
// create
-var s = new ShardingTest({
- shards: 2,
- other: {
- mongosOptions:
- {setParameter: {'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}}
- }
-});
+var s = new ShardingTest({shards: 2});
var db = s.getDB("test");
var ss = db.serverStatus();
@@ -42,10 +34,7 @@ assert.eq(1, ss.shardingStatistics.catalogCache.countFullRefreshesStarted);
// does not pre cache when set parameter is disabled
s.restartMongos(0, {
restart: true,
- setParameter: {
- loadRoutingTableOnStartup: false,
- 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
- },
+ setParameter: {loadRoutingTableOnStartup: false},
});
db = s.getDB("test");
diff --git a/jstests/sharding/query/aggregation_currentop.js b/jstests/sharding/query/aggregation_currentop.js
index 1c566fbc9fb..bfa8350a00d 100644
--- a/jstests/sharding/query/aggregation_currentop.js
+++ b/jstests/sharding/query/aggregation_currentop.js
@@ -42,11 +42,7 @@ const stParams = {
name: jsTestName(),
keyFile: key,
shards: 3,
- rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}},
- other: {
- mongosOptions:
- {setParameter: {'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}}
- }
+ rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
};
// Create a new sharded cluster for testing. We set the internalQueryExecYieldIterations
diff --git a/jstests/sharding/sessions_collection_auto_healing.js b/jstests/sharding/sessions_collection_auto_healing.js
index f314d8ac58c..c49cd32dd2f 100644
--- a/jstests/sharding/sessions_collection_auto_healing.js
+++ b/jstests/sharding/sessions_collection_auto_healing.js
@@ -1,7 +1,6 @@
/**
* Requires no shards.
* @tags: [
- * requires_fcv_70,
* catalog_shard_incompatible,
* requires_fcv_70,
* ]
@@ -18,13 +17,7 @@ load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Col
// implicit sessions.
TestData.disableImplicitSessions = true;
-var st = new ShardingTest({
- shards: 0,
- other: {
- mongosOptions:
- {setParameter: {'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}}
- }
-});
+var st = new ShardingTest({shards: 0});
var configSvr = st.configRS.getPrimary();
var mongos = st.s;
diff --git a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
index 2749b88dd73..9b9a88b9907 100644
--- a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
+++ b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
@@ -65,10 +65,7 @@ let st = new ShardingTest({
// Create shards with more than one node because we test for writeConcern majority failing.
config: TestData.catalogShard ? undefined : 1,
other: {
- mongosOptions: {
- verbose: 3,
- setParameter: {'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}
- },
+ mongosOptions: {verbose: 3},
rs0: {nodes: [{}, {rsConfig: {priority: 0}}]},
rs1: {nodes: [{}, {rsConfig: {priority: 0}}]},
rs2: {nodes: [{}, {rsConfig: {priority: 0}}]},
diff --git a/jstests/sharding/txn_two_phase_commit_server_status.js b/jstests/sharding/txn_two_phase_commit_server_status.js
index aee129a1f0b..fe1d64a57fa 100644
--- a/jstests/sharding/txn_two_phase_commit_server_status.js
+++ b/jstests/sharding/txn_two_phase_commit_server_status.js
@@ -6,9 +6,7 @@ const st = new ShardingTest({shards: 1});
const res = assert.commandWorked(st.shard0.adminCommand({serverStatus: 1}));
assert.neq(null, res.twoPhaseCommitCoordinator);
-// A catalog shard will have run config server metadata transactions, which are single shard but
-// create a two phase commit coordinator.
-assert.eq(TestData.catalogShard ? 1 : 0, res.twoPhaseCommitCoordinator.totalCreated);
+assert.eq(0, res.twoPhaseCommitCoordinator.totalCreated);
assert.eq(0, res.twoPhaseCommitCoordinator.totalStartedTwoPhaseCommit);
assert.eq(0, res.twoPhaseCommitCoordinator.totalCommittedTwoPhaseCommit);
assert.eq(0, res.twoPhaseCommitCoordinator.totalAbortedTwoPhaseCommit);
diff --git a/src/mongo/db/server_parameter.cpp b/src/mongo/db/server_parameter.cpp
index ba2d5db22c1..12546757b08 100644
--- a/src/mongo/db/server_parameter.cpp
+++ b/src/mongo/db/server_parameter.cpp
@@ -86,22 +86,6 @@ bool ServerParameter::isEnabled() const {
bool ServerParameter::isEnabledOnVersion(
const multiversion::FeatureCompatibilityVersion& targetFCV) const {
- if (_disableState != DisableState::Enabled) {
- return false;
- }
- return _isEnabledOnVersion(targetFCV);
-}
-
-bool ServerParameter::canBeEnabledOnVersion(
- const multiversion::FeatureCompatibilityVersion& targetFCV) const {
- if (_disableState == DisableState::PermanentlyDisabled) {
- return false;
- }
- return _isEnabledOnVersion(targetFCV);
-}
-
-bool ServerParameter::_isEnabledOnVersion(
- const multiversion::FeatureCompatibilityVersion& targetFCV) const {
return minFCVIsLessThanOrEqualToVersion(targetFCV) &&
!featureFlagIsDisabledOnVersion(targetFCV);
}
@@ -214,11 +198,51 @@ Status IDLServerParameterDeprecatedAlias::setFromString(StringData str,
return _sp->setFromString(str, tenantId);
}
+namespace {
+class DisabledTestParameter : public ServerParameter {
+public:
+ explicit DisabledTestParameter(ServerParameter* sp)
+ : ServerParameter(sp->name(), sp->getServerParameterType()), _sp(sp) {}
+
+ void append(OperationContext* opCtx,
+ BSONObjBuilder* b,
+ StringData name,
+ const boost::optional<TenantId>&) final {}
+
+ Status validate(const BSONElement& newValueElement,
+ const boost::optional<TenantId>& tenantId) const final {
+ return {ErrorCodes::BadValue,
+ str::stream() << "Server parameter: '" << name() << "' is currently disabled"};
+ }
+
+ Status setFromString(StringData, const boost::optional<TenantId>&) final {
+ return {ErrorCodes::BadValue,
+ str::stream() << "Server parameter: '" << name() << "' is currently disabled"};
+ }
+
+ Status set(const BSONElement& newValueElement, const boost::optional<TenantId>&) final {
+ return setFromString("", boost::none);
+ }
+
+ Status reset(const boost::optional<TenantId>&) final {
+ return setFromString("", boost::none);
+ }
+
+ bool isEnabledOnVersion(const multiversion::FeatureCompatibilityVersion&) const override {
+ return false;
+ }
+
+private:
+ // Retain the original pointer to avoid ASAN complaining.
+ ServerParameter* _sp;
+};
+} // namespace
+
void ServerParameterSet::disableTestParameters() {
for (auto& spit : _map) {
auto*& sp = spit.second;
if (sp->isTestOnly()) {
- sp->disable(true /* permanent */);
+ sp = new DisabledTestParameter(sp);
}
}
}
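DisabledTestParameter wraps a registered test-only parameter and refuses every mutation with BadValue while reporting itself disabled on all FCVs; ServerParameterSet::disableTestParameters() swaps each test-only entry for such a stub. Observed from the shell, the effect would be roughly the following (someTestOnlyParameter is a placeholder name, conn stands for a connection to a server running without test commands, and the error text comes from the setFromString body above):

    // Setting any test-only parameter on such a server fails with
    // BadValue: "Server parameter: '...' is currently disabled".
    const res = conn.adminCommand({setParameter: 1, someTestOnlyParameter: 1});
    assert.commandFailedWithCode(res, ErrorCodes.BadValue);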
diff --git a/src/mongo/db/server_parameter.h b/src/mongo/db/server_parameter.h
index 47c840ff99b..3ecc6a5b6f5 100644
--- a/src/mongo/db/server_parameter.h
+++ b/src/mongo/db/server_parameter.h
@@ -218,31 +218,11 @@ public:
_redact = true;
}
-private:
- enum DisableState { Enabled = 0, TemporarilyDisabled = 1, PermanentlyDisabled = 2 };
-
-public:
- void disable(bool permanent) {
- if (_disableState != DisableState::PermanentlyDisabled) {
- _disableState =
- permanent ? DisableState::PermanentlyDisabled : DisableState::TemporarilyDisabled;
- }
- }
-
- void enable() {
- if (_disableState == DisableState::TemporarilyDisabled) {
- _disableState = DisableState::Enabled;
- }
- }
-
bool isEnabled() const;
- // Return whether this server parameter would be enabled with the given FCV
- bool isEnabledOnVersion(const multiversion::FeatureCompatibilityVersion& targetFCV) const;
-
- // Return whether this server parameter is compatible with the given FCV, regardless of if it is
- // temporarily disabled
- bool canBeEnabledOnVersion(const multiversion::FeatureCompatibilityVersion& targetFCV) const;
+ // Return whether this server parameter is compatible with the given FCV.
+ virtual bool isEnabledOnVersion(
+ const multiversion::FeatureCompatibilityVersion& targetFCV) const;
void setFeatureFlag(FeatureFlag* featureFlag) {
_featureFlag = featureFlag;
@@ -253,9 +233,6 @@ public:
}
protected:
- virtual bool _isEnabledOnVersion(
- const multiversion::FeatureCompatibilityVersion& targetFCV) const;
-
bool featureFlagIsDisabledOnVersion(
const multiversion::FeatureCompatibilityVersion& targetFCV) const;
@@ -274,11 +251,6 @@ private:
ServerParameterType _type;
bool _testOnly = false;
bool _redact = false;
-
- // Tracks whether a parameter is enabled, temporarily disabled, or permanently disabled. This is
- // used when disabling (permanently) test-only parameters, and when enabling/disabling
- // (temporarily) cluster parameters on the mongos based on the cluster's FCV.
- DisableState _disableState = DisableState::Enabled;
};
class ServerParameterSet {
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 3d5e515301f..dd27d079bd5 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -1511,8 +1511,7 @@ void ExecCommandDatabase::_initiateCommand() {
const auto allowTransactionsOnConfigDatabase =
(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) ||
- serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) ||
- client->isFromSystemConnection();
+ serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
const auto invocationNss = _invocation->ns();
diff --git a/src/mongo/idl/SConscript b/src/mongo/idl/SConscript
index 7fd0e80b436..bdcbe0cde2a 100644
--- a/src/mongo/idl/SConscript
+++ b/src/mongo/idl/SConscript
@@ -88,8 +88,6 @@ env.Library(
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/audit',
'$BUILD_DIR/mongo/db/server_base',
- '$BUILD_DIR/mongo/db/transaction/transaction_api',
- '$BUILD_DIR/mongo/executor/inline_executor',
'$BUILD_DIR/mongo/s/grid',
'cluster_server_parameter',
'cluster_server_parameter_common',
diff --git a/src/mongo/idl/cluster_server_parameter_refresher.cpp b/src/mongo/idl/cluster_server_parameter_refresher.cpp
index 6b35914cb57..5398bdcae98 100644
--- a/src/mongo/idl/cluster_server_parameter_refresher.cpp
+++ b/src/mongo/idl/cluster_server_parameter_refresher.cpp
@@ -33,21 +33,14 @@
#include "mongo/db/audit.h"
#include "mongo/db/commands/list_databases_for_all_tenants_gen.h"
-#include "mongo/db/feature_compatibility_version_parser.h"
#include "mongo/db/multitenancy_gen.h"
-#include "mongo/db/transaction/transaction_api.h"
-#include "mongo/db/vector_clock.h"
#include "mongo/idl/cluster_server_parameter_common.h"
#include "mongo/idl/cluster_server_parameter_refresher_gen.h"
#include "mongo/logv2/log.h"
#include "mongo/s/grid.h"
-#include "mongo/s/is_mongos.h"
-#include "mongo/util/stacktrace.h"
-#include "mongo/util/version/releases.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl
-MONGO_FAIL_POINT_DEFINE(skipClusterParameterRefresh);
namespace mongo {
namespace {
@@ -58,97 +51,44 @@ Seconds loadInterval() {
return Seconds(clusterServerParameterRefreshIntervalSecs.load());
}
-std::pair<multiversion::FeatureCompatibilityVersion,
- TenantIdMap<stdx::unordered_map<std::string, BSONObj>>>
-getFCVAndClusterParametersFromConfigServer() {
- // Use an alternative client region, because we call refreshParameters both from the internal
- // refresher process and from getClusterParameter.
- auto altClient = getGlobalServiceContext()->makeClient("clusterParameterRefreshTransaction");
- AlternativeClientRegion clientRegion(altClient);
- auto opCtx = cc().makeOperationContext();
- auto as = AuthorizationSession::get(cc());
- as->grantInternalAuthorization(opCtx.get());
-
- auto configServers = Grid::get(opCtx.get())->shardRegistry()->getConfigShard();
- // Note that we get the list of tenants outside of the transaction. This should be okay, as if
- // we miss out on some new tenants created between this call and the transaction, we are just
- // getting slightly old data. Importantly, different tenants' cluster parameters don't interact
- // with each other, so we don't need a consistent snapshot of cluster parameters across all
- // tenants, just a consistent snapshot per tenant.
- auto tenantIds =
- uassertStatusOK(getTenantsWithConfigDbsOnShard(opCtx.get(), configServers.get()));
-
- auto allDocs = std::make_shared<TenantIdMap<stdx::unordered_map<std::string, BSONObj>>>();
- auto fcv = std::make_shared<multiversion::FeatureCompatibilityVersion>();
- auto doFetch = [allDocs, fcv, &tenantIds](const txn_api::TransactionClient& txnClient,
- ExecutorPtr txnExec) {
- FindCommandRequest findFCV{NamespaceString("admin.system.version")};
- findFCV.setFilter(BSON("_id"
- << "featureCompatibilityVersion"));
- return txnClient.exhaustiveFind(findFCV)
- .thenRunOn(txnExec)
- .then([fcv, allDocs, &tenantIds, &txnClient, txnExec](
- const std::vector<BSONObj>& foundDocs) {
- uassert(7410710,
- "Expected to find FCV in admin.system.version but found nothing!",
- !foundDocs.empty());
- *fcv = FeatureCompatibilityVersionParser::parseVersion(
- foundDocs[0]["version"].String());
-
- // Fetch one tenant, then call doFetchTenants for the rest of the tenants within
- // then() recursively.
- auto doFetchTenants = [](auto it,
- const auto& tenantIds,
- auto allDocs,
- const auto& txnClient,
- ExecutorPtr txnExec,
- auto& doFetchTenants_ref) mutable {
- if (it == tenantIds.end()) {
- return SemiFuture<void>::makeReady();
- }
- FindCommandRequest findClusterParametersTenant{
- NamespaceString::makeClusterParametersNSS(*it)};
- // We don't specify a filter as we want all documents.
- return txnClient.exhaustiveFind(findClusterParametersTenant)
- .thenRunOn(txnExec)
- .then([&doFetchTenants_ref, &txnClient, &tenantIds, txnExec, it, allDocs](
- const std::vector<BSONObj>& foundDocs) {
- stdx::unordered_map<std::string, BSONObj> docsMap;
- for (const auto& doc : foundDocs) {
- auto name = doc["_id"].String();
- docsMap.insert({std::move(name), doc.getOwned()});
- }
- allDocs->insert({*it, std::move(docsMap)});
- return doFetchTenants_ref(std::next(it),
- tenantIds,
- allDocs,
- txnClient,
- txnExec,
- doFetchTenants_ref);
- })
- .semi();
- };
- return doFetchTenants(
- tenantIds.begin(), tenantIds, allDocs, txnClient, txnExec, doFetchTenants);
- })
- .semi();
- };
-
- repl::ReadConcernArgs::get(opCtx.get()) =
- repl::ReadConcernArgs(repl::ReadConcernLevel::kSnapshotReadConcern);
-
- // We need to commit w/ writeConcern = majority for readConcern = snapshot to work.
- opCtx->setWriteConcern(WriteConcernOptions{WriteConcernOptions::kMajority,
- WriteConcernOptions::SyncMode::UNSET,
- WriteConcernOptions::kNoTimeout});
-
- auto executor = Grid::get(opCtx.get())->getExecutorPool()->getFixedExecutor();
- auto inlineExecutor = std::make_shared<executor::InlineExecutor>();
- auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor);
- txn_api::SyncTransactionWithRetries txn(
- opCtx.get(), sleepInlineExecutor, nullptr, inlineExecutor);
- txn.run(opCtx.get(), doFetch);
- return {*fcv, *allDocs};
+StatusWith<TenantIdMap<stdx::unordered_map<std::string, BSONObj>>>
+getClusterParametersFromConfigServer(OperationContext* opCtx) {
+ // Attempt to retrieve cluster parameter documents from the config server.
+ // exhaustiveFindOnConfig makes up to 3 total attempts if it receives a retriable error before
+ // giving up.
+ LOGV2_DEBUG(6226404, 3, "Retrieving cluster server parameters from config server");
+ auto configServers = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto swTenantIds = getTenantsWithConfigDbsOnShard(opCtx, configServers.get());
+ if (!swTenantIds.isOK()) {
+ return swTenantIds.getStatus();
+ }
+ auto tenantIds = std::move(swTenantIds.getValue());
+
+ TenantIdMap<stdx::unordered_map<std::string, BSONObj>> allDocs;
+ for (const auto& tenantId : tenantIds) {
+ auto swFindResponse = configServers->exhaustiveFindOnConfig(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kMajorityReadConcern,
+ NamespaceString::makeClusterParametersNSS(tenantId),
+ BSONObj(),
+ BSONObj(),
+ boost::none);
+
+ // If the error is not retriable or persists beyond the max number of retry attempts, give
+ // up and return the error status.
+ if (!swFindResponse.isOK()) {
+ return swFindResponse.getStatus();
+ }
+ stdx::unordered_map<std::string, BSONObj> docsMap;
+ for (const auto& doc : swFindResponse.getValue().docs) {
+ auto name = doc["_id"].String();
+ docsMap.insert({std::move(name), doc});
+ }
+ allDocs.insert({std::move(tenantId), std::move(docsMap)});
+ }
+
+ return allDocs;
}
} // namespace
@@ -181,56 +121,30 @@ void ClusterServerParameterRefresher::setPeriod(Milliseconds period) {
}
Status ClusterServerParameterRefresher::refreshParameters(OperationContext* opCtx) {
- invariant(isMongos());
- multiversion::FeatureCompatibilityVersion fcv;
- TenantIdMap<stdx::unordered_map<std::string, BSONObj>> clusterParameterDocs;
-
- try {
- std::tie(fcv, clusterParameterDocs) = getFCVAndClusterParametersFromConfigServer();
- } catch (const DBException& ex) {
- LOGV2_WARNING(
- 7410719,
- "Could not refresh cluster server parameters from config servers due to failure in "
- "getFCVAndClusterParametersFromConfigServer. Will retry after refresh interval",
- "ex"_attr = ex.toStatus());
- return ex.toStatus();
+ // Query the config servers for all cluster parameter documents.
+ auto swClusterParameterDocs = getClusterParametersFromConfigServer(opCtx);
+ if (!swClusterParameterDocs.isOK()) {
+ LOGV2_WARNING(6226401,
+ "Could not refresh cluster server parameters from config servers. Will retry "
+ "after refresh interval elapses",
+ "clusterServerParameterRefreshIntervalSecs"_attr = loadInterval(),
+ "reason"_attr = swClusterParameterDocs.getStatus().reason());
+ return swClusterParameterDocs.getStatus();
}
// Set each in-memory cluster parameter that was returned in the response.
bool isSuccessful = true;
Status status = Status::OK();
ServerParameterSet* clusterParameterCache = ServerParameterSet::getClusterParameterSet();
- bool fcvChanged = fcv != _lastFcv;
- if (fcvChanged) {
- LOGV2_DEBUG(7410705,
- 3,
- "Cluster's FCV was different from last during refresh",
- "oldFCV"_attr = multiversion::toString(_lastFcv),
- "newFCV"_attr = multiversion::toString(fcv));
- }
+
+ auto clusterParameterDocs = std::move(swClusterParameterDocs.getValue());
std::vector<BSONObj> allUpdatedParameters;
allUpdatedParameters.reserve(clusterParameterDocs.size());
for (const auto& [tenantId, tenantParamDocs] : clusterParameterDocs) {
std::vector<BSONObj> updatedParameters;
updatedParameters.reserve(tenantParamDocs.size());
- for (auto [name, sp] : clusterParameterCache->getMap()) {
- if (fcvChanged) {
- // Use canBeEnabled because if we previously temporarily disabled the parameter,
- // isEnabled will be false
- if (sp->canBeEnabledOnVersion(_lastFcv) && !sp->canBeEnabledOnVersion(fcv)) {
- // Parameter is newly disabled on cluster
- LOGV2_DEBUG(
- 7410703, 3, "Disabling parameter during refresh", "name"_attr = name);
- sp->disable(false /* permanent */);
- continue;
- } else if (sp->canBeEnabledOnVersion(fcv) && !sp->canBeEnabledOnVersion(_lastFcv)) {
- // Parameter is newly enabled on cluster
- LOGV2_DEBUG(
- 7410704, 3, "Enabling parameter during refresh", "name"_attr = name);
- sp->enable();
- }
- }
+ for (const auto& [name, sp] : clusterParameterCache->getMap()) {
if (!sp->isEnabled()) {
continue;
}
@@ -282,18 +196,12 @@ Status ClusterServerParameterRefresher::refreshParameters(OperationContext* opCt
"clusterParameterDocuments"_attr = allUpdatedParameters);
}
- _lastFcv = fcv;
-
return status;
}
void ClusterServerParameterRefresher::start(ServiceContext* serviceCtx, OperationContext* opCtx) {
auto refresher = std::make_unique<ClusterServerParameterRefresher>();
- // On mongos, this should always be true after FCV initialization
- // (Generic FCV reference):
- invariant(serverGlobalParams.featureCompatibility.getVersion() ==
- multiversion::GenericFCV::kLatest);
- refresher->_lastFcv = serverGlobalParams.featureCompatibility.getVersion();
+
auto periodicRunner = serviceCtx->getPeriodicRunner();
invariant(periodicRunner);
@@ -310,10 +218,6 @@ void ClusterServerParameterRefresher::start(ServiceContext* serviceCtx, Operatio
}
void ClusterServerParameterRefresher::run() {
- if (MONGO_unlikely(skipClusterParameterRefresh.shouldFail())) {
- return;
- }
-
auto opCtx = cc().makeOperationContext();
auto status = refreshParameters(opCtx.get());
if (!status.isOK()) {
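With the transaction machinery reverted, the mongos refresher is again a simple periodic poll: every clusterServerParameterRefreshIntervalSecs it issues one majority-read exhaustiveFindOnConfig per tenant and applies whatever documents come back, with no FCV tracking in between. Tests that need changes to propagate quickly can shrink the interval at startup; a minimal sketch using the server parameter named in this file:

    // Poll the config server for cluster parameter changes every second.
    const st = new ShardingTest({
        shards: 1,
        mongosOptions: {setParameter: {clusterServerParameterRefreshIntervalSecs: 1}}
    });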
diff --git a/src/mongo/idl/cluster_server_parameter_refresher.h b/src/mongo/idl/cluster_server_parameter_refresher.h
index 5eaeb5e6557..746d961d0c5 100644
--- a/src/mongo/idl/cluster_server_parameter_refresher.h
+++ b/src/mongo/idl/cluster_server_parameter_refresher.h
@@ -67,7 +67,6 @@ private:
void run();
std::unique_ptr<PeriodicJobAnchor> _job;
- multiversion::FeatureCompatibilityVersion _lastFcv;
};
Status clusterServerParameterRefreshIntervalSecsNotify(const int& newValue);
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 5534cc42dca..ac410088bc8 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -586,7 +586,7 @@ void ParseAndRunCommand::_parseCommand() {
command->attachLogicalSessionsToOpCtx(),
true));
- auto allowTransactionsOnConfigDatabase = !isMongos() || client->isFromSystemConnection();
+ auto allowTransactionsOnConfigDatabase = !isMongos();
validateSessionOptions(*_osi, command->getName(), nss, allowTransactionsOnConfigDatabase);
_wc.emplace(uassertStatusOK(WriteConcernOptions::extractWCFromCommand(request.body)));
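Taken together, the two allowTransactionsOnConfigDatabase hunks remove the system-connection exemption: after this revert, only config servers and shard servers may run transactions against the config database, and a mongos never may. A hedged shell sketch of the restored mongos-side behavior, assuming a running ShardingTest st:

    const session = st.s.startSession();
    session.startTransaction();
    // Writes to config.* inside a multi-document transaction are rejected during
    // session-option validation on mongos.
    assert.commandFailed(
        session.getDatabase("config").runCommand({insert: "foo", documents: [{x: 1}]}));
    session.abortTransaction_forTesting();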