path: root/src/mongo/db
author     Mark Benvenuto <mark.benvenuto@mongodb.com>  2017-01-05 10:56:41 -0500
committer  Mark Benvenuto <mark.benvenuto@mongodb.com>  2017-01-05 10:56:41 -0500
commit     ba55f2573976ba570c2319bce9b598f0a660445f (patch)
tree       7fc8adb43798e03517933c4e575dcbebe8f42087 /src/mongo/db
parent     f5fbf31650eea903edbbcd2f9ef042b4c39e2ecb (diff)
download   mongo-ba55f2573976ba570c2319bce9b598f0a660445f.tar.gz
SERVER-25932 Make MONGO_EXPORT_SERVER_PARAMETER use AtomicWord instead of std::atomic
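The change is mechanical throughout: each std::atomic<T> server-parameter variable becomes one of MongoDB's AtomicWord aliases (AtomicInt32, AtomicBool) from "mongo/platform/atomic_word.h", and every read or write becomes an explicit load()/store() call. A minimal before/after sketch of the pattern follows; the knob name myServerKnob is illustrative only (real instances in this patch include maxIndexBuildMemoryUsageMegabytes and failIndexKeyTooLong).

// Before this patch:
//     std::atomic<int> myServerKnob(42);  // NOLINT
//     if (n > myServerKnob) { ... }       // implicit atomic read
//
// After this patch (sketch; myServerKnob is a hypothetical knob):
#include "mongo/platform/atomic_word.h"

namespace mongo {

AtomicInt32 myServerKnob(42);

void exampleCallSite(int n) {
    if (n > myServerKnob.load()) {  // reads are explicit
        myServerKnob.store(n);      // writes are explicit
    }
}

}  // namespace mongo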
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/auth/sasl_commands.cpp | 4
-rw-r--r--  src/mongo/db/auth/sasl_options.cpp | 8
-rw-r--r--  src/mongo/db/auth/sasl_options.h | 5
-rw-r--r--  src/mongo/db/auth/security_key.cpp | 4
-rw-r--r--  src/mongo/db/auth/user_cache_invalidator_job.cpp | 2
-rw-r--r--  src/mongo/db/catalog/collection.cpp | 2
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp | 2
-rw-r--r--  src/mongo/db/catalog/drop_indexes.cpp | 2
-rw-r--r--  src/mongo/db/catalog/index_create.cpp | 5
-rw-r--r--  src/mongo/db/clientcursor.cpp | 4
-rw-r--r--  src/mongo/db/cloner.cpp | 2
-rw-r--r--  src/mongo/db/commands/authentication_commands.cpp | 6
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp | 2
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp | 6
-rw-r--r--  src/mongo/db/commands/validate.cpp | 2
-rw-r--r--  src/mongo/db/concurrency/write_conflict_exception.cpp | 4
-rw-r--r--  src/mongo/db/concurrency/write_conflict_exception.h | 2
-rw-r--r--  src/mongo/db/exec/geo_near.cpp | 2
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp | 6
-rw-r--r--  src/mongo/db/exec/sort.cpp | 4
-rw-r--r--  src/mongo/db/ftdc/ftdc_mongod.cpp | 32
-rw-r--r--  src/mongo/db/index/index_access_method.cpp | 2
-rw-r--r--  src/mongo/db/index/index_access_method.h | 2
-rw-r--r--  src/mongo/db/mongod_options.cpp | 11
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/tee_buffer.h | 2
-rw-r--r--  src/mongo/db/query/expression_index.cpp | 21
-rw-r--r--  src/mongo/db/query/expression_index_knobs.h | 12
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 7
-rw-r--r--  src/mongo/db/query/index_bounds_builder.cpp | 2
-rw-r--r--  src/mongo/db/query/plan_cache.cpp | 6
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp | 12
-rw-r--r--  src/mongo/db/query/plan_enumerator.h | 4
-rw-r--r--  src/mongo/db/query/plan_ranker.cpp | 2
-rw-r--r--  src/mongo/db/query/plan_yield_policy.cpp | 4
-rw-r--r--  src/mongo/db/query/planner_access.cpp | 2
-rw-r--r--  src/mongo/db/query/planner_analysis.cpp | 2
-rw-r--r--  src/mongo/db/query/query_knobs.cpp | 2
-rw-r--r--  src/mongo/db/query/query_knobs.h | 43
-rw-r--r--  src/mongo/db/query/query_planner_params.h | 2
-rw-r--r--  src/mongo/db/query/query_planner_test.cpp | 12
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.cpp | 2
-rw-r--r--  src/mongo/db/repl/collection_cloner.cpp | 6
-rw-r--r--  src/mongo/db/repl/data_replicator.cpp | 2
-rw-r--r--  src/mongo/db/repl/database_cloner.cpp | 2
-rw-r--r--  src/mongo/db/repl/databases_cloner.cpp | 2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 8
-rw-r--r--  src/mongo/db/repl/noop_writer.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp | 6
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp | 2
-rw-r--r--  src/mongo/db/repl/sync_tail.h | 4
-rw-r--r--  src/mongo/db/s/collection_range_deleter.cpp | 3
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 2
-rw-r--r--  src/mongo/db/server_options.h | 3
-rw-r--r--  src/mongo/db/server_options_helpers.cpp | 4
-rw-r--r--  src/mongo/db/server_parameters.h | 7
-rw-r--r--  src/mongo/db/server_parameters_inline.h | 40
-rw-r--r--  src/mongo/db/server_parameters_test.cpp | 8
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp | 2
-rw-r--r--  src/mongo/db/storage/storage_options.h | 5
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 4
-rw-r--r--  src/mongo/db/ttl.cpp | 4
66 files changed, 212 insertions, 174 deletions
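The exported knobs themselves follow the same pattern. The fragments below are taken from three of the files listed above as they appear in this diff (query_knobs.h, query_knobs.cpp, document_source_cursor.cpp); only their juxtaposition into a single snippet is illustrative.

// query_knobs.h: the knob is declared as an AtomicWord instead of std::atomic.
#include "mongo/platform/atomic_word.h"
namespace mongo {
extern AtomicInt32 internalDocumentSourceCursorBatchSizeBytes;
}  // namespace mongo

// query_knobs.cpp: the exporting macro keeps its (name, type, default) shape.
MONGO_EXPORT_SERVER_PARAMETER(internalDocumentSourceCursorBatchSizeBytes, int, 4 * 1024 * 1024);

// document_source_cursor.cpp: call sites now read the knob with an explicit load().
if (memUsageBytes > internalDocumentSourceCursorBatchSizeBytes.load()) {
    // End this batch and prepare PlanExecutor for yielding.
    _exec->saveState();
    return;
}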
diff --git a/src/mongo/db/auth/sasl_commands.cpp b/src/mongo/db/auth/sasl_commands.cpp
index c4edf1f9a0f..b30cbf84340 100644
--- a/src/mongo/db/auth/sasl_commands.cpp
+++ b/src/mongo/db/auth/sasl_commands.cpp
@@ -183,7 +183,7 @@ Status doSaslStep(const Client* client,
<< session->getPrincipalId() << " on " << session->getAuthenticationDatabase()
<< " from client " << client->getRemote().toString() << " ; " << redact(status);
- sleepmillis(saslGlobalParams.authFailedDelay);
+ sleepmillis(saslGlobalParams.authFailedDelay.load());
// All the client needs to know is that authentication has failed.
return Status(ErrorCodes::AuthenticationFailed, "Authentication failed.");
}
@@ -200,7 +200,7 @@ Status doSaslStep(const Client* client,
return status;
}
- if (!serverGlobalParams.quiet) {
+ if (!serverGlobalParams.quiet.load()) {
log() << "Successfully authenticated as principal " << session->getPrincipalId()
<< " on " << session->getAuthenticationDatabase();
}
diff --git a/src/mongo/db/auth/sasl_options.cpp b/src/mongo/db/auth/sasl_options.cpp
index fdb64f044e2..278d1037ca9 100644
--- a/src/mongo/db/auth/sasl_options.cpp
+++ b/src/mongo/db/auth/sasl_options.cpp
@@ -51,10 +51,10 @@ SASLGlobalParams::SASLGlobalParams() {
authenticationMechanisms.push_back("SCRAM-SHA-1");
// Default iteration count for SCRAM authentication.
- scramIterationCount = defaultScramIterationCount;
+ scramIterationCount.store(defaultScramIterationCount);
// Default value for auth failed delay
- authFailedDelay = 0;
+ authFailedDelay.store(0);
}
Status addSASLOptions(moe::OptionSection* options) {
@@ -139,8 +139,8 @@ Status storeSASLOptions(const moe::Environment& params) {
saslGlobalParams.authdPath = params["security.sasl.saslauthdSocketPath"].as<std::string>();
}
if (params.count("security.sasl.scramIterationCount") && !haveScramIterationCount) {
- saslGlobalParams.scramIterationCount =
- params["security.sasl.scramIterationCount"].as<int>();
+ saslGlobalParams.scramIterationCount.store(
+ params["security.sasl.scramIterationCount"].as<int>());
}
return Status::OK();
diff --git a/src/mongo/db/auth/sasl_options.h b/src/mongo/db/auth/sasl_options.h
index 6139ea10fde..9f9427c4a18 100644
--- a/src/mongo/db/auth/sasl_options.h
+++ b/src/mongo/db/auth/sasl_options.h
@@ -33,6 +33,7 @@
#include <vector>
#include "mongo/base/status.h"
+#include "mongo/platform/atomic_word.h"
namespace mongo {
@@ -48,8 +49,8 @@ struct SASLGlobalParams {
std::string hostName;
std::string serviceName;
std::string authdPath;
- std::atomic<int> scramIterationCount; // NOLINT
- std::atomic<int> authFailedDelay; // NOLINT
+ AtomicInt32 scramIterationCount;
+ AtomicInt32 authFailedDelay;
SASLGlobalParams();
};
diff --git a/src/mongo/db/auth/security_key.cpp b/src/mongo/db/auth/security_key.cpp
index 97e7076c447..8e1dfe46e00 100644
--- a/src/mongo/db/auth/security_key.cpp
+++ b/src/mongo/db/auth/security_key.cpp
@@ -76,8 +76,8 @@ bool setUpSecurityKey(const string& filename) {
credentials.password =
mongo::createPasswordDigest(internalSecurity.user->getName().getUser().toString(), str);
- BSONObj creds =
- scram::generateCredentials(credentials.password, saslGlobalParams.scramIterationCount);
+ BSONObj creds = scram::generateCredentials(credentials.password,
+ saslGlobalParams.scramIterationCount.load());
credentials.scram.iterationCount = creds[scram::iterationCountFieldName].Int();
credentials.scram.salt = creds[scram::saltFieldName].String();
credentials.scram.storedKey = creds[scram::storedKeyFieldName].String();
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index f941e1d87b1..4e5c113c664 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -53,7 +53,7 @@ namespace mongo {
namespace {
// How often to check with the config servers whether authorization information has changed.
-std::atomic<int> userCacheInvalidationIntervalSecs(30); // NOLINT 30 second default
+AtomicInt32 userCacheInvalidationIntervalSecs(30); // 30 second default
stdx::mutex invalidationIntervalMutex;
stdx::condition_variable invalidationIntervalChangedCondition;
Date_t lastInvalidationTime;
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 39bb1870256..b96a578c0aa 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -1189,7 +1189,7 @@ public:
auto totalKeys = numLongKeys + numIndexedKeys;
bool hasTooFewKeys = false;
- bool noErrorOnTooFewKeys = !failIndexKeyTooLong && (_level != kValidateFull);
+ bool noErrorOnTooFewKeys = !failIndexKeyTooLong.load() && (_level != kValidateFull);
if (idx->isIdIndex() && totalKeys != numRecs) {
hasTooFewKeys = totalKeys < numRecs ? true : hasTooFewKeys;
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index 5b6fef47cc0..e2fc90f8273 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -52,7 +52,7 @@ namespace mongo {
Status dropCollection(OperationContext* txn,
const NamespaceString& collectionName,
BSONObjBuilder& result) {
- if (!serverGlobalParams.quiet) {
+ if (!serverGlobalParams.quiet.load()) {
log() << "CMD: drop " << collectionName;
}
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index 5f252685b77..a0bb7a22e0d 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -54,7 +54,7 @@ Status wrappedRun(OperationContext* txn,
Database* const db,
const BSONObj& jsobj,
BSONObjBuilder* anObjBuilder) {
- if (!serverGlobalParams.quiet) {
+ if (!serverGlobalParams.quiet.load()) {
LOG(0) << "CMD: dropIndexes " << toDeleteNs;
}
Collection* collection = db ? db->getCollection(toDeleteNs) : nullptr;
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 17a3d981d5d..d7c86f27163 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -66,7 +66,7 @@ MONGO_FP_DECLARE(crashAfterStartingIndexBuild);
MONGO_FP_DECLARE(hangAfterStartingIndexBuild);
MONGO_FP_DECLARE(hangAfterStartingIndexBuildUnlocked);
-std::atomic<std::int32_t> maxIndexBuildMemoryUsageMegabytes(500); // NOLINT
+AtomicInt32 maxIndexBuildMemoryUsageMegabytes(500);
class ExportedMaxIndexBuildMemoryUsageParameter
: public ExportedServerParameter<std::int32_t, ServerParameterType::kStartupAndRuntime> {
@@ -213,7 +213,8 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
std::size_t eachIndexBuildMaxMemoryUsageBytes = 0;
if (!indexSpecs.empty()) {
eachIndexBuildMaxMemoryUsageBytes =
- std::size_t(maxIndexBuildMemoryUsageMegabytes) * 1024 * 1024 / indexSpecs.size();
+ static_cast<std::size_t>(maxIndexBuildMemoryUsageMegabytes.load()) * 1024 * 1024 /
+ indexSpecs.size();
}
for (size_t i = 0; i < indexSpecs.size(); i++) {
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index b696529871f..c2f5c487d97 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -141,7 +141,7 @@ bool ClientCursor::shouldTimeout(int millis) {
if (_isNoTimeout || _isPinned) {
return false;
}
- return _idleAgeMillis > cursorTimeoutMillis;
+ return _idleAgeMillis > cursorTimeoutMillis.load();
}
void ClientCursor::resetIdleTime() {
@@ -273,7 +273,7 @@ public:
cursorStatsTimedOut.increment(
CursorManager::timeoutCursorsGlobal(&txn, t.millisReset()));
}
- sleepsecs(clientCursorMonitorFrequencySecs);
+ sleepsecs(clientCursorMonitorFrequencySecs.load());
}
}
};
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index fbba5e00365..7ea8669ec4f 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -251,7 +251,7 @@ struct Cloner::Fun {
str::stream ss;
ss << "Cloner: found corrupt document in " << from_collection.toString() << ": "
<< redact(status);
- if (skipCorruptDocumentsWhenCloning) {
+ if (skipCorruptDocumentsWhenCloning.load()) {
warning() << ss.ss.str() << "; skipping";
continue;
}
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index d1374799c1d..8de3931d529 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -157,7 +157,7 @@ bool CmdAuthenticate::run(OperationContext* txn,
int,
string& errmsg,
BSONObjBuilder& result) {
- if (!serverGlobalParams.quiet) {
+ if (!serverGlobalParams.quiet.load()) {
mutablebson::Document cmdToLog(cmdObj, mutablebson::Document::kInPlaceDisabled);
redactForLogging(&cmdToLog);
log() << " authenticate db: " << dbname << " " << cmdToLog;
@@ -185,7 +185,7 @@ bool CmdAuthenticate::run(OperationContext* txn,
Status status = _authenticate(txn, mechanism, user, cmdObj);
audit::logAuthentication(Client::getCurrent(), mechanism, user, status.code());
if (!status.isOK()) {
- if (!serverGlobalParams.quiet) {
+ if (!serverGlobalParams.quiet.load()) {
log() << "Failed to authenticate " << user << " with mechanism " << mechanism << ": "
<< status;
}
@@ -196,7 +196,7 @@ bool CmdAuthenticate::run(OperationContext* txn,
} else {
appendCommandStatus(result, status);
}
- sleepmillis(saslGlobalParams.authFailedDelay);
+ sleepmillis(saslGlobalParams.authFailedDelay.load());
return false;
}
result.append("dbname", user.getDB());
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 25b0423fa9e..e08f9c3b429 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -469,7 +469,7 @@ public:
int was = _diaglog.setLevel(cmdObj.firstElement().numberInt());
_diaglog.flush();
- if (!serverGlobalParams.quiet) {
+ if (!serverGlobalParams.quiet.load()) {
LOG(0) << "CMD: diagLogging set to " << _diaglog.getLevel() << " from: " << was;
}
result.append("was", was);
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 061598fb4b6..7d14e2f1416 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -686,7 +686,7 @@ public:
// Add SCRAM credentials for appropriate authSchemaVersions.
if (authzVersion > AuthorizationManager::schemaVersion26Final) {
BSONObj scramCred = scram::generateCredentials(
- args.hashedPassword, saslGlobalParams.scramIterationCount);
+ args.hashedPassword, saslGlobalParams.scramIterationCount.load());
credentialsBuilder.append("SCRAM-SHA-1", scramCred);
} else { // Otherwise default to MONGODB-CR.
credentialsBuilder.append("MONGODB-CR", args.hashedPassword);
@@ -801,7 +801,7 @@ public:
// Add SCRAM credentials for appropriate authSchemaVersions
if (authzVersion > AuthorizationManager::schemaVersion26Final) {
BSONObj scramCred = scram::generateCredentials(
- args.hashedPassword, saslGlobalParams.scramIterationCount);
+ args.hashedPassword, saslGlobalParams.scramIterationCount.load());
credentialsBuilder.append("SCRAM-SHA-1", scramCred);
} else { // Otherwise default to MONGODB-CR
credentialsBuilder.append("MONGODB-CR", args.hashedPassword);
@@ -2785,7 +2785,7 @@ void updateUserCredentials(OperationContext* txn,
BSONObjBuilder toSetBuilder(updateBuilder.subobjStart("$set"));
toSetBuilder << "credentials"
<< BSON("SCRAM-SHA-1" << scram::generateCredentials(
- hashedPassword, saslGlobalParams.scramIterationCount));
+ hashedPassword, saslGlobalParams.scramIterationCount.load()));
}
uassertStatusOK(updateOneAuthzDocument(
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 9622947e59f..02c577da9c8 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -105,7 +105,7 @@ public:
return false;
}
- if (!serverGlobalParams.quiet) {
+ if (!serverGlobalParams.quiet.load()) {
LOG(0) << "CMD: validate " << nss.ns();
}
diff --git a/src/mongo/db/concurrency/write_conflict_exception.cpp b/src/mongo/db/concurrency/write_conflict_exception.cpp
index b32dfe23084..2ef2e2a3494 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.cpp
+++ b/src/mongo/db/concurrency/write_conflict_exception.cpp
@@ -37,11 +37,11 @@
namespace mongo {
-std::atomic<bool> WriteConflictException::trace(false); // NOLINT
+AtomicBool WriteConflictException::trace(false);
WriteConflictException::WriteConflictException()
: DBException("WriteConflict", ErrorCodes::WriteConflict) {
- if (trace) {
+ if (trace.load()) {
printStackTrace();
}
}
diff --git a/src/mongo/db/concurrency/write_conflict_exception.h b/src/mongo/db/concurrency/write_conflict_exception.h
index 1183e93233a..477fdcc7413 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.h
+++ b/src/mongo/db/concurrency/write_conflict_exception.h
@@ -79,6 +79,6 @@ public:
* If true, will call printStackTrace on every WriteConflictException created.
* Can be set via setParameter named traceWriteConflictExceptions.
*/
- static std::atomic<bool> trace; // NOLINT
+ static AtomicBool trace;
};
}
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 2acb29c6b02..744e2e6fc0b 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -699,7 +699,7 @@ StatusWith<NearStage::CoveredInterval*> //
const int twoDFieldPosition = 0;
std::vector<GeoHash> unorderedCovering = ExpressionMapping::get2dCovering(
- *coverRegion, _twoDIndex->infoObj(), internalGeoNearQuery2DMaxCoveringCells);
+ *coverRegion, _twoDIndex->infoObj(), internalGeoNearQuery2DMaxCoveringCells.load());
// Make sure the same index key isn't visited twice
R2CellUnion diffUnion;
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 28c14dbcba3..ba18803a2ac 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -172,13 +172,13 @@ Status MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
size_t MultiPlanStage::getTrialPeriodWorks(OperationContext* txn, const Collection* collection) {
// Run each plan some number of times. This number is at least as great as
// 'internalQueryPlanEvaluationWorks', but may be larger for big collections.
- size_t numWorks = internalQueryPlanEvaluationWorks;
+ size_t numWorks = internalQueryPlanEvaluationWorks.load();
if (NULL != collection) {
// For large collections, the number of works is set to be this
// fraction of the collection size.
double fraction = internalQueryPlanEvaluationCollFraction;
- numWorks = std::max(static_cast<size_t>(internalQueryPlanEvaluationWorks),
+ numWorks = std::max(static_cast<size_t>(internalQueryPlanEvaluationWorks.load()),
static_cast<size_t>(fraction * collection->numRecords(txn)));
}
@@ -189,7 +189,7 @@ size_t MultiPlanStage::getTrialPeriodWorks(OperationContext* txn, const Collecti
size_t MultiPlanStage::getTrialPeriodNumToReturn(const CanonicalQuery& query) {
// Determine the number of results which we will produce during the plan
// ranking phase before stopping.
- size_t numResults = static_cast<size_t>(internalQueryPlanEvaluationMaxResults);
+ size_t numResults = static_cast<size_t>(internalQueryPlanEvaluationMaxResults.load());
if (query.getQueryRequest().getNToReturn()) {
numResults =
std::min(static_cast<size_t>(*query.getQueryRequest().getNToReturn()), numResults);
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 91dabd1ce46..2fd40f0efb2 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -101,7 +101,7 @@ bool SortStage::isEOF() {
}
PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
- const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes);
+ const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes.load());
if (_memUsage > maxBytes) {
mongoutils::str::stream ss;
ss << "Sort operation used more than the maximum " << maxBytes
@@ -219,7 +219,7 @@ void SortStage::doInvalidate(OperationContext* txn, const RecordId& dl, Invalida
unique_ptr<PlanStageStats> SortStage::getStats() {
_commonStats.isEOF = isEOF();
- const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes);
+ const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes.load());
_specificStats.memLimit = maxBytes;
_specificStats.memUsage = _memUsage;
_specificStats.limit = _limit;
diff --git a/src/mongo/db/ftdc/ftdc_mongod.cpp b/src/mongo/db/ftdc/ftdc_mongod.cpp
index 094a2b05d06..60c1c46c9e4 100644
--- a/src/mongo/db/ftdc/ftdc_mongod.cpp
+++ b/src/mongo/db/ftdc/ftdc_mongod.cpp
@@ -63,7 +63,7 @@ FTDCController* getGlobalFTDCController() {
return getFTDCController(getGlobalServiceContext()).get();
}
-std::atomic<bool> localEnabledFlag(FTDCConfig::kEnabledDefault); // NOLINT
+AtomicBool localEnabledFlag(FTDCConfig::kEnabledDefault);
class ExportedFTDCEnabledParameter
: public ExportedServerParameter<bool, ServerParameterType::kStartupAndRuntime> {
@@ -85,7 +85,7 @@ public:
} exportedFTDCEnabledParameter;
-std::atomic<std::int32_t> localPeriodMillis(FTDCConfig::kPeriodMillisDefault); // NOLINT
+AtomicInt32 localPeriodMillis(FTDCConfig::kPeriodMillisDefault);
class ExportedFTDCPeriodParameter
: public ExportedServerParameter<std::int32_t, ServerParameterType::kStartupAndRuntime> {
@@ -114,11 +114,9 @@ public:
} exportedFTDCPeriodParameter;
// Scale the values down since are defaults are in bytes, but the user interface is MB
-std::atomic<std::int32_t> localMaxDirectorySizeMB( // NOLINT
- FTDCConfig::kMaxDirectorySizeBytesDefault / (1024 * 1024));
+AtomicInt32 localMaxDirectorySizeMB(FTDCConfig::kMaxDirectorySizeBytesDefault / (1024 * 1024));
-std::atomic<std::int32_t> localMaxFileSizeMB(FTDCConfig::kMaxFileSizeBytesDefault / // NOLINT
- (1024 * 1024));
+AtomicInt32 localMaxFileSizeMB(FTDCConfig::kMaxFileSizeBytesDefault / (1024 * 1024));
class ExportedFTDCDirectorySizeParameter
: public ExportedServerParameter<std::int32_t, ServerParameterType::kStartupAndRuntime> {
@@ -136,12 +134,12 @@ public:
"diagnosticDataCollectionDirectorySizeMB must be greater than or equal to 10");
}
- if (potentialNewValue < localMaxFileSizeMB) {
+ if (potentialNewValue < localMaxFileSizeMB.load()) {
return Status(
ErrorCodes::BadValue,
str::stream()
<< "diagnosticDataCollectionDirectorySizeMB must be greater than or equal to '"
- << localMaxFileSizeMB
+ << localMaxFileSizeMB.load()
<< "' which is the current value of diagnosticDataCollectionFileSizeMB.");
}
@@ -170,12 +168,12 @@ public:
"diagnosticDataCollectionFileSizeMB must be greater than or equal to 1");
}
- if (potentialNewValue > localMaxDirectorySizeMB) {
+ if (potentialNewValue > localMaxDirectorySizeMB.load()) {
return Status(
ErrorCodes::BadValue,
str::stream()
<< "diagnosticDataCollectionFileSizeMB must be less than or equal to '"
- << localMaxDirectorySizeMB
+ << localMaxDirectorySizeMB.load()
<< "' which is the current value of diagnosticDataCollectionDirectorySizeMB.");
}
@@ -189,7 +187,7 @@ public:
} exportedFTDCFileSizeParameter;
-std::atomic<std::int32_t> localMaxSamplesPerArchiveMetricChunk( // NOLINT
+AtomicInt32 localMaxSamplesPerArchiveMetricChunk(
FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault);
class ExportedFTDCArchiveChunkSizeParameter
@@ -218,7 +216,7 @@ public:
} exportedFTDCArchiveChunkSizeParameter;
-std::atomic<std::int32_t> localMaxSamplesPerInterimMetricChunk( // NOLINT
+AtomicInt32 localMaxSamplesPerInterimMetricChunk(
FTDCConfig::kMaxSamplesPerInterimMetricChunkDefault);
class ExportedFTDCInterimChunkSizeParameter
@@ -296,11 +294,11 @@ void startFTDC() {
FTDCConfig config;
config.period = Milliseconds(localPeriodMillis.load());
- config.enabled = localEnabledFlag;
- config.maxFileSizeBytes = localMaxFileSizeMB * 1024 * 1024;
- config.maxDirectorySizeBytes = localMaxDirectorySizeMB * 1024 * 1024;
- config.maxSamplesPerArchiveMetricChunk = localMaxSamplesPerArchiveMetricChunk;
- config.maxSamplesPerInterimMetricChunk = localMaxSamplesPerInterimMetricChunk;
+ config.enabled = localEnabledFlag.load();
+ config.maxFileSizeBytes = localMaxFileSizeMB.load() * 1024 * 1024;
+ config.maxDirectorySizeBytes = localMaxDirectorySizeMB.load() * 1024 * 1024;
+ config.maxSamplesPerArchiveMetricChunk = localMaxSamplesPerArchiveMetricChunk.load();
+ config.maxSamplesPerInterimMetricChunk = localMaxSamplesPerInterimMetricChunk.load();
auto controller = stdx::make_unique<FTDCController>(dir, config);
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 95d3a26f48b..a3d9716a2aa 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -117,7 +117,7 @@ bool IndexAccessMethod::ignoreKeyTooLong(OperationContext* txn) {
const auto shouldRelaxConstraints =
repl::ReplicationCoordinator::get(txn)->shouldRelaxIndexConstraints(
NamespaceString(_btreeState->ns()));
- return shouldRelaxConstraints || !failIndexKeyTooLong;
+ return shouldRelaxConstraints || !failIndexKeyTooLong.load();
}
// Find the keys for obj, put them in the tree pointing to loc
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 2e52ce7990d..b96260b11f8 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -42,7 +42,7 @@
namespace mongo {
-extern std::atomic<bool> failIndexKeyTooLong; // NOLINT
+extern AtomicBool failIndexKeyTooLong;
class BSONObjBuilder;
class MatchExpression;
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 1c18fb819c5..921de16e082 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -1084,11 +1084,10 @@ Status storeMongodOptions(const moe::Environment& params) {
// don't check if dur is false here as many will just use the default, and will default
// to off on win32. ie no point making life a little more complex by giving an error on
// a dev environment.
- storageGlobalParams.journalCommitIntervalMs =
- params["storage.journal.commitIntervalMs"].as<int>();
- if (storageGlobalParams.journalCommitIntervalMs < 1 ||
- storageGlobalParams.journalCommitIntervalMs >
- StorageGlobalParams::kMaxJournalCommitIntervalMs) {
+ auto journalCommitIntervalMs = params["storage.journal.commitIntervalMs"].as<int>();
+ storageGlobalParams.journalCommitIntervalMs.store(journalCommitIntervalMs);
+ if (journalCommitIntervalMs < 1 ||
+ journalCommitIntervalMs > StorageGlobalParams::kMaxJournalCommitIntervalMs) {
return Status(ErrorCodes::BadValue,
str::stream() << "--journalCommitInterval out of allowed range (1-"
<< StorageGlobalParams::kMaxJournalCommitIntervalMs
@@ -1144,7 +1143,7 @@ Status storeMongodOptions(const moe::Environment& params) {
storageGlobalParams.upgrade = 1;
}
if (params.count("notablescan")) {
- storageGlobalParams.noTableScan = params["notablescan"].as<bool>();
+ storageGlobalParams.noTableScan.store(params["notablescan"].as<bool>());
}
repl::ReplSettings replSettings;
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index a1dc9c0ec6f..347d8d1d486 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -436,7 +436,7 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
size_t bytesInBatch = 0;
std::vector<BSONObj> batch;
- const size_t maxBatchSize = internalInsertMaxBatchSize;
+ const size_t maxBatchSize = internalInsertMaxBatchSize.load();
batch.reserve(std::min(wholeOp.documents.size(), maxBatchSize));
for (auto&& doc : wholeOp.documents) {
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index b8bacacf015..276a16196bc 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -106,7 +106,7 @@ void DocumentSourceCursor::loadBatch() {
memUsageBytes += _currentBatch.back().getApproximateSize();
- if (memUsageBytes > internalDocumentSourceCursorBatchSizeBytes) {
+ if (memUsageBytes > internalDocumentSourceCursorBatchSizeBytes.load()) {
// End this batch and prepare PlanExecutor for yielding.
_exec->saveState();
return;
diff --git a/src/mongo/db/pipeline/tee_buffer.h b/src/mongo/db/pipeline/tee_buffer.h
index 0ec9d5e57ba..bc4dfcd57b3 100644
--- a/src/mongo/db/pipeline/tee_buffer.h
+++ b/src/mongo/db/pipeline/tee_buffer.h
@@ -52,7 +52,7 @@ public:
* 'bufferSizeBytes' is a soft cap, and may be exceeded by one document's worth (~16MB).
*/
static boost::intrusive_ptr<TeeBuffer> create(
- size_t nConsumers, int bufferSizeBytes = internalQueryFacetBufferSizeBytes);
+ size_t nConsumers, int bufferSizeBytes = internalQueryFacetBufferSizeBytes.load());
void setSource(const boost::intrusive_ptr<DocumentSource>& source) {
_source = source;
diff --git a/src/mongo/db/query/expression_index.cpp b/src/mongo/db/query/expression_index.cpp
index f2e1439610a..09a1fb95e66 100644
--- a/src/mongo/db/query/expression_index.cpp
+++ b/src/mongo/db/query/expression_index.cpp
@@ -109,20 +109,17 @@ void ExpressionMapping::cover2d(const R2Region& region,
}
std::vector<S2CellId> ExpressionMapping::get2dsphereCovering(const S2Region& region) {
- uassert(28739,
- "Geo coarsest level must be in range [0,30]",
- 0 <= internalQueryS2GeoCoarsestLevel && internalQueryS2GeoCoarsestLevel <= 30);
- uassert(28740,
- "Geo finest level must be in range [0,30]",
- 0 <= internalQueryS2GeoFinestLevel && internalQueryS2GeoFinestLevel <= 30);
- uassert(28741,
- "Geo coarsest level must be less than or equal to finest",
- internalQueryS2GeoCoarsestLevel <= internalQueryS2GeoFinestLevel);
+ auto minLevel = internalQueryS2GeoCoarsestLevel.load();
+ auto maxLevel = internalQueryS2GeoFinestLevel.load();
+
+ uassert(28739, "Geo coarsest level must be in range [0,30]", 0 <= minLevel && minLevel <= 30);
+ uassert(28740, "Geo finest level must be in range [0,30]", 0 <= maxLevel && maxLevel <= 30);
+ uassert(28741, "Geo coarsest level must be less than or equal to finest", minLevel <= maxLevel);
S2RegionCoverer coverer;
- coverer.set_min_level(internalQueryS2GeoCoarsestLevel);
- coverer.set_max_level(internalQueryS2GeoFinestLevel);
- coverer.set_max_cells(internalQueryS2GeoMaxCells);
+ coverer.set_min_level(minLevel);
+ coverer.set_max_level(maxLevel);
+ coverer.set_max_cells(internalQueryS2GeoMaxCells.load());
std::vector<S2CellId> cover;
coverer.GetCovering(region, &cover);
diff --git a/src/mongo/db/query/expression_index_knobs.h b/src/mongo/db/query/expression_index_knobs.h
index ef3af4f8812..f3a29a8651f 100644
--- a/src/mongo/db/query/expression_index_knobs.h
+++ b/src/mongo/db/query/expression_index_knobs.h
@@ -28,7 +28,7 @@
#pragma once
-#include <atomic>
+#include "mongo/platform/atomic_word.h"
namespace mongo {
@@ -39,24 +39,24 @@ namespace mongo {
/**
* The maximum number of cells to use for 2D geo query covering for predicate queries
*/
-extern std::atomic<int> internalGeoPredicateQuery2DMaxCoveringCells; // NOLINT
+extern AtomicInt32 internalGeoPredicateQuery2DMaxCoveringCells;
/**
* The maximum number of cells to use for 2D geo query covering for predicate queries
*/
-extern std::atomic<int> internalGeoNearQuery2DMaxCoveringCells; // NOLINT
+extern AtomicInt32 internalGeoNearQuery2DMaxCoveringCells;
//
// Geo query.
//
// What is the finest level we will cover a queried region or geoNear annulus?
-extern std::atomic<int> internalQueryS2GeoFinestLevel; // NOLINT
+extern AtomicInt32 internalQueryS2GeoFinestLevel;
// What is the coarsest level we will cover a queried region or geoNear annulus?
-extern std::atomic<int> internalQueryS2GeoCoarsestLevel; // NOLINT
+extern AtomicInt32 internalQueryS2GeoCoarsestLevel;
// What is the maximum cell count that we want? (advisory, not a hard threshold)
-extern std::atomic<int> internalQueryS2GeoMaxCells; // NOLINT
+extern AtomicInt32 internalQueryS2GeoMaxCells;
} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 3ba0d9bfcb6..61dab0e40e0 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -84,6 +84,7 @@
#include "mongo/scripting/engine.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
+#include "mongo/util/stringutils.h"
namespace mongo {
@@ -161,7 +162,7 @@ void fillOutPlannerParams(OperationContext* txn,
// We will not output collection scans unless there are no indexed solutions. NO_TABLE_SCAN
// overrides this behavior by not outputting a collscan even if there are no indexed
// solutions.
- if (storageGlobalParams.noTableScan) {
+ if (storageGlobalParams.noTableScan.load()) {
const string& ns = canonicalQuery->ns();
// There are certain cases where we ignore this restriction:
bool ignore = canonicalQuery->getQueryObj().isEmpty() ||
@@ -183,7 +184,7 @@ void fillOutPlannerParams(OperationContext* txn,
}
}
- if (internalQueryPlannerEnableIndexIntersection) {
+ if (internalQueryPlannerEnableIndexIntersection.load()) {
plannerParams->options |= QueryPlannerParams::INDEX_INTERSECTION;
}
@@ -363,7 +364,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
}
}
- if (internalQueryPlanOrChildrenIndependently &&
+ if (internalQueryPlanOrChildrenIndependently.load() &&
SubplanStage::canUseSubplanning(*canonicalQuery)) {
LOG(2) << "Running query as sub-queries: " << redact(canonicalQuery->toStringShort());
diff --git a/src/mongo/db/query/index_bounds_builder.cpp b/src/mongo/db/query/index_bounds_builder.cpp
index b16ad43dc5e..4f26030f47c 100644
--- a/src/mongo/db/query/index_bounds_builder.cpp
+++ b/src/mongo/db/query/index_bounds_builder.cpp
@@ -586,7 +586,7 @@ void IndexBoundsBuilder::translate(const MatchExpression* expr,
const R2Region& region = gme->getGeoExpression().getGeometry().getR2Region();
ExpressionMapping::cover2d(
- region, index.infoObj, internalGeoPredicateQuery2DMaxCoveringCells, oilOut);
+ region, index.infoObj, internalGeoPredicateQuery2DMaxCoveringCells.load(), oilOut);
*tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
} else {
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index d34df64ab88..4f926e70664 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -481,9 +481,9 @@ std::string SolutionCacheData::toString() const {
// PlanCache
//
-PlanCache::PlanCache() : _cache(internalQueryCacheSize) {}
+PlanCache::PlanCache() : _cache(internalQueryCacheSize.load()) {}
-PlanCache::PlanCache(const std::string& ns) : _cache(internalQueryCacheSize), _ns(ns) {}
+PlanCache::PlanCache(const std::string& ns) : _cache(internalQueryCacheSize.load()), _ns(ns) {}
PlanCache::~PlanCache() {}
@@ -704,7 +704,7 @@ Status PlanCache::feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* fee
invariant(entry);
// We store up to a constant number of feedback entries.
- if (entry->feedback.size() < size_t(internalQueryCacheFeedbacksStored)) {
+ if (entry->feedback.size() < static_cast<size_t>(internalQueryCacheFeedbacksStored.load())) {
entry->feedback.push_back(autoFeedback.release());
}
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index bac9704e0cf..b4d8f0f9ef0 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -1103,11 +1103,11 @@ TEST_F(CachePlanSelectionTest,
CachedPlanForSelfIntersectionOfMultikeyIndexNonPointRangesCannotIntersectBounds) {
// Enable a hash-based index intersection plan to be generated because we are scanning a
// non-point range on the "a" field.
- bool oldEnableHashIntersection = internalQueryPlannerEnableHashIntersection;
+ bool oldEnableHashIntersection = internalQueryPlannerEnableHashIntersection.load();
ON_BLOCK_EXIT([oldEnableHashIntersection] {
- internalQueryPlannerEnableHashIntersection = oldEnableHashIntersection;
+ internalQueryPlannerEnableHashIntersection.store(oldEnableHashIntersection);
});
- internalQueryPlannerEnableHashIntersection = true;
+ internalQueryPlannerEnableHashIntersection.store(true);
params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
const bool multikey = true;
@@ -1144,11 +1144,11 @@ TEST_F(CachePlanSelectionTest, CachedPlanForIntersectionOfMultikeyIndexesWhenUsi
TEST_F(CachePlanSelectionTest, CachedPlanForIntersectionWithNonMultikeyIndexCanIntersectBounds) {
// Enable a hash-based index intersection plan to be generated because we are scanning a
// non-point range on the "a.c" field.
- bool oldEnableHashIntersection = internalQueryPlannerEnableHashIntersection;
+ bool oldEnableHashIntersection = internalQueryPlannerEnableHashIntersection.load();
ON_BLOCK_EXIT([oldEnableHashIntersection] {
- internalQueryPlannerEnableHashIntersection = oldEnableHashIntersection;
+ internalQueryPlannerEnableHashIntersection.store(oldEnableHashIntersection);
});
- internalQueryPlannerEnableHashIntersection = true;
+ internalQueryPlannerEnableHashIntersection.store(true);
params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
const bool multikey = true;
diff --git a/src/mongo/db/query/plan_enumerator.h b/src/mongo/db/query/plan_enumerator.h
index 5543bc68ca9..a84cc909d58 100644
--- a/src/mongo/db/query/plan_enumerator.h
+++ b/src/mongo/db/query/plan_enumerator.h
@@ -43,8 +43,8 @@ namespace mongo {
struct PlanEnumeratorParams {
PlanEnumeratorParams()
: intersect(false),
- maxSolutionsPerOr(internalQueryEnumerationMaxOrSolutions),
- maxIntersectPerAnd(internalQueryEnumerationMaxIntersectPerAnd) {}
+ maxSolutionsPerOr(internalQueryEnumerationMaxOrSolutions.load()),
+ maxIntersectPerAnd(internalQueryEnumerationMaxIntersectPerAnd.load()) {}
// Do we provide solutions that use more indices than the minimum required to provide
// an indexed solution?
diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp
index ce943e117a7..964d3448576 100644
--- a/src/mongo/db/query/plan_ranker.cpp
+++ b/src/mongo/db/query/plan_ranker.cpp
@@ -249,7 +249,7 @@ double PlanRanker::scoreTree(const PlanStageStats* stats) {
std::string scoreStr = ss;
LOG(2) << scoreStr;
- if (internalQueryForceIntersectionPlans) {
+ if (internalQueryForceIntersectionPlans.load()) {
if (hasStage(STAGE_AND_HASH, stats) || hasStage(STAGE_AND_SORTED, stats)) {
// The boost should be >2.001 to make absolutely sure the ixisect plan will win due
// to the combination of 1) productivity, 2) eof bonus, and 3) no ixisect bonus.
diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp
index 7a665d29905..d84db2b225d 100644
--- a/src/mongo/db/query/plan_yield_policy.cpp
+++ b/src/mongo/db/query/plan_yield_policy.cpp
@@ -45,7 +45,7 @@ PlanYieldPolicy::PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy p
: _policy(policy),
_forceYield(false),
_elapsedTracker(exec->getOpCtx()->getServiceContext()->getFastClockSource(),
- internalQueryExecYieldIterations,
+ internalQueryExecYieldIterations.load(),
Milliseconds(internalQueryExecYieldPeriodMS.load())),
_planYielding(exec) {}
@@ -54,7 +54,7 @@ PlanYieldPolicy::PlanYieldPolicy(PlanExecutor::YieldPolicy policy, ClockSource*
: _policy(policy),
_forceYield(false),
_elapsedTracker(cs,
- internalQueryExecYieldIterations,
+ internalQueryExecYieldIterations.load(),
Milliseconds(internalQueryExecYieldPeriodMS.load())),
_planYielding(nullptr) {}
diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp
index 7867b7c0161..3b8ee2f185e 100644
--- a/src/mongo/db/query/planner_access.cpp
+++ b/src/mongo/db/query/planner_access.cpp
@@ -972,7 +972,7 @@ QuerySolutionNode* QueryPlannerAccess::buildIndexedAnd(const CanonicalQuery& que
AndSortedNode* asn = new AndSortedNode();
asn->children.swap(ixscanNodes);
andResult = asn;
- } else if (internalQueryPlannerEnableHashIntersection) {
+ } else if (internalQueryPlannerEnableHashIntersection.load()) {
AndHashNode* ahn = new AndHashNode();
ahn->children.swap(ixscanNodes);
andResult = ahn;
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index 52c16279cc5..1685a18aae8 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -432,7 +432,7 @@ bool QueryPlannerAnalysis::explodeForSort(const CanonicalQuery& query,
}
// Too many ixscans spoil the performance.
- if (totalNumScans > (size_t)internalQueryMaxScansToExplode) {
+ if (totalNumScans > (size_t)internalQueryMaxScansToExplode.load()) {
LOG(5) << "Could expand ixscans to pull out sort order but resulting scan count"
<< "(" << totalNumScans << ") is too high.";
return false;
diff --git a/src/mongo/db/query/query_knobs.cpp b/src/mongo/db/query/query_knobs.cpp
index 496f69c5c47..cb50474adca 100644
--- a/src/mongo/db/query/query_knobs.cpp
+++ b/src/mongo/db/query/query_knobs.cpp
@@ -70,7 +70,7 @@ MONGO_EXPORT_SERVER_PARAMETER(internalQueryFacetBufferSizeBytes, int, 100 * 1024
MONGO_EXPORT_SERVER_PARAMETER(internalInsertMaxBatchSize,
int,
- internalQueryExecYieldIterations / 2);
+ internalQueryExecYieldIterations.load() / 2);
MONGO_EXPORT_SERVER_PARAMETER(internalDocumentSourceCursorBatchSizeBytes, int, 4 * 1024 * 1024);
diff --git a/src/mongo/db/query/query_knobs.h b/src/mongo/db/query/query_knobs.h
index 67bff82bb1e..1f441ea170a 100644
--- a/src/mongo/db/query/query_knobs.h
+++ b/src/mongo/db/query/query_knobs.h
@@ -28,9 +28,8 @@
#pragma once
-#include <atomic>
-
#include "mongo/platform/atomic_proxy.h"
+#include "mongo/platform/atomic_word.h"
namespace mongo {
@@ -40,79 +39,79 @@ namespace mongo {
// Max number of times we call work() on plans before comparing them,
// for small collections.
-extern std::atomic<int> internalQueryPlanEvaluationWorks; // NOLINT
+extern AtomicInt32 internalQueryPlanEvaluationWorks;
// For large collections, the number times we work() candidate plans is
// taken as this fraction of the collection size.
-extern AtomicDouble internalQueryPlanEvaluationCollFraction; // NOLINT
+extern AtomicDouble internalQueryPlanEvaluationCollFraction;
// Stop working plans once a plan returns this many results.
-extern std::atomic<int> internalQueryPlanEvaluationMaxResults; // NOLINT
+extern AtomicInt32 internalQueryPlanEvaluationMaxResults;
// Do we give a big ranking bonus to intersection plans?
-extern std::atomic<bool> internalQueryForceIntersectionPlans; // NOLINT
+extern AtomicBool internalQueryForceIntersectionPlans;
// Do we have ixisect on at all?
-extern std::atomic<bool> internalQueryPlannerEnableIndexIntersection; // NOLINT
+extern AtomicBool internalQueryPlannerEnableIndexIntersection;
// Do we use hash-based intersection for rooted $and queries?
-extern std::atomic<bool> internalQueryPlannerEnableHashIntersection; // NOLINT
+extern AtomicBool internalQueryPlannerEnableHashIntersection;
//
// plan cache
//
// How many entries in the cache?
-extern std::atomic<int> internalQueryCacheSize; // NOLINT
+extern AtomicInt32 internalQueryCacheSize;
// How many feedback entries do we collect before possibly evicting from the cache based on bad
// performance?
-extern std::atomic<int> internalQueryCacheFeedbacksStored; // NOLINT
+extern AtomicInt32 internalQueryCacheFeedbacksStored;
// How many times more works must we perform in order to justify plan cache eviction
// and replanning?
-extern AtomicDouble internalQueryCacheEvictionRatio; // NOLINT
+extern AtomicDouble internalQueryCacheEvictionRatio;
//
// Planning and enumeration.
//
// How many indexed solutions will QueryPlanner::plan output?
-extern std::atomic<int> internalQueryPlannerMaxIndexedSolutions; // NOLINT
+extern AtomicInt32 internalQueryPlannerMaxIndexedSolutions;
// How many solutions will the enumerator consider at each OR?
-extern std::atomic<int> internalQueryEnumerationMaxOrSolutions; // NOLINT
+extern AtomicInt32 internalQueryEnumerationMaxOrSolutions;
// How many intersections will the enumerator consider at each AND?
-extern std::atomic<int> internalQueryEnumerationMaxIntersectPerAnd; // NOLINT
+extern AtomicInt32 internalQueryEnumerationMaxIntersectPerAnd;
// Do we want to plan each child of the OR independently?
-extern std::atomic<bool> internalQueryPlanOrChildrenIndependently; // NOLINT
+extern AtomicBool internalQueryPlanOrChildrenIndependently;
// How many index scans are we willing to produce in order to obtain a sort order
// during explodeForSort?
-extern std::atomic<int> internalQueryMaxScansToExplode; // NOLINT
+extern AtomicInt32 internalQueryMaxScansToExplode;
//
// Query execution.
//
-extern std::atomic<int> internalQueryExecMaxBlockingSortBytes; // NOLINT
+extern AtomicInt32 internalQueryExecMaxBlockingSortBytes;
// Yield after this many "should yield?" checks.
-extern std::atomic<int> internalQueryExecYieldIterations; // NOLINT
+extern AtomicInt32 internalQueryExecYieldIterations;
// Yield if it's been at least this many milliseconds since we last yielded.
-extern std::atomic<int> internalQueryExecYieldPeriodMS; // NOLINT
+extern AtomicInt32 internalQueryExecYieldPeriodMS;
// Limit the size that we write without yielding to 16MB / 64 (max expected number of indexes)
const int64_t insertVectorMaxBytes = 256 * 1024;
// The number of bytes to buffer at once during a $facet stage.
-extern std::atomic<int> internalQueryFacetBufferSizeBytes; // NOLINT
+extern AtomicInt32 internalQueryFacetBufferSizeBytes;
-extern std::atomic<int> internalInsertMaxBatchSize; // NOLINT
+extern AtomicInt32 internalInsertMaxBatchSize;
-extern std::atomic<int> internalDocumentSourceCursorBatchSizeBytes; // NOLINT
+extern AtomicInt32 internalDocumentSourceCursorBatchSizeBytes;
} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_params.h b/src/mongo/db/query/query_planner_params.h
index 8210fb05e1a..a43d43e7404 100644
--- a/src/mongo/db/query/query_planner_params.h
+++ b/src/mongo/db/query/query_planner_params.h
@@ -40,7 +40,7 @@ struct QueryPlannerParams {
QueryPlannerParams()
: options(DEFAULT),
indexFiltersApplied(false),
- maxIndexedSolutions(internalQueryPlannerMaxIndexedSolutions) {}
+ maxIndexedSolutions(internalQueryPlannerMaxIndexedSolutions.load()) {}
enum Options {
// You probably want to set this.
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index b063e12db0b..f63934389fe 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -3281,15 +3281,15 @@ TEST_F(QueryPlannerTest, IntersectCanBeVeryBig) {
"{ 'a' : null, 'b' : 9, 'c' : null, 'd' : null },"
"{ 'a' : null, 'b' : 16, 'c' : null, 'd' : null }]}"));
- assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
+ assertNumSolutions(internalQueryEnumerationMaxOrSolutions.load());
}
// Ensure that disabling AND_HASH intersection works properly.
TEST_F(QueryPlannerTest, IntersectDisableAndHash) {
- bool oldEnableHashIntersection = internalQueryPlannerEnableHashIntersection;
+ bool oldEnableHashIntersection = internalQueryPlannerEnableHashIntersection.load();
// Turn index intersection on but disable hash-based intersection.
- internalQueryPlannerEnableHashIntersection = false;
+ internalQueryPlannerEnableHashIntersection.store(false);
params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
addIndex(BSON("a" << 1));
@@ -3315,7 +3315,7 @@ TEST_F(QueryPlannerTest, IntersectDisableAndHash) {
"{ixscan: {filter: null, pattern: {c:1}}}]}}}}");
// Restore the old value of the has intersection switch.
- internalQueryPlannerEnableHashIntersection = oldEnableHashIntersection;
+ internalQueryPlannerEnableHashIntersection.store(oldEnableHashIntersection);
}
//
@@ -3556,7 +3556,7 @@ TEST_F(QueryPlannerTest, OrEnumerationLimit) {
"{a: 5, b: 5},"
"{a: 6, b: 6}]}"));
- assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
+ assertNumSolutions(internalQueryEnumerationMaxOrSolutions.load());
}
TEST_F(QueryPlannerTest, OrEnumerationLimit2) {
@@ -3573,7 +3573,7 @@ TEST_F(QueryPlannerTest, OrEnumerationLimit2) {
"{a: 2, b: 2, c: 2, d: 2},"
"{a: 3, b: 3, c: 3, d: 3}]}"));
- assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
+ assertNumSolutions(internalQueryEnumerationMaxOrSolutions.load());
}
// SERVER-13104: test that we properly enumerate all solutions for nested $or.
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index c7bf639f199..bb97ce06bde 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -51,7 +51,7 @@ const NamespaceString QueryPlannerTest::nss("test.collection");
void QueryPlannerTest::setUp() {
opCtx = serviceContext.makeOperationContext();
- internalQueryPlannerEnableHashIntersection = true;
+ internalQueryPlannerEnableHashIntersection.store(true);
params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
addIndex(BSON("_id" << 1));
}
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 329207b4cf9..a284732393b 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -97,7 +97,7 @@ CollectionCloner::CollectionCloner(executor::TaskExecutor* executor,
RemoteCommandRequest::kNoTimeout),
stdx::bind(&CollectionCloner::_countCallback, this, stdx::placeholders::_1),
RemoteCommandRetryScheduler::makeRetryPolicy(
- numInitialSyncCollectionCountAttempts,
+ numInitialSyncCollectionCountAttempts.load(),
executor::RemoteCommandRequest::kNoTimeout,
RemoteCommandRetryScheduler::kAllRetriableErrors)),
_listIndexesFetcher(_executor,
@@ -112,7 +112,7 @@ CollectionCloner::CollectionCloner(executor::TaskExecutor* executor,
rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
RemoteCommandRequest::kNoTimeout,
RemoteCommandRetryScheduler::makeRetryPolicy(
- numInitialSyncListIndexesAttempts,
+ numInitialSyncListIndexesAttempts.load(),
executor::RemoteCommandRequest::kNoTimeout,
RemoteCommandRetryScheduler::kAllRetriableErrors)),
_findFetcher(
@@ -130,7 +130,7 @@ CollectionCloner::CollectionCloner(executor::TaskExecutor* executor,
rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
RemoteCommandRequest::kNoTimeout,
RemoteCommandRetryScheduler::makeRetryPolicy(
- numInitialSyncCollectionFindAttempts,
+ numInitialSyncCollectionFindAttempts.load(),
executor::RemoteCommandRequest::kNoTimeout,
RemoteCommandRetryScheduler::kAllRetriableErrors)),
diff --git a/src/mongo/db/repl/data_replicator.cpp b/src/mongo/db/repl/data_replicator.cpp
index 0fb71dacd75..dcc4dac0a49 100644
--- a/src/mongo/db/repl/data_replicator.cpp
+++ b/src/mongo/db/repl/data_replicator.cpp
@@ -1187,7 +1187,7 @@ Status DataReplicator::_scheduleLastOplogEntryFetcher_inlock(Fetcher::CallbackFn
rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
RemoteCommandRequest::kNoTimeout,
RemoteCommandRetryScheduler::makeRetryPolicy(
- numInitialSyncOplogFindAttempts,
+ numInitialSyncOplogFindAttempts.load(),
executor::RemoteCommandRequest::kNoTimeout,
RemoteCommandRetryScheduler::kAllRetriableErrors));
Status scheduleStatus = _lastOplogEntryFetcher->schedule();
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 175c0ebd74b..4a2ed14d72b 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -116,7 +116,7 @@ DatabaseCloner::DatabaseCloner(executor::TaskExecutor* executor,
rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
RemoteCommandRequest::kNoTimeout,
RemoteCommandRetryScheduler::makeRetryPolicy(
- numInitialSyncListCollectionsAttempts,
+ numInitialSyncListCollectionsAttempts.load(),
executor::RemoteCommandRequest::kNoTimeout,
RemoteCommandRetryScheduler::kAllRetriableErrors)),
_startCollectionCloner([](CollectionCloner& cloner) { return cloner.startup(); }) {
diff --git a/src/mongo/db/repl/databases_cloner.cpp b/src/mongo/db/repl/databases_cloner.cpp
index 9b8fcbeb568..3f7c1ce11e4 100644
--- a/src/mongo/db/repl/databases_cloner.cpp
+++ b/src/mongo/db/repl/databases_cloner.cpp
@@ -211,7 +211,7 @@ Status DatabasesCloner::startup() noexcept {
listDBsReq,
stdx::bind(&DatabasesCloner::_onListDatabaseFinish, this, stdx::placeholders::_1),
RemoteCommandRetryScheduler::makeRetryPolicy(
- numInitialSyncListDatabasesAttempts,
+ numInitialSyncListDatabasesAttempts.load(),
executor::RemoteCommandRequest::kNoTimeout,
RemoteCommandRetryScheduler::kAllRetriableErrors));
_status = _listDBsScheduler->startup();
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 6f25242273c..77a5aa55c64 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -838,7 +838,7 @@ void ReplSource::syncToTailOfRemoteLog() {
}
}
-std::atomic<int> replApplyBatchSize(1); // NOLINT
+AtomicInt32 replApplyBatchSize(1);
class ReplApplyBatchSize
: public ExportedServerParameter<int, ServerParameterType::kStartupAndRuntime> {
@@ -1065,7 +1065,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
BSONObj op = oplogReader.nextSafe();
- int b = replApplyBatchSize;
+ int b = replApplyBatchSize.load();
bool justOne = b == 1;
unique_ptr<Lock::GlobalWrite> lk(justOne ? 0 : new Lock::GlobalWrite(txn->lockState()));
while (1) {
@@ -1137,7 +1137,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
int ReplSource::sync(OperationContext* txn, int& nApplied) {
_sleepAdviceTime = 0;
ReplInfo r("sync");
- if (!serverGlobalParams.quiet) {
+ if (!serverGlobalParams.quiet.load()) {
LogstreamBuilder l = log();
l << "syncing from ";
if (sourceName() != "main") {
@@ -1293,7 +1293,7 @@ static void replMain(OperationContext* txn) {
stringstream ss;
ss << "sleep " << s << " sec before next pass";
string msg = ss.str();
- if (!serverGlobalParams.quiet)
+ if (!serverGlobalParams.quiet.load())
log() << msg << endl;
ReplInfo r(msg.c_str());
sleepsecs(s);
diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp
index 1a98d8fdfe7..4bbf50269b7 100644
--- a/src/mongo/db/repl/noop_writer.cpp
+++ b/src/mongo/db/repl/noop_writer.cpp
@@ -160,7 +160,7 @@ void NoopWriter::_writeNoop(OperationContext* txn) {
LOG(1) << "Not scheduling a noop write. Last known OpTime: " << _lastKnownOpTime
<< " != last primary OpTime: " << lastAppliedOpTime;
} else {
- if (writePeriodicNoops) {
+ if (writePeriodicNoops.load()) {
const auto logLevel = Command::testCommandsEnabled ? 0 : 1;
LOG(logLevel)
<< "Writing noop to oplog as there has been no writes to this replica set in over "
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 769b910aa32..2c0cb8de808 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -909,7 +909,7 @@ bool ReplicationCoordinatorExternalStateImpl::shouldUseDataReplicatorInitialSync
}
std::size_t ReplicationCoordinatorExternalStateImpl::getOplogFetcherMaxFetcherRestarts() const {
- return oplogFetcherMaxFetcherRestarts;
+ return oplogFetcherMaxFetcherRestarts.load();
}
JournalListener::Token ReplicationCoordinatorExternalStateImpl::getToken() {
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 54845f273e6..532363030e3 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -654,7 +654,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* txn,
}
// DataReplicator::startup() must be called outside lock because it uses features (eg.
// setting the initial sync flag) which depend on the ReplicationCoordinatorImpl.
- uassertStatusOK(drCopy->startup(txn, numInitialSyncAttempts));
+ uassertStatusOK(drCopy->startup(txn, numInitialSyncAttempts.load()));
} catch (...) {
auto status = exceptionToStatus();
log() << "Initial Sync failed to start: " << status;
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 4055c5bcdbd..990f9dd8fdf 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -540,7 +540,7 @@ void syncDoInitialSync(OperationContext* txn,
});
int failedAttempts = 0;
- while (failedAttempts < num3Dot2InitialSyncAttempts) {
+ while (failedAttempts < num3Dot2InitialSyncAttempts.load()) {
try {
// leave loop when successful
Status status = _initialSync(txn, bgsync.get());
@@ -562,12 +562,12 @@ void syncDoInitialSync(OperationContext* txn,
}
error() << "initial sync attempt failed, "
- << (num3Dot2InitialSyncAttempts - ++failedAttempts) << " attempts remaining";
+ << (num3Dot2InitialSyncAttempts.load() - ++failedAttempts) << " attempts remaining";
sleepmillis(durationCount<Milliseconds>(kInitialSyncRetrySleepDuration));
}
// No need to print a stack
- if (failedAttempts >= num3Dot2InitialSyncAttempts) {
+ if (failedAttempts >= num3Dot2InitialSyncAttempts.load()) {
severe() << "The maximum number of retries have been exhausted for initial sync.";
fassertFailedNoTrace(16233);
}
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 57646bd213f..3bba514f24b 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -82,7 +82,7 @@ using std::endl;
namespace repl {
-std::atomic<int> SyncTail::replBatchLimitOperations{50 * 1000}; // NOLINT
+AtomicInt32 SyncTail::replBatchLimitOperations{50 * 1000};
/**
* This variable determines the number of writer threads SyncTail will have. It has a default
diff --git a/src/mongo/db/repl/sync_tail.h b/src/mongo/db/repl/sync_tail.h
index ec133599bd3..98485782868 100644
--- a/src/mongo/db/repl/sync_tail.h
+++ b/src/mongo/db/repl/sync_tail.h
@@ -115,7 +115,7 @@ public:
class OpQueue {
public:
OpQueue() : _bytes(0) {
- _batch.reserve(replBatchLimitOperations);
+ _batch.reserve(replBatchLimitOperations.load());
}
size_t getBytes() const {
@@ -214,7 +214,7 @@ public:
*/
OldThreadPool* getWriterPool();
- static std::atomic<int> replBatchLimitOperations; // NOLINT (server param must use std::atomic)
+ static AtomicInt32 replBatchLimitOperations;
protected:
static const unsigned int replBatchLimitBytes = 100 * 1024 * 1024;
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 4feb104f748..a68b61bcf67 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -180,7 +180,8 @@ int CollectionRangeDeleter::_doDeletion(OperationContext* txn,
InternalPlanner::FORWARD,
InternalPlanner::IXSCAN_FETCH));
int numDeleted = 0;
- const int maxItersBeforeYield = std::max(static_cast<int>(internalQueryExecYieldIterations), 1);
+ const int maxItersBeforeYield =
+ std::max(static_cast<int>(internalQueryExecYieldIterations.load()), 1);
while (numDeleted < maxItersBeforeYield) {
RecordId rloc;
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 90714cdb71b..31eb0358825 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -405,7 +405,7 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* txn,
dassert(txn->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IS));
ElapsedTracker tracker(txn->getServiceContext()->getFastClockSource(),
- internalQueryExecYieldIterations,
+ internalQueryExecYieldIterations.load(),
Milliseconds(internalQueryExecYieldPeriodMS.load()));
stdx::lock_guard<stdx::mutex> sl(_mutex);
diff --git a/src/mongo/db/server_options.h b/src/mongo/db/server_options.h
index 6f555e39ffd..13dc2705150 100644
--- a/src/mongo/db/server_options.h
+++ b/src/mongo/db/server_options.h
@@ -28,6 +28,7 @@
#pragma once
#include "mongo/db/jsobj.h"
+#include "mongo/platform/atomic_word.h"
#include "mongo/platform/process_id.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/util/net/listen.h" // For DEFAULT_MAX_CONN
@@ -54,7 +55,7 @@ struct ServerGlobalParams {
bool indexBuildRetry = true; // --noIndexBuildRetry
- std::atomic<bool> quiet{false}; // --quiet NOLINT
+ AtomicBool quiet{false}; // --quiet
ClusterRole clusterRole = ClusterRole::None; // --configsvr/--shardsvr
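The server_options.h change above is representative of the whole patch: once a flag such as quiet is an AtomicBool, plain reads and assignments at call sites have to go through load() and store(), which is exactly what the surrounding hunks do. A minimal standalone sketch of that call-site pattern, using a simplified AtomicBoolLike stand-in and a hypothetical quietFlag rather than the real mongo::AtomicWord<bool> and serverGlobalParams:

#include <atomic>
#include <iostream>

// Simplified stand-in for an atomic flag that only exposes load()/store().
class AtomicBoolLike {
public:
    explicit AtomicBoolLike(bool v) : _v(v) {}
    bool load() const { return _v.load(); }
    void store(bool v) { _v.store(v); }

private:
    std::atomic<bool> _v;
};

AtomicBoolLike quietFlag{false};  // analogous to a --quiet style flag

int main() {
    quietFlag.store(true);   // option parsing: assignment becomes store()
    if (quietFlag.load()) {  // call sites: `if (quiet)` becomes `if (quiet.load())`
        std::cout << "quiet mode enabled\n";
    }
    return 0;
}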
diff --git a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp
index dac0f29e876..37412636992 100644
--- a/src/mongo/db/server_options_helpers.cpp
+++ b/src/mongo/db/server_options_helpers.cpp
@@ -864,11 +864,11 @@ Status storeServerOptions(const moe::Environment& params) {
}
if (params.count("systemLog.quiet")) {
- serverGlobalParams.quiet = params["systemLog.quiet"].as<bool>();
+ serverGlobalParams.quiet.store(params["systemLog.quiet"].as<bool>());
}
if (params.count("systemLog.traceAllExceptions")) {
- DBException::traceExceptions = params["systemLog.traceAllExceptions"].as<bool>();
+ DBException::traceExceptions.store(params["systemLog.traceAllExceptions"].as<bool>());
}
if (params.count("net.maxIncomingConnections")) {
diff --git a/src/mongo/db/server_parameters.h b/src/mongo/db/server_parameters.h
index 597323d2926..be061848df8 100644
--- a/src/mongo/db/server_parameters.h
+++ b/src/mongo/db/server_parameters.h
@@ -37,6 +37,7 @@
#include "mongo/base/status.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/atomic_proxy.h"
+#include "mongo/platform/atomic_word.h"
namespace mongo {
@@ -162,7 +163,7 @@ class is_safe_runtime_parameter_type<double> : public std::true_type {};
template <typename T, ServerParameterType paramType>
class server_parameter_storage_type {
public:
- using value_type = std::atomic<T>; // NOLINT
+ using value_type = AtomicWord<T>;
};
template <typename T>
@@ -217,9 +218,7 @@ public:
_value(value) {}
virtual ~ExportedServerParameter() {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- b.append(name, *_value);
- }
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name);
virtual Status set(const BSONElement& newValueElement);
virtual Status set(const T& newValue);
diff --git a/src/mongo/db/server_parameters_inline.h b/src/mongo/db/server_parameters_inline.h
index 2014c494c70..4a4c5e6c88f 100644
--- a/src/mongo/db/server_parameters_inline.h
+++ b/src/mongo/db/server_parameters_inline.h
@@ -32,6 +32,39 @@
namespace mongo {
+// We create template specializations for each value type that is supported at runtime. The only
+// value types supported at runtime are those that can be stored in an AtomicWord<T> or
+// AtomicProxy<T>, both of which have explicit load and store methods. The storage type for a
+// value type is chosen by the server_parameter_storage_type type trait. Since there is no support
+// for partial template specialization of member functions, we generate 4 (the supported value
+// types) x 2 (RuntimeOnly, StartupAndRuntime) specializations of append and set.
+#define EXPORTED_ATOMIC_SERVER_PARAMETER_TYPE(VALUE_TYPE, PARAM_TYPE) \
+ template <> \
+ inline void ExportedServerParameter<VALUE_TYPE, PARAM_TYPE>::append( \
+ OperationContext* txn, BSONObjBuilder& b, const std::string& name) { \
+ b.append(name, _value->load()); \
+ } \
+ \
+ template <> \
+ inline Status ExportedServerParameter<VALUE_TYPE, PARAM_TYPE>::set( \
+ const VALUE_TYPE& newValue) { \
+ Status v = validate(newValue); \
+ if (!v.isOK()) \
+ return v; \
+ \
+ _value->store(newValue); \
+ return Status::OK(); \
+ }
+
+#define EXPORTED_ATOMIC_SERVER_PARAMETER(PARAM_TYPE) \
+ EXPORTED_ATOMIC_SERVER_PARAMETER_TYPE(bool, PARAM_TYPE) \
+ EXPORTED_ATOMIC_SERVER_PARAMETER_TYPE(int, PARAM_TYPE) \
+ EXPORTED_ATOMIC_SERVER_PARAMETER_TYPE(long long, PARAM_TYPE) \
+ EXPORTED_ATOMIC_SERVER_PARAMETER_TYPE(double, PARAM_TYPE)
+
+EXPORTED_ATOMIC_SERVER_PARAMETER(ServerParameterType::kRuntimeOnly);
+EXPORTED_ATOMIC_SERVER_PARAMETER(ServerParameterType::kStartupAndRuntime);
+
template <typename T, ServerParameterType paramType>
inline Status ExportedServerParameter<T, paramType>::set(const BSONElement& newValueElement) {
T newValue;
@@ -52,4 +85,11 @@ inline Status ExportedServerParameter<T, paramType>::set(const T& newValue) {
return Status::OK();
}
+template <typename T, ServerParameterType paramType>
+void ExportedServerParameter<T, paramType>::append(OperationContext* txn,
+ BSONObjBuilder& b,
+ const std::string& name) {
+ b.append(name, *_value);
+}
+
} // namespace mongo
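The macro above stamps out append/set specializations that read and write the parameter only through load() and store(), as the comment in that hunk explains. A compact standalone sketch of the same specialization idea, using simplified stand-ins (ParamLike, AtomicWordLike) rather than MongoDB's ExportedServerParameter and AtomicWord:

#include <atomic>
#include <iostream>
#include <string>

// Simplified stand-in for an atomic wrapper with explicit load()/store().
template <typename T>
class AtomicWordLike {
public:
    explicit AtomicWordLike(T v) : _v(v) {}
    T load() const { return _v.load(); }
    void store(T v) { _v.store(v); }

private:
    std::atomic<T> _v;
};

// Primary template only declares append/set; the definitions used here are
// the explicit specializations for atomic-backed storage below.
template <typename T, typename Storage>
class ParamLike {
public:
    ParamLike(std::string name, Storage* value) : _name(std::move(name)), _value(value) {}

    void append(std::ostream& out) const;
    void set(const T& newValue);

private:
    std::string _name;
    Storage* _value;
};

// Atomic storage: never dereference the pointer directly, always load()/store().
template <>
void ParamLike<int, AtomicWordLike<int>>::append(std::ostream& out) const {
    out << _name << ": " << _value->load() << '\n';
}

template <>
void ParamLike<int, AtomicWordLike<int>>::set(const int& newValue) {
    _value->store(newValue);
}

int main() {
    AtomicWordLike<int> storage{5};
    ParamLike<int, AtomicWordLike<int>> p("ff", &storage);
    p.set(6);
    p.append(std::cout);  // prints "ff: 6"
    return 0;
}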
diff --git a/src/mongo/db/server_parameters_test.cpp b/src/mongo/db/server_parameters_test.cpp
index fc480be9e02..3b4783bcbbd 100644
--- a/src/mongo/db/server_parameters_test.cpp
+++ b/src/mongo/db/server_parameters_test.cpp
@@ -41,18 +41,18 @@ using std::string;
using std::vector;
TEST(ServerParameters, Simple1) {
- std::atomic<int> f(5); // NOLINT
+ AtomicInt32 f(5);
ExportedServerParameter<int, ServerParameterType::kStartupAndRuntime> ff(NULL, "ff", &f);
ASSERT_EQUALS("ff", ff.name());
ff.set(6);
- ASSERT_EQUALS(6, f);
+ ASSERT_EQUALS(6, f.load());
ff.set(BSON("x" << 7).firstElement());
- ASSERT_EQUALS(7, f);
+ ASSERT_EQUALS(7, f.load());
ff.setFromString("8");
- ASSERT_EQUALS(8, f);
+ ASSERT_EQUALS(8, f.load());
}
TEST(ServerParameters, Vector1) {
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index 1ed88c8d117..ab1d1988cba 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -498,7 +498,7 @@ void Stats::S::_asObj(BSONObjBuilder* builder) const {
<< "commitsInWriteLock"
<< (unsigned)(_commitsInWriteLockMicros / 1000));
- if (storageGlobalParams.journalCommitIntervalMs != 0) {
+ if (storageGlobalParams.journalCommitIntervalMs.load() != 0) {
b << "journalCommitIntervalMs" << storageGlobalParams.journalCommitIntervalMs.load();
}
}
@@ -686,7 +686,7 @@ static void durThread(ClockSource* cs, int64_t serverStartMs) {
uint64_t remapLastTimestamp(0);
while (shutdownRequested.loadRelaxed() == 0) {
- unsigned ms = storageGlobalParams.journalCommitIntervalMs;
+ unsigned ms = storageGlobalParams.journalCommitIntervalMs.load();
if (ms == 0) {
ms = samePartition ? 100 : 30;
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 3d2a0f3a290..cd503957176 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -69,7 +69,7 @@ namespace {
* - setting to false will fail.
*/
// Unused, needed for server parameter.
-std::atomic<bool> newCollectionsUsePowerOf2SizesFlag(true); // NOLINT
+AtomicBool newCollectionsUsePowerOf2SizesFlag(true);
class NewCollectionsUsePowerOf2SizesParameter
: public ExportedServerParameter<bool, ServerParameterType::kStartupAndRuntime> {
diff --git a/src/mongo/db/storage/storage_options.h b/src/mongo/db/storage/storage_options.h
index 8ae1467e307..d1fd1c24fb2 100644
--- a/src/mongo/db/storage/storage_options.h
+++ b/src/mongo/db/storage/storage_options.h
@@ -32,6 +32,7 @@
#include <string>
#include "mongo/platform/atomic_proxy.h"
+#include "mongo/platform/atomic_word.h"
/*
* This file defines the storage for options that come from the command line related to data file
@@ -81,11 +82,11 @@ struct StorageGlobalParams {
// --journalCommitInterval
static const int kMaxJournalCommitIntervalMs;
- std::atomic<int> journalCommitIntervalMs; // NOLINT
+ AtomicInt32 journalCommitIntervalMs;
// --notablescan
// no table scans allowed
- std::atomic<bool> noTableScan{false}; // NOLINT
+ AtomicBool noTableScan{false};
// --directoryperdb
// Stores each database’s files in its own folder in the data directory.
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 8dc0fac9443..4229aa4d572 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -108,7 +108,7 @@ public:
invariant(e.getCode() == ErrorCodes::ShutdownInProgress);
}
- int ms = storageGlobalParams.journalCommitIntervalMs;
+ int ms = storageGlobalParams.journalCommitIntervalMs.load();
if (!ms) {
ms = 100;
}
@@ -125,7 +125,7 @@ public:
private:
WiredTigerSessionCache* _sessionCache;
- std::atomic<bool> _shuttingDown{false}; // NOLINT
+ AtomicBool _shuttingDown{false};
};
namespace {
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index c2ee5ba1561..ab842aa5782 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -87,11 +87,11 @@ public:
AuthorizationSession::get(cc())->grantInternalAuthorization();
while (!inShutdown()) {
- sleepsecs(ttlMonitorSleepSecs);
+ sleepsecs(ttlMonitorSleepSecs.load());
LOG(3) << "thread awake";
- if (!ttlMonitorEnabled) {
+ if (!ttlMonitorEnabled.load()) {
LOG(1) << "disabled";
continue;
}