author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2020-02-16 02:04:08 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>     2020-02-26 07:55:48 +0000
commit     51eceb8afa6610b4ff0befb92ec6039173bab00f (patch)
tree       4725ecdb5d0bec00ac6938cf447c21924297ea56 /src
parent     8651c754eedf84651dd5051aa43c70cd96b00586 (diff)
download   mongo-51eceb8afa6610b4ff0befb92ec6039173bab00f.tar.gz
SERVER-44978 Remove accidentally added usage of getGlobalServiceContext() from ReadWriteConcernDefaults
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp                  |  4
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp      | 13
-rw-r--r--  src/mongo/db/commands/fsync.cpp                    | 54
-rw-r--r--  src/mongo/db/db.cpp                                | 38
-rw-r--r--  src/mongo/db/read_write_concern_defaults.cpp       | 17
-rw-r--r--  src/mongo/db/read_write_concern_defaults.h         |  4
-rw-r--r--  src/mongo/db/service_context_test_fixture.cpp      |  9
-rw-r--r--  src/mongo/db/session_catalog_test.cpp              |  4
-rw-r--r--  src/mongo/db/transaction_participant.cpp           |  2
-rw-r--r--  src/mongo/db/write_concern.cpp                     |  6
-rw-r--r--  src/mongo/util/concurrency/thread_pool_interface.h | 10
11 files changed, 82 insertions, 79 deletions
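The common thread in every hunk below is replacing the process-global accessor getGlobalServiceContext() with a ServiceContext reached through an argument already in hand, usually opCtx->getServiceContext(). A minimal standalone sketch of that pattern follows; the types here are hypothetical stand-ins for illustration, not the real mongo::ServiceContext and mongo::OperationContext.

// Sketch (illustrative stand-in types, not the MongoDB classes).
#include <iostream>

class ServiceContext {
public:
    const char* storageEngineName() const {
        return "wiredTiger";  // placeholder for a real decoration lookup
    }
};

class OperationContext {
public:
    explicit OperationContext(ServiceContext* sc) : _sc(sc) {}

    // Every operation context knows the service context that owns it, so
    // callees never need to reach for a process-wide global.
    ServiceContext* getServiceContext() const {
        return _sc;
    }

private:
    ServiceContext* const _sc;
};

// Before: the callee silently depended on getGlobalServiceContext().
// After: the dependency is reachable through the argument it already takes.
void flush(OperationContext* opCtx) {
    std::cout << "flushing " << opCtx->getServiceContext()->storageEngineName() << '\n';
}

int main() {
    ServiceContext sc;
    OperationContext opCtx(&sc);
    flush(&opCtx);
}

Besides removing a hidden global, this lets tests hand each operation a private ServiceContext instead of mutating process state.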
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index e525628b169..8d0b0f286bf 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -410,8 +410,8 @@ Status _collModInternal(OperationContext* opCtx,
// Only observe non-view collMods, as view operations are observed as operations on the
// system.views collection.
- getGlobalServiceContext()->getOpObserver()->onCollMod(
- opCtx, nss, coll->uuid(), oplogEntryObj, oldCollOptions, ttlInfo);
+ auto* const opObserver = opCtx->getServiceContext()->getOpObserver();
+ opObserver->onCollMod(opCtx, nss, coll->uuid(), oplogEntryObj, oldCollOptions, ttlInfo);
wunit.commit();
return Status::OK();
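The coll_mod hunk also names the collaborator in an auto* const local before calling it, which keeps the observer call on one readable line. A tiny sketch of that shape, with hypothetical stand-in types:

// Sketch (illustrative stand-ins, not the real OpObserver API).
#include <iostream>
#include <string>

struct OpObserver {
    void onCollMod(const std::string& nss) {
        std::cout << "observed collMod on " << nss << '\n';
    }
};

struct ServiceContext {
    OpObserver* getOpObserver() { return &_observer; }
    OpObserver _observer;
};

struct OperationContext {
    ServiceContext* getServiceContext() { return &_sc; }
    ServiceContext _sc;
};

void collModInternal(OperationContext* opCtx, const std::string& nss) {
    // Resolve the chained getters once, then make the call.
    auto* const opObserver = opCtx->getServiceContext()->getOpObserver();
    opObserver->onCollMod(nss);
}

int main() {
    OperationContext opCtx;
    collModInternal(&opCtx, "test.users");
}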
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index c84a0261a4b..73c3e633dd8 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -231,10 +231,8 @@ void DatabaseHolderImpl::close(OperationContext* opCtx, StringData ns) {
_dbs.erase(it);
- getGlobalServiceContext()
- ->getStorageEngine()
- ->closeDatabase(opCtx, dbName.toString())
- .transitional_ignore();
+ auto* const storageEngine = opCtx->getServiceContext()->getStorageEngine();
+ storageEngine->closeDatabase(opCtx, dbName.toString()).transitional_ignore();
}
void DatabaseHolderImpl::closeAll(OperationContext* opCtx) {
@@ -250,6 +248,8 @@ void DatabaseHolderImpl::closeAll(OperationContext* opCtx) {
dbs.insert(i->first);
}
+ auto* const storageEngine = opCtx->getServiceContext()->getStorageEngine();
+
for (const auto& name : dbs) {
LOGV2_DEBUG(20311, 2, "DatabaseHolder::closeAll name:{name}", "name"_attr = name);
@@ -259,10 +259,7 @@ void DatabaseHolderImpl::closeAll(OperationContext* opCtx) {
_dbs.erase(name);
- getGlobalServiceContext()
- ->getStorageEngine()
- ->closeDatabase(opCtx, name)
- .transitional_ignore();
+ storageEngine->closeDatabase(opCtx, name).transitional_ignore();
}
}
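Note that in closeAll() the storage-engine lookup is hoisted out of the per-database loop: the engine cannot change mid-iteration, so it is resolved once up front. A sketch of the hoist, with a hypothetical StorageEngine stand-in:

// Sketch of hoisting an invariant lookup out of a loop.
#include <iostream>
#include <set>
#include <string>

struct StorageEngine {
    void closeDatabase(const std::string& name) {
        std::cout << "closed " << name << '\n';
    }
};

StorageEngine* lookupStorageEngine() {
    // Stand-in for opCtx->getServiceContext()->getStorageEngine().
    static StorageEngine engine;
    return &engine;
}

void closeAll(const std::set<std::string>& dbs) {
    auto* const storageEngine = lookupStorageEngine();  // resolved once
    for (const auto& name : dbs) {
        storageEngine->closeDatabase(name);
    }
}

int main() {
    closeAll({"config", "local", "test"});
}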
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 9aad9e15662..29cbe97a7fa 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -29,6 +29,8 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+#include "mongo/platform/basic.h"
+
#include "mongo/db/commands/fsync.h"
#include <string>
@@ -56,30 +58,30 @@
#include "mongo/util/exit.h"
namespace mongo {
-
-using std::string;
-using std::stringstream;
-
namespace {
+
// Ensures that only one command is operating on fsyncLock state at a time. As a 'ResourceMutex',
// lock time will be reported for a given user operation.
Lock::ResourceMutex commandMutex("fsyncCommandMutex");
-} // namespace
/**
* Maintains a global read lock while mongod is fsyncLocked.
*/
class FSyncLockThread : public BackgroundJob {
public:
- FSyncLockThread(bool allowFsyncFailure)
- : BackgroundJob(false), _allowFsyncFailure(allowFsyncFailure) {}
- virtual ~FSyncLockThread() {}
- virtual string name() const {
+ FSyncLockThread(ServiceContext* serviceContext, bool allowFsyncFailure)
+ : BackgroundJob(false),
+ _serviceContext(serviceContext),
+ _allowFsyncFailure(allowFsyncFailure) {}
+
+ std::string name() const override {
return "FSyncLockThread";
}
- virtual void run();
+
+ void run() override;
private:
+ ServiceContext* const _serviceContext;
bool _allowFsyncFailure;
static bool _shutdownTaskRegistered;
};
@@ -124,9 +126,9 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
virtual bool errmsgRun(OperationContext* opCtx,
- const string& dbname,
+ const std::string& dbname,
const BSONObj& cmdObj,
- string& errmsg,
+ std::string& errmsg,
BSONObjBuilder& result) {
if (opCtx->lockState()->isLocked()) {
errmsg = "fsync: Cannot execute fsync command from contexts that hold a data lock";
@@ -143,8 +145,8 @@ public:
if (!lock) {
// Take a global IS lock to ensure the storage engine is not shutdown
+ auto* const storageEngine = opCtx->getServiceContext()->getStorageEngine();
Lock::GlobalLock global(opCtx, MODE_IS);
- StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine();
storageEngine->flushAllFiles(opCtx, /*callerHoldsReadLock*/ true);
// This field has had a dummy value since MMAP went away. It is undocumented.
@@ -161,13 +163,13 @@ public:
acquireLock();
if (lockCountAtStart == 0) {
-
Status status = Status::OK();
{
stdx::unique_lock<Latch> lk(lockStateMutex);
threadStatus = Status::OK();
threadStarted = false;
- _lockThread = std::make_unique<FSyncLockThread>(allowFsyncFailure);
+ _lockThread = std::make_unique<FSyncLockThread>(opCtx->getServiceContext(),
+ allowFsyncFailure);
_lockThread->go();
while (!threadStarted && threadStatus.isOK()) {
@@ -267,14 +269,14 @@ private:
Mutex _fsyncLockedMutex = MONGO_MAKE_LATCH("FSyncCommand::_fsyncLockedMutex");
bool _fsyncLocked = false;
+
} fsyncCmd;
class FSyncUnlockCommand : public ErrmsgCommandDeprecated {
public:
FSyncUnlockCommand() : ErrmsgCommandDeprecated("fsyncUnlock") {}
-
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
return false;
}
@@ -334,15 +336,12 @@ public:
return true;
}
-} unlockFsyncCmd;
-
-// Exposed publically via extern in fsync.h.
-SimpleMutex filesLockedFsync;
+} fsyncUnlockCmd;
bool FSyncLockThread::_shutdownTaskRegistered = false;
void FSyncLockThread::run() {
- ThreadClient tc("fsyncLockWorker", getGlobalServiceContext());
+ ThreadClient tc("fsyncLockWorker", _serviceContext);
stdx::lock_guard<SimpleMutex> lkf(filesLockedFsync);
stdx::unique_lock<Latch> lk(fsyncCmd.lockStateMutex);
@@ -353,8 +352,7 @@ void FSyncLockThread::run() {
OperationContext& opCtx = *opCtxPtr;
Lock::GlobalRead global(&opCtx); // Block any writes in order to flush the files.
- ServiceContext* serviceContext = opCtx.getServiceContext();
- StorageEngine* storageEngine = serviceContext->getStorageEngine();
+ StorageEngine* storageEngine = _serviceContext->getStorageEngine();
// The fsync shutdown task has to be registered once the server is running otherwise it
// conflicts with the servers shutdown task.
@@ -384,7 +382,7 @@ void FSyncLockThread::run() {
}
bool successfulFsyncLock = false;
- auto backupCursorHooks = BackupCursorHooks::get(serviceContext);
+ auto backupCursorHooks = BackupCursorHooks::get(_serviceContext);
try {
writeConflictRetry(&opCtx,
"beginBackup",
@@ -444,4 +442,10 @@ MONGO_INITIALIZER(fsyncLockedForWriting)(InitializerContext* context) {
setLockedForWritingImpl([]() { return fsyncCmd.fsyncLocked(); });
return Status::OK();
}
+
+} // namespace
+
+// Exposed publically via extern in fsync.h.
+SimpleMutex filesLockedFsync;
+
} // namespace mongo
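FSyncLockThread now receives its ServiceContext* at construction and uses it both for the ThreadClient and for the storage-engine and backup-cursor lookups, instead of calling getGlobalServiceContext() from the background thread. A sketch of that constructor-injection shape, using hypothetical stand-in types:

// Sketch of injecting a context into a background worker.
#include <iostream>
#include <thread>

struct ServiceContext {
    const char* name() const { return "mongod"; }
};

class FSyncLockWorker {
public:
    FSyncLockWorker(ServiceContext* serviceContext, bool allowFailure)
        : _serviceContext(serviceContext), _allowFailure(allowFailure) {}

    void run() {
        // The worker uses the injected context rather than a global, so a
        // test can hand it a private ServiceContext.
        std::cout << "worker bound to " << _serviceContext->name()
                  << ", allowFailure=" << _allowFailure << '\n';
    }

private:
    ServiceContext* const _serviceContext;
    const bool _allowFailure;
};

int main() {
    ServiceContext sc;
    FSyncLockWorker worker(&sc, /*allowFailure=*/false);
    std::thread t([&] { worker.run(); });
    t.join();
}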
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 970601d929a..886592fe567 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -293,11 +293,10 @@ void initializeCommandHooks(ServiceContext* serviceContext) {
MONGO_FAIL_POINT_DEFINE(shutdownAtStartup);
-ExitCode _initAndListen(int listenPort) {
+ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
Client::initThread("initandlisten");
initWireSpec();
- auto serviceContext = getGlobalServiceContext();
serviceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));
@@ -767,9 +766,9 @@ ExitCode _initAndListen(int listenPort) {
return waitForShutdown();
}
-ExitCode initAndListen(int listenPort) {
+ExitCode initAndListen(ServiceContext* service, int listenPort) {
try {
- return _initAndListen(listenPort);
+ return _initAndListen(service, listenPort);
} catch (DBException& e) {
LOGV2(20557, "exception in initAndListen: {e}, terminating", "e"_attr = e.toString());
return EXIT_UNCAUGHT;
@@ -789,7 +788,7 @@ ExitCode initAndListen(int listenPort) {
#if defined(_WIN32)
ExitCode initService() {
- return initAndListen(serverGlobalParams.port);
+ return initAndListen(getGlobalServiceContext(), serverGlobalParams.port);
}
#endif
@@ -1247,18 +1246,23 @@ int mongoDbMain(int argc, char* argv[], char** envp) {
quickExit(EXIT_FAILURE);
}
- try {
- setGlobalServiceContext(ServiceContext::make());
- } catch (...) {
- auto cause = exceptionToStatus();
- LOGV2_FATAL_OPTIONS(20575,
- {logComponentV1toV2(LogComponent::kControl)},
- "Failed to create service context: {cause}",
- "cause"_attr = redact(cause));
- quickExit(EXIT_FAILURE);
- }
+ auto* service = [] {
+ try {
+ auto serviceContextHolder = ServiceContext::make();
+ auto* serviceContext = serviceContextHolder.get();
+ setGlobalServiceContext(std::move(serviceContextHolder));
+
+ return serviceContext;
+ } catch (...) {
+ auto cause = exceptionToStatus();
+ LOGV2_FATAL_OPTIONS(20575,
+ {logComponentV1toV2(LogComponent::kControl)},
+ "Failed to create service context: {cause}",
+ "cause"_attr = redact(cause));
+ quickExit(EXIT_FAILURE);
+ }
+ }();
- auto service = getGlobalServiceContext();
setUpCollectionShardingState(service);
setUpCatalog(service);
setUpReplication(service);
@@ -1289,7 +1293,7 @@ int mongoDbMain(int argc, char* argv[], char** envp) {
}
#endif
- ExitCode exitCode = initAndListen(serverGlobalParams.port);
+ ExitCode exitCode = initAndListen(service, serverGlobalParams.port);
exitCleanly(exitCode);
return 0;
}
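The mongoDbMain change uses an immediately invoked lambda to create the ServiceContext, capture a raw pointer, and only then transfer ownership to the global holder, so the rest of main() threads `service` through explicitly instead of re-reading the global. A minimal sketch of the idiom (the error handling from the real hunk is omitted, and ServiceContext is a stand-in):

// Sketch of the keep-a-pointer-then-hand-off idiom.
#include <iostream>
#include <memory>
#include <utility>

struct ServiceContext {};

std::unique_ptr<ServiceContext> globalHolder;

void setGlobalServiceContext(std::unique_ptr<ServiceContext> sc) {
    globalHolder = std::move(sc);
}

int main() {
    auto* service = [] {
        auto holder = std::make_unique<ServiceContext>();
        auto* raw = holder.get();                    // grab the pointer first...
        setGlobalServiceContext(std::move(holder));  // ...then give up ownership
        return raw;                                  // callers use `service`, not the global
    }();

    std::cout << (service == globalHolder.get()) << '\n';  // prints 1
}

The same idiom appears again in ScopedGlobalServiceContextForTest below.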
diff --git a/src/mongo/db/read_write_concern_defaults.cpp b/src/mongo/db/read_write_concern_defaults.cpp
index 65cabd5e5eb..dbc9a06eba7 100644
--- a/src/mongo/db/read_write_concern_defaults.cpp
+++ b/src/mongo/db/read_write_concern_defaults.cpp
@@ -43,7 +43,7 @@ static constexpr auto kReadConcernLevelsDisallowedAsDefault = {
repl::ReadConcernLevel::kSnapshotReadConcern, repl::ReadConcernLevel::kLinearizableReadConcern};
const auto getReadWriteConcernDefaults =
- ServiceContext::declareDecoration<std::unique_ptr<ReadWriteConcernDefaults>>();
+ ServiceContext::declareDecoration<boost::optional<ReadWriteConcernDefaults>>();
} // namespace
@@ -210,11 +210,11 @@ ReadWriteConcernDefaults& ReadWriteConcernDefaults::get(OperationContext* opCtx)
}
void ReadWriteConcernDefaults::create(ServiceContext* service, FetchDefaultsFn fetchDefaultsFn) {
- getReadWriteConcernDefaults(service) =
- std::make_unique<ReadWriteConcernDefaults>(fetchDefaultsFn);
+ getReadWriteConcernDefaults(service).emplace(service, fetchDefaultsFn);
}
-ReadWriteConcernDefaults::ReadWriteConcernDefaults(FetchDefaultsFn fetchDefaultsFn)
+ReadWriteConcernDefaults::ReadWriteConcernDefaults(ServiceContext* service,
+ FetchDefaultsFn fetchDefaultsFn)
: _threadPool([] {
ThreadPool::Options options;
options.poolName = "ReadWriteConcernDefaults";
@@ -228,14 +228,17 @@ ReadWriteConcernDefaults::ReadWriteConcernDefaults(FetchDefaultsFn fetchDefaults
return options;
}()),
- _defaults(_threadPool,
+ _defaults(service,
+ _threadPool,
[fetchDefaultsFn = std::move(fetchDefaultsFn)](
OperationContext* opCtx, const Type&) { return fetchDefaultsFn(opCtx); }) {}
ReadWriteConcernDefaults::~ReadWriteConcernDefaults() = default;
-ReadWriteConcernDefaults::Cache::Cache(ThreadPoolInterface& threadPool, LookupFn lookupFn)
- : ReadThroughCache(_mutex, getGlobalServiceContext(), threadPool, 1 /* cacheSize */),
+ReadWriteConcernDefaults::Cache::Cache(ServiceContext* service,
+ ThreadPoolInterface& threadPool,
+ LookupFn lookupFn)
+ : ReadThroughCache(_mutex, service, threadPool, 1 /* cacheSize */),
_lookupFn(std::move(lookupFn)) {}
boost::optional<RWConcernDefault> ReadWriteConcernDefaults::Cache::lookup(
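Two things happen in this file: the Cache constructor takes the ServiceContext explicitly rather than calling getGlobalServiceContext(), and the decoration changes from std::unique_ptr<ReadWriteConcernDefaults> to boost::optional, with create() emplacing the object in place. A sketch of the unique_ptr-to-optional switch, using std::optional as a stand-in for boost::optional and hypothetical types:

// Sketch of constructing a lazily-created member in place.
#include <iostream>
#include <optional>
#include <string>

struct Defaults {
    Defaults(std::string source) : source(std::move(source)) {}
    std::string source;
};

struct Holder {
    // Before: std::unique_ptr<Defaults> slot;  // separate heap allocation
    std::optional<Defaults> slot;  // storage lives inside the holder itself
};

int main() {
    Holder h;
    h.slot.emplace("config.settings");  // construct in place, no new/delete
    std::cout << h.slot->source << '\n';
}

Since the decoration's storage is embedded in the ServiceContext, the optional avoids one heap allocation and one pointer indirection per access.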
diff --git a/src/mongo/db/read_write_concern_defaults.h b/src/mongo/db/read_write_concern_defaults.h
index 259a3eafd24..a534b8ea669 100644
--- a/src/mongo/db/read_write_concern_defaults.h
+++ b/src/mongo/db/read_write_concern_defaults.h
@@ -66,7 +66,7 @@ public:
static ReadWriteConcernDefaults& get(OperationContext* opCtx);
static void create(ServiceContext* service, FetchDefaultsFn fetchDefaultsFn);
- ReadWriteConcernDefaults(FetchDefaultsFn fetchDefaultsFn);
+ ReadWriteConcernDefaults(ServiceContext* service, FetchDefaultsFn fetchDefaultsFn);
~ReadWriteConcernDefaults();
/**
@@ -162,7 +162,7 @@ private:
Cache& operator=(const Cache&) = delete;
public:
- Cache(ThreadPoolInterface& threadPool, LookupFn lookupFn);
+ Cache(ServiceContext* service, ThreadPoolInterface& threadPool, LookupFn lookupFn);
virtual ~Cache() = default;
boost::optional<RWConcernDefault> lookup(OperationContext* opCtx, const Type& key) override;
diff --git a/src/mongo/db/service_context_test_fixture.cpp b/src/mongo/db/service_context_test_fixture.cpp
index a0d480a3924..3a1a8b4edb5 100644
--- a/src/mongo/db/service_context_test_fixture.cpp
+++ b/src/mongo/db/service_context_test_fixture.cpp
@@ -42,8 +42,13 @@
namespace mongo {
ScopedGlobalServiceContextForTest::ScopedGlobalServiceContextForTest() {
- setGlobalServiceContext(ServiceContext::make());
- auto const serviceContext = getGlobalServiceContext();
+ auto serviceContext = [] {
+ auto serviceContext = ServiceContext::make();
+ auto serviceContextPtr = serviceContext.get();
+ setGlobalServiceContext(std::move(serviceContext));
+ return serviceContextPtr;
+ }();
+
auto observerRegistry = std::make_unique<OpObserverRegistry>();
serviceContext->setOpObserver(std::move(observerRegistry));
}
diff --git a/src/mongo/db/session_catalog_test.cpp b/src/mongo/db/session_catalog_test.cpp
index cd086a9e8b0..a83e6d4c240 100644
--- a/src/mongo/db/session_catalog_test.cpp
+++ b/src/mongo/db/session_catalog_test.cpp
@@ -569,7 +569,7 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, ConcurrentCheckOutAndKill) {
// Normal check out should start after kill.
normalCheckOutFinish = stdx::async(stdx::launch::async, [&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto sideOpCtx = Client::getCurrent()->makeOperationContext();
sideOpCtx->setLogicalSessionId(lsid);
OperationContextSession normalCheckOut(sideOpCtx.get());
@@ -579,7 +579,7 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, ConcurrentCheckOutAndKill) {
// Kill will short-cut the queue and be the next one to check out.
killCheckOutFinish = stdx::async(stdx::launch::async, [&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto sideOpCtx = Client::getCurrent()->makeOperationContext();
sideOpCtx->setLogicalSessionId(lsid);
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index 62f03df6d6b..2eddbb13984 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -1451,7 +1451,7 @@ void TransactionParticipant::Participant::_finishCommitTransaction(
o(lk).transactionMetricsObserver.onCommit(opCtx,
ServerTransactionsMetrics::get(opCtx),
tickSource,
- &Top::get(getGlobalServiceContext()),
+ &Top::get(opCtx->getServiceContext()),
operationCount,
oplogOperationBytes);
o(lk).transactionMetricsObserver.onTransactionOperation(
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index 1b493bc129b..1a7f4ee868e 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -254,6 +254,7 @@ Status waitForWriteConcern(OperationContext* opCtx,
"replOpTime"_attr = replOpTime,
"writeConcern"_attr = writeConcern.toBSON());
+ auto* const storageEngine = opCtx->getServiceContext()->getStorageEngine();
auto const replCoord = repl::ReplicationCoordinator::get(opCtx);
if (!opCtx->getClient()->isInDirectClient()) {
@@ -273,7 +274,6 @@ Status waitForWriteConcern(OperationContext* opCtx,
case WriteConcernOptions::SyncMode::NONE:
break;
case WriteConcernOptions::SyncMode::FSYNC: {
- StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine();
if (!storageEngine->isDurable()) {
storageEngine->flushAllFiles(opCtx, /*callerHoldsReadLock*/ false);
@@ -282,12 +282,12 @@ Status waitForWriteConcern(OperationContext* opCtx,
result->fsyncFiles = 1;
} else {
// We only need to commit the journal if we're durable
- getGlobalServiceContext()->getStorageEngine()->waitForJournalFlush(opCtx);
+ storageEngine->waitForJournalFlush(opCtx);
}
break;
}
case WriteConcernOptions::SyncMode::JOURNAL:
- getGlobalServiceContext()->getStorageEngine()->waitForJournalFlush(opCtx);
+ storageEngine->waitForJournalFlush(opCtx);
break;
}
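Here the storage-engine pointer is resolved once before the switch, so the FSYNC and JOURNAL branches share a single lookup instead of each chaining through the global. A compact sketch of the shape, with stand-in types:

// Sketch of resolving a shared dependency ahead of a multi-branch switch.
#include <iostream>

enum class SyncMode { None, Fsync, Journal };

struct StorageEngine {
    bool isDurable() const { return true; }
    void flushAllFiles() { std::cout << "flushed\n"; }
    void waitForJournalFlush() { std::cout << "journal flushed\n"; }
};

void waitForWriteConcern(StorageEngine* storageEngine, SyncMode mode) {
    switch (mode) {
        case SyncMode::None:
            break;
        case SyncMode::Fsync:
            if (!storageEngine->isDurable()) {
                storageEngine->flushAllFiles();
            } else {
                // We only need to commit the journal if we're durable.
                storageEngine->waitForJournalFlush();
            }
            break;
        case SyncMode::Journal:
            storageEngine->waitForJournalFlush();
            break;
    }
}

int main() {
    StorageEngine engine;
    waitForWriteConcern(&engine, SyncMode::Journal);
}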
diff --git a/src/mongo/util/concurrency/thread_pool_interface.h b/src/mongo/util/concurrency/thread_pool_interface.h
index bb59ac8e9f8..6d8e3980809 100644
--- a/src/mongo/util/concurrency/thread_pool_interface.h
+++ b/src/mongo/util/concurrency/thread_pool_interface.h
@@ -73,16 +73,6 @@ public:
*/
virtual void join() = 0;
- /**
- * Schedules "task" to run in the thread pool.
- *
- * Returns OK on success, ShutdownInProgress if shutdown() has already executed.
- *
- * It is safe to call this before startup(), but the scheduled task will not execute
- * until after startup() is called.
- */
- virtual void schedule(Task task) = 0;
-
protected:
ThreadPoolInterface() = default;
};
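The last hunk deletes the pure-virtual schedule(Task) declaration from ThreadPoolInterface, likely because it duplicates a declaration already inherited from a base executor interface (OutOfLineExecutor in this tree); the hunk context shown here does not include the base class, so treat that rationale as an inference. A sketch of why deleting a redeclared pure virtual is safe, with illustrative names:

// Sketch: a pure virtual declared on the base need not be restated.
#include <functional>
#include <iostream>

using Task = std::function<void()>;

struct Executor {
    virtual ~Executor() = default;
    virtual void schedule(Task task) = 0;  // the single authoritative declaration
};

// schedule() is inherited as pure virtual; redeclaring it adds nothing.
struct ThreadPoolLike : Executor {
    virtual void startup() = 0;
};

struct InlinePool : ThreadPoolLike {
    void startup() override {}
    void schedule(Task task) override { task(); }  // concrete types still implement it
};

int main() {
    InlinePool pool;
    pool.schedule([] { std::cout << "ran\n"; });
}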