From b1cc960cb5de30861b00bf72057c5dfa0733c1fb Mon Sep 17 00:00:00 2001
From: Andy Schwerin
Date: Wed, 2 May 2018 10:37:46 -0400
Subject: SERVER-34751 Replace getGlobalStorageEngine calls with getStorageEngine.

---
 src/mongo/db/catalog/catalog_control.cpp              |  4 ++--
 src/mongo/db/catalog/coll_mod.cpp                     |  2 +-
 src/mongo/db/catalog/database_holder_impl.cpp         |  6 +++---
 src/mongo/db/catalog/database_impl.cpp                |  6 +++---
 src/mongo/db/catalog/index_catalog_impl.cpp           |  2 +-
 src/mongo/db/commands/dbcommands.cpp                  |  4 ++--
 src/mongo/db/commands/do_txn_cmd.cpp                  |  2 +-
 .../db/commands/feature_compatibility_version.cpp     |  2 +-
 src/mongo/db/commands/fsync.cpp                       |  4 ++--
 src/mongo/db/commands/list_databases.cpp              |  2 +-
 src/mongo/db/commands/restart_catalog_command.cpp     |  2 +-
 src/mongo/db/commands/snapshot_management.cpp         |  6 ++----
 src/mongo/db/db.cpp                                   |  6 +++---
 src/mongo/db/db_raii.cpp                              |  2 +-
 src/mongo/db/index_rebuilder.cpp                      |  2 +-
 src/mongo/db/repair_database_and_check_version.cpp    |  6 +++---
 src/mongo/db/repl/apply_ops.cpp                       |  2 +-
 src/mongo/db/repl/bgsync.cpp                          |  2 +-
 src/mongo/db/repl/do_txn.cpp                          |  2 +-
 src/mongo/db/repl/oplog.cpp                           |  6 +++---
 src/mongo/db/repl/repl_set_commands.cpp               |  2 +-
 .../db/repl/replication_consistency_markers_impl.cpp  |  5 ++---
 .../replication_coordinator_external_state_impl.cpp   | 18 +++++++++---------
 src/mongo/db/repl/replication_coordinator_impl.cpp    |  4 ++--
 src/mongo/db/repl/rollback_impl.cpp                   |  4 ++--
 src/mongo/db/repl/rollback_impl_test.cpp              |  2 +-
 src/mongo/db/repl/rs_rollback.cpp                     |  3 +--
 src/mongo/db/repl/storage_interface_impl.cpp          | 12 ++++++------
 src/mongo/db/repl/sync_tail.cpp                       |  8 ++++----
 src/mongo/db/s/chunk_move_write_concern_options.cpp   |  2 +-
 src/mongo/db/service_context.cpp                      |  2 +-
 src/mongo/db/service_context.h                        |  7 -------
 src/mongo/db/service_context_d.cpp                    |  2 +-
 src/mongo/db/service_context_d_test_fixture.cpp       |  2 +-
 src/mongo/db/service_entry_point_common.cpp           |  2 +-
 src/mongo/db/session.cpp                              | 12 +++++-------
 src/mongo/db/storage/mmap_v1/data_file_sync.cpp       |  2 +-
 .../db/storage/mmap_v1/mmap_v1_extent_manager.cpp     |  2 +-
 src/mongo/db/storage/storage_init.cpp                 |  2 +-
 .../wiredtiger/wiredtiger_record_store_mongod.cpp     |  2 +-
 src/mongo/db/write_concern.cpp                        |  4 ++--
 41 files changed, 78 insertions(+), 91 deletions(-)

(limited to 'src/mongo/db')

diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index f97cb254d72..7edb945faa5 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -60,7 +60,7 @@ void closeCatalog(OperationContext* opCtx) {
 
     // Close the storage engine's catalog.
     log() << "closeCatalog: closing storage engine catalog";
-    opCtx->getServiceContext()->getGlobalStorageEngine()->closeCatalog(opCtx);
+    opCtx->getServiceContext()->getStorageEngine()->closeCatalog(opCtx);
 }
 
 void openCatalog(OperationContext* opCtx) {
@@ -68,7 +68,7 @@ void openCatalog(OperationContext* opCtx) {
 
     // Load the catalog in the storage engine.
log() << "openCatalog: loading storage engine catalog"; - auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine(); + auto storageEngine = opCtx->getServiceContext()->getStorageEngine(); storageEngine->loadCatalog(opCtx); log() << "openCatalog: reconciling catalog and idents"; diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp index 76b0a3e5b78..9cdb68548cf 100644 --- a/src/mongo/db/catalog/coll_mod.cpp +++ b/src/mongo/db/catalog/coll_mod.cpp @@ -558,7 +558,7 @@ void addCollectionUUIDs(OperationContext* opCtx) { // Add UUIDs to all collections of all databases if they do not already have UUIDs. std::vector dbNames; - StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine(); + StorageEngine* storageEngine = opCtx->getServiceContext()->getStorageEngine(); { Lock::GlobalLock lk(opCtx, MODE_IS); storageEngine->listDatabases(&dbNames); diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp index df70ef25c27..c4873930036 100644 --- a/src/mongo/db/catalog/database_holder_impl.cpp +++ b/src/mongo/db/catalog/database_holder_impl.cpp @@ -167,7 +167,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo // requirement for X-lock on the database when we enter. So there is no way we can insert two // different databases for the same name. lk.unlock(); - StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine(); + StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine(); DatabaseCatalogEntry* entry = storageEngine->getDatabaseCatalogEntry(opCtx, dbname); if (!entry->exists()) { @@ -221,7 +221,7 @@ void DatabaseHolderImpl::close(OperationContext* opCtx, StringData ns, const std _dbs.erase(it); getGlobalServiceContext() - ->getGlobalStorageEngine() + ->getStorageEngine() ->closeDatabase(opCtx, dbName.toString()) .transitional_ignore(); } @@ -252,7 +252,7 @@ void DatabaseHolderImpl::closeAll(OperationContext* opCtx, const std::string& re _dbs.erase(name); getGlobalServiceContext() - ->getGlobalStorageEngine() + ->getStorageEngine() ->closeDatabase(opCtx, name) .transitional_ignore(); } diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp index fea411a7177..7ee8c22b7ba 100644 --- a/src/mongo/db/catalog/database_impl.cpp +++ b/src/mongo/db/catalog/database_impl.cpp @@ -410,7 +410,7 @@ void DatabaseImpl::getStats(OperationContext* opCtx, BSONObjBuilder* output, dou _dbEntry->appendExtraStats(opCtx, output, scale); - if (!opCtx->getServiceContext()->getGlobalStorageEngine()->isEphemeral()) { + if (!opCtx->getServiceContext()->getStorageEngine()->isEphemeral()) { boost::filesystem::path dbpath(storageGlobalParams.dbpath); if (storageGlobalParams.directoryperdb) { dbpath /= _name; @@ -868,7 +868,7 @@ void DatabaseImpl::dropDatabase(OperationContext* opCtx, Database* db) { DatabaseHolder::getDatabaseHolder().close(opCtx, name, "database dropped"); - auto const storageEngine = serviceContext->getGlobalStorageEngine(); + auto const storageEngine = serviceContext->getStorageEngine(); writeConflictRetry(opCtx, "dropDatabase", name, [&] { storageEngine->dropDatabase(opCtx, name).transitional_ignore(); }); @@ -1052,7 +1052,7 @@ MONGO_REGISTER_SHIM(Database::dropAllDatabasesExceptLocal)(OperationContext* opC Lock::GlobalWrite lk(opCtx); vector n; - StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine(); + StorageEngine* storageEngine = 
opCtx->getServiceContext()->getStorageEngine(); storageEngine->listDatabases(&n); if (n.size() == 0) diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp index 572b54e4a05..583721aba4b 100644 --- a/src/mongo/db/catalog/index_catalog_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_impl.cpp @@ -572,7 +572,7 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec) // SERVER-16893 Forbid use of v0 indexes with non-mmapv1 engines if (indexVersion == IndexVersion::kV0 && - !opCtx->getServiceContext()->getGlobalStorageEngine()->isMmapV1()) { + !opCtx->getServiceContext()->getStorageEngine()->isMmapV1()) { return Status(ErrorCodes::CannotCreateIndex, str::stream() << "use of v0 indexes is only allowed with the " << "mmapv1 storage engine"); diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp index ed753c7392a..0499c35f683 100644 --- a/src/mongo/db/commands/dbcommands.cpp +++ b/src/mongo/db/commands/dbcommands.cpp @@ -250,7 +250,7 @@ public: e = cmdObj.getField("backupOriginalFiles"); bool backupOriginalFiles = e.isBoolean() && e.boolean(); - StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine(); + StorageEngine* engine = getGlobalServiceContext()->getStorageEngine(); repl::UnreplicatedWritesBlock uwb(opCtx); Status status = repairDatabase( opCtx, engine, dbname, preserveClonedFilesOnFailure, backupOriginalFiles); @@ -745,7 +745,7 @@ public: result.appendNumber("indexes", 0); result.appendNumber("indexSize", 0); result.appendNumber("fileSize", 0); - if (!getGlobalServiceContext()->getGlobalStorageEngine()->isEphemeral()) { + if (!getGlobalServiceContext()->getStorageEngine()->isEphemeral()) { result.appendNumber("fsUsedSize", 0); result.appendNumber("fsTotalSize", 0); } diff --git a/src/mongo/db/commands/do_txn_cmd.cpp b/src/mongo/db/commands/do_txn_cmd.cpp index 033669f7132..d0dd99ec645 100644 --- a/src/mongo/db/commands/do_txn_cmd.cpp +++ b/src/mongo/db/commands/do_txn_cmd.cpp @@ -136,7 +136,7 @@ public: BSONObjBuilder& result) override { uassert(ErrorCodes::CommandNotSupported, "This storage engine does not support transactions.", - !opCtx->getServiceContext()->getGlobalStorageEngine()->isMmapV1()); + !opCtx->getServiceContext()->getStorageEngine()->isMmapV1()); validateDoTxnCommand(cmdObj); diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp index c9cacca5f32..a305333b693 100644 --- a/src/mongo/db/commands/feature_compatibility_version.cpp +++ b/src/mongo/db/commands/feature_compatibility_version.cpp @@ -130,7 +130,7 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* opCtx, bool FeatureCompatibilityVersion::isCleanStartUp() { std::vector dbNames; - StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine(); + StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine(); storageEngine->listDatabases(&dbNames); for (auto&& dbName : dbNames) { diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp index f945c2dddaa..62d25856e38 100644 --- a/src/mongo/db/commands/fsync.cpp +++ b/src/mongo/db/commands/fsync.cpp @@ -148,7 +148,7 @@ public: // Take a global IS lock to ensure the storage engine is not shutdown Lock::GlobalLock global(opCtx, MODE_IS); - StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine(); + StorageEngine* storageEngine = 
getGlobalServiceContext()->getStorageEngine(); result.append("numFiles", storageEngine->flushAllFiles(opCtx, sync)); return true; } @@ -356,7 +356,7 @@ void FSyncLockThread::run() { return; } opCtx.lockState()->downgradeGlobalXtoSForMMAPV1(); - StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine(); + StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine(); try { storageEngine->flushAllFiles(&opCtx, true); diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp index fc8068ad503..3c93304a370 100644 --- a/src/mongo/db/commands/list_databases.cpp +++ b/src/mongo/db/commands/list_databases.cpp @@ -112,7 +112,7 @@ public: bool nameOnly = jsobj[kNameOnlyField].trueValue(); vector dbNames; - StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine(); + StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine(); { Lock::GlobalLock lk(opCtx, MODE_IS); storageEngine->listDatabases(&dbNames); diff --git a/src/mongo/db/commands/restart_catalog_command.cpp b/src/mongo/db/commands/restart_catalog_command.cpp index fd954626f10..6dec4a02d24 100644 --- a/src/mongo/db/commands/restart_catalog_command.cpp +++ b/src/mongo/db/commands/restart_catalog_command.cpp @@ -93,7 +93,7 @@ public: // marked drop-pending. (Otherwise, the Database object will be reconstructed when // re-opening the catalog, but with the drop pending flag cleared.) std::vector allDbs; - getGlobalServiceContext()->getGlobalStorageEngine()->listDatabases(&allDbs); + getGlobalServiceContext()->getStorageEngine()->listDatabases(&allDbs); for (auto&& dbName : allDbs) { const auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName); if (db->isDropPending(opCtx)) { diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp index 9a9084b8723..84c57af241b 100644 --- a/src/mongo/db/commands/snapshot_management.cpp +++ b/src/mongo/db/commands/snapshot_management.cpp @@ -71,8 +71,7 @@ public: const std::string& dbname, const BSONObj& cmdObj, BSONObjBuilder& result) { - auto snapshotManager = - getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager(); + auto snapshotManager = getGlobalServiceContext()->getStorageEngine()->getSnapshotManager(); if (!snapshotManager) { return CommandHelpers::appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""}); @@ -116,8 +115,7 @@ public: const std::string& dbname, const BSONObj& cmdObj, BSONObjBuilder& result) { - auto snapshotManager = - getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager(); + auto snapshotManager = getGlobalServiceContext()->getStorageEngine()->getSnapshotManager(); if (!snapshotManager) { return CommandHelpers::appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""}); diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp index ce59dfe90f8..957b55b633b 100644 --- a/src/mongo/db/db.cpp +++ b/src/mongo/db/db.cpp @@ -586,7 +586,7 @@ ExitCode _initAndListen(int listenPort) { // Start up a background task to periodically check for and kill expired transactions. // Only do this on storage engines supporting snapshot reads, which hold resources we wish to // release periodically in order to avoid storage cache pressure build up. 
-    auto storageEngine = serviceContext->getGlobalStorageEngine();
+    auto storageEngine = serviceContext->getStorageEngine();
     invariant(storageEngine);
     if (storageEngine->supportsReadConcernSnapshot()) {
         startPeriodicThreadToAbortExpiredTransactions(serviceContext);
@@ -844,7 +844,7 @@ void shutdownTask() {
     // Shut down the global dbclient pool so callers stop waiting for connections.
     globalConnPool.shutdown();
 
-    if (serviceContext->getGlobalStorageEngine()) {
+    if (serviceContext->getStorageEngine()) {
         ServiceContext::UniqueOperationContext uniqueOpCtx;
         OperationContext* opCtx = client->getOperationContext();
         if (!opCtx) {
@@ -930,7 +930,7 @@ void shutdownTask() {
     invariant(LOCK_OK == result);
 
     // Global storage engine may not be started in all cases before we exit
-    if (serviceContext->getGlobalStorageEngine()) {
+    if (serviceContext->getStorageEngine()) {
         serviceContext->shutdownGlobalStorageEngineCleanly();
     }
 
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index 3ff4649584a..0ac1fea64de 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -90,7 +90,7 @@ AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx,
     // Don't take the ParallelBatchWriterMode lock when the server parameter is set and our
     // storage engine supports snapshot reads.
     if (allowSecondaryReadsDuringBatchApplication.load() &&
-        opCtx->getServiceContext()->getGlobalStorageEngine()->supportsReadConcernSnapshot()) {
+        opCtx->getServiceContext()->getStorageEngine()->supportsReadConcernSnapshot()) {
         _shouldNotConflictWithSecondaryBatchApplicationBlock.emplace(opCtx->lockState());
     }
     const auto collectionLockMode = getLockModeForQuery(opCtx);
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 3fa674392cf..376d05e5e74 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -146,7 +146,7 @@ void restartInProgressIndexesFromLastShutdown(OperationContext* opCtx) {
 
     std::vector<std::string> dbNames;
 
-    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+    StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine();
     storageEngine->listDatabases(&dbNames);
 
     try {
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index 7a9a50e215e..e57bfe58840 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -135,7 +135,7 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx
  */
 Status ensureAllCollectionsHaveUUIDs(OperationContext* opCtx,
                                      const std::vector<std::string>& dbNames) {
-    bool isMmapV1 = opCtx->getServiceContext()->getGlobalStorageEngine()->isMmapV1();
+    bool isMmapV1 = opCtx->getServiceContext()->getStorageEngine()->isMmapV1();
     std::vector<NamespaceString> nonReplicatedCollNSSsWithoutUUIDs;
     for (const auto& dbName : dbNames) {
         Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
@@ -313,7 +313,7 @@ void rebuildIndexes(OperationContext* opCtx, StorageEngine* storageEngine) {
 StatusWith<bool> repairDatabasesAndCheckVersion(OperationContext* opCtx) {
     LOG(1) << "enter repairDatabases (to check pdfile version #)";
 
-    auto const storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+    auto const storageEngine = opCtx->getServiceContext()->getStorageEngine();
 
     Lock::GlobalWrite lk(opCtx);
 
@@ -545,7 +545,7 @@ StatusWith repairDatabasesAndCheckVersion(OperationContext* opCtx) {
     if (!fcvDocumentExists && nonLocalDatabases) {
         severe()
             << "Unable to start up mongod due to missing featureCompatibilityVersion document.";
-        if (opCtx->getServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
+        if (opCtx->getServiceContext()->getStorageEngine()->isMmapV1()) {
             severe() << "Please run with --journalOptions "
                      << static_cast<int>(MMAPV1Options::JournalRecoverOnly)
                      << " to recover the journal. Then run with --repair to restore the document.";
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 81ebf365c62..e73efba3d41 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -301,7 +301,7 @@ Status _applyOps(OperationContext* opCtx,
                 // lock or any database locks. We release all locks temporarily while the fail
                 // point is enabled to allow other threads to make progress.
                 boost::optional<Lock::TempRelease> release;
-                auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+                auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
                 if (storageEngine->isMmapV1() && !opCtx->lockState()->isW()) {
                     release.emplace(opCtx->lockState());
                 }
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 6a395d6faeb..ffe0c992dc9 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -625,7 +625,7 @@ void BackgroundSync::_runRollback(OperationContext* opCtx,
         return connection->get();
     };
 
-    auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+    auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
     if (!forceRollbackViaRefetch.load() && storageEngine->supportsRecoverToStableTimestamp()) {
         log() << "Rollback using 'recoverToStableTimestamp' method.";
         _runRollbackViaRecoverToCheckpoint(
diff --git a/src/mongo/db/repl/do_txn.cpp b/src/mongo/db/repl/do_txn.cpp
index 52a1f36fb31..5579ceb1c33 100644
--- a/src/mongo/db/repl/do_txn.cpp
+++ b/src/mongo/db/repl/do_txn.cpp
@@ -188,7 +188,7 @@ Status _doTxn(OperationContext* opCtx,
                 // lock or any database locks. We release all locks temporarily while the fail
                 // point is enabled to allow other threads to make progress.
                 boost::optional<Lock::TempRelease> release;
-                auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+                auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
                 if (storageEngine->isMmapV1() && !opCtx->lockState()->isW()) {
                     release.emplace(opCtx->lockState());
                 }
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 32ffed83e67..7e01e6226e5 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -403,7 +403,7 @@ void _logOpsInner(OperationContext* opCtx,
     }
 
     auto lastAppliedTimestamp = finalOpTime.getTimestamp();
-    const auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+    const auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
     if (storageEngine->supportsDocLocking()) {
         // If the storage engine supports document level locking, then it is possible for
         // oplog writes to commit out of order. In that case, we only want to set our last
@@ -577,7 +577,7 @@ long long getNewOplogSizeBytes(OperationContext* opCtx, const ReplSettings& repl
 #else
     long long lowerBound = 0;
     double bytes = 0;
-    if (opCtx->getClient()->getServiceContext()->getGlobalStorageEngine()->isEphemeral()) {
+    if (opCtx->getClient()->getServiceContext()->getStorageEngine()->isEphemeral()) {
         // in memory: 50MB minimum size
         lowerBound = 50LL * 1024 * 1024;
         bytes = pi.getMemSizeMB() * 1024 * 1024;
@@ -652,7 +652,7 @@ void createOplog(OperationContext* opCtx, const std::string& oplogCollectionName
     });
 
     /* sync here so we don't get any surprising lag later when we try to sync */
-    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+    StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine();
     storageEngine->flushAllFiles(opCtx, true);
 
     log() << "******" << endl;
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index d96486aa7d5..288dc66c389 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -635,7 +635,7 @@ namespace {
  */
 bool replHasDatabases(OperationContext* opCtx) {
     std::vector<std::string> names;
-    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+    StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine();
     storageEngine->listDatabases(&names);
 
     if (names.size() >= 2)
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.cpp b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
index 8c21e344fdd..5d0b1f70a4a 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
@@ -162,7 +162,7 @@ void ReplicationConsistencyMarkersImpl::clearInitialSyncFlag(OperationContext* o
 
     _updateMinValidDocument(opCtx, update);
 
-    if (getGlobalServiceContext()->getGlobalStorageEngine()->isDurable()) {
+    if (getGlobalServiceContext()->getStorageEngine()->isDurable()) {
         opCtx->recoveryUnit()->waitUntilDurable();
         replCoord->setMyLastDurableOpTime(time);
     }
@@ -191,8 +191,7 @@ void ReplicationConsistencyMarkersImpl::setMinValid(OperationContext* opCtx,
 
     // This method is only used with storage engines that do not support recover to stable
     // timestamp. As a result, their timestamps do not matter.
-    invariant(
-        !opCtx->getServiceContext()->getGlobalStorageEngine()->supportsRecoverToStableTimestamp());
+    invariant(!opCtx->getServiceContext()->getStorageEngine()->supportsRecoverToStableTimestamp());
 
     update.timestamp = Timestamp();
 
     _updateMinValidDocument(opCtx, update);
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index d2c8aeb7f33..1f2493805f4 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -326,7 +326,7 @@ void ReplicationCoordinatorExternalStateImpl::startThreads(const ReplSettings& s
     }
 
     log() << "Starting replication storage threads";
-    _service->getGlobalStorageEngine()->setJournalListener(this);
+    _service->getStorageEngine()->setJournalListener(this);
 
     _oplogApplierTaskExecutor = makeTaskExecutor(_service, "rsSync");
     _oplogApplierTaskExecutor->startup();
@@ -387,7 +387,7 @@ ThreadPool* ReplicationCoordinatorExternalStateImpl::getDbWorkThreadPool() const
 Status ReplicationCoordinatorExternalStateImpl::runRepairOnLocalDB(OperationContext* opCtx) {
     try {
         Lock::GlobalWrite globalWrite(opCtx);
-        StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
+        StorageEngine* engine = getGlobalServiceContext()->getStorageEngine();
 
         if (!engine->isMmapV1()) {
             return Status::OK();
@@ -783,7 +783,7 @@ void ReplicationCoordinatorExternalStateImpl::startProducerIfStopped() {
 
 void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationContext* opCtx) {
     std::vector<std::string> dbNames;
-    StorageEngine* storageEngine = _service->getGlobalStorageEngine();
+    StorageEngine* storageEngine = _service->getStorageEngine();
     storageEngine->listDatabases(&dbNames);
 
     for (std::vector<std::string>::iterator it = dbNames.begin(); it != dbNames.end(); ++it) {
@@ -802,13 +802,13 @@ void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationC
 }
 
 void ReplicationCoordinatorExternalStateImpl::dropAllSnapshots() {
-    if (auto manager = _service->getGlobalStorageEngine()->getSnapshotManager())
+    if (auto manager = _service->getStorageEngine()->getSnapshotManager())
         manager->dropAllSnapshots();
 }
 
 void ReplicationCoordinatorExternalStateImpl::updateCommittedSnapshot(
     const OpTime& newCommitPoint) {
-    auto manager = _service->getGlobalStorageEngine()->getSnapshotManager();
+    auto manager = _service->getStorageEngine()->getSnapshotManager();
     if (manager) {
         manager->setCommittedSnapshot(newCommitPoint.getTimestamp());
     }
@@ -816,14 +816,14 @@ void ReplicationCoordinatorExternalStateImpl::updateCommittedSnapshot(
 }
 
 void ReplicationCoordinatorExternalStateImpl::updateLocalSnapshot(const OpTime& optime) {
-    auto manager = _service->getGlobalStorageEngine()->getSnapshotManager();
+    auto manager = _service->getStorageEngine()->getSnapshotManager();
     if (manager) {
         manager->setLocalSnapshot(optime.getTimestamp());
     }
 }
 
 bool ReplicationCoordinatorExternalStateImpl::snapshotsEnabled() const {
-    return _service->getGlobalStorageEngine()->getSnapshotManager() != nullptr;
+    return _service->getStorageEngine()->getSnapshotManager() != nullptr;
 }
 
 void ReplicationCoordinatorExternalStateImpl::notifyOplogMetadataWaiters(
@@ -865,7 +865,7 @@ double ReplicationCoordinatorExternalStateImpl::getElectionTimeoutOffsetLimitFra
 
 bool ReplicationCoordinatorExternalStateImpl::isReadCommittedSupportedByStorageEngine(
     OperationContext* opCtx) const {
-    auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+    auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
     // This should never be called if the storage engine has not been initialized.
     invariant(storageEngine);
     return storageEngine->getSnapshotManager();
@@ -873,7 +873,7 @@ bool ReplicationCoordinatorExternalStateImpl::isReadCommittedSupportedByStorageE
 
 bool ReplicationCoordinatorExternalStateImpl::isReadConcernSnapshotSupportedByStorageEngine(
     OperationContext* opCtx) const {
-    auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+    auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
     // This should never be called if the storage engine has not been initialized.
     invariant(storageEngine);
     return storageEngine->supportsReadConcernSnapshot();
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index d0d46498f1f..2d6bc35340d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -1144,7 +1144,7 @@ void ReplicationCoordinatorImpl::_setMyLastAppliedOpTime_inlock(const OpTime& op
         // The oplog application phase of initial sync starts timestamping writes, causing
         // WiredTiger to pin this data in memory. Advancing the oldest timestamp in step with the
         // last applied optime here will permit WiredTiger to evict this data as it sees fit.
-        _service->getGlobalStorageEngine()->setOldestTimestamp(opTime.getTimestamp());
+        _service->getStorageEngine()->setOldestTimestamp(opTime.getTimestamp());
     }
 }
 
@@ -2779,7 +2779,7 @@ ReplicationCoordinatorImpl::_setCurrentRSConfig_inlock(OperationContext* opCtx,
     }
 
     // Warn if running --nojournal and writeConcernMajorityJournalDefault = false
-    StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+    StorageEngine* storageEngine = opCtx->getServiceContext()->getStorageEngine();
     if (storageEngine && !storageEngine->isDurable() &&
         (newConfig.getWriteConcernMajorityShouldJournal() &&
          (!oldConfig.isInitialized() || !oldConfig.getWriteConcernMajorityShouldJournal()))) {
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index e6831304a46..13a3874736b 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -299,7 +299,7 @@ Status RollbackImpl::_awaitBgIndexCompletion(OperationContext* opCtx) {
     }
 
     // Get a list of all databases.
-    StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+    StorageEngine* storageEngine = opCtx->getServiceContext()->getStorageEngine();
     std::vector<std::string> dbs;
     {
         Lock::GlobalLock lk(opCtx, MODE_IS);
@@ -854,7 +854,7 @@ void RollbackImpl::_resetDropPendingState(OperationContext* opCtx) {
     DropPendingCollectionReaper::get(opCtx)->clearDropPendingState();
 
     std::vector<std::string> dbNames;
-    opCtx->getServiceContext()->getGlobalStorageEngine()->listDatabases(&dbNames);
+    opCtx->getServiceContext()->getStorageEngine()->listDatabases(&dbNames);
     for (const auto& dbName : dbNames) {
         Lock::DBLock dbLock(opCtx, dbName, MODE_X);
         Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
diff --git a/src/mongo/db/repl/rollback_impl_test.cpp b/src/mongo/db/repl/rollback_impl_test.cpp
index c0a1ed1c1ce..d8097698f14 100644
--- a/src/mongo/db/repl/rollback_impl_test.cpp
+++ b/src/mongo/db/repl/rollback_impl_test.cpp
@@ -421,7 +421,7 @@ void _assertDocsInOplog(OperationContext* opCtx, std::vector timestamps) {
 TEST_F(RollbackImplTest, TestFixtureSetUpInitializesStorageEngine) {
     auto serviceContext = _serviceContextMongoDTest.getServiceContext();
     ASSERT_TRUE(serviceContext);
-    ASSERT_TRUE(serviceContext->getGlobalStorageEngine());
+    ASSERT_TRUE(serviceContext->getStorageEngine());
 }
 
 TEST_F(RollbackImplTest, RollbackReturnsNotSecondaryWhenFailingToTransitionToRollback) {
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index eae8c70a468..682b6b6353a 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -612,8 +612,7 @@ void checkRbidAndUpdateMinValid(OperationContext* opCtx,
 
     // This method is only used with storage engines that do not support recover to stable
     // timestamp. As a result, the timestamp on the 'appliedThrough' update does not matter.
-    invariant(
-        !opCtx->getServiceContext()->getGlobalStorageEngine()->supportsRecoverToStableTimestamp());
+    invariant(!opCtx->getServiceContext()->getStorageEngine()->supportsRecoverToStableTimestamp());
     replicationProcess->getConsistencyMarkers()->clearAppliedThrough(opCtx, {});
 
     replicationProcess->getConsistencyMarkers()->setMinValid(opCtx, minValid);
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index cd9dbcc1fd1..a2298a67932 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -1033,25 +1033,25 @@ StatusWith StorageInterfaceImpl::getCollectionUUID(
 }
 
 void StorageInterfaceImpl::setStableTimestamp(ServiceContext* serviceCtx, Timestamp snapshotName) {
-    serviceCtx->getGlobalStorageEngine()->setStableTimestamp(snapshotName);
+    serviceCtx->getStorageEngine()->setStableTimestamp(snapshotName);
 }
 
 void StorageInterfaceImpl::setInitialDataTimestamp(ServiceContext* serviceCtx,
                                                    Timestamp snapshotName) {
-    serviceCtx->getGlobalStorageEngine()->setInitialDataTimestamp(snapshotName);
+    serviceCtx->getStorageEngine()->setInitialDataTimestamp(snapshotName);
 }
 
 StatusWith<Timestamp> StorageInterfaceImpl::recoverToStableTimestamp(OperationContext* opCtx) {
-    return opCtx->getServiceContext()->getGlobalStorageEngine()->recoverToStableTimestamp(opCtx);
+    return opCtx->getServiceContext()->getStorageEngine()->recoverToStableTimestamp(opCtx);
 }
 
 bool StorageInterfaceImpl::supportsRecoverToStableTimestamp(ServiceContext* serviceCtx) const {
-    return serviceCtx->getGlobalStorageEngine()->supportsRecoverToStableTimestamp();
+    return serviceCtx->getStorageEngine()->supportsRecoverToStableTimestamp();
 }
 
 boost::optional<Timestamp> StorageInterfaceImpl::getRecoveryTimestamp(
     ServiceContext* serviceCtx) const {
-    return serviceCtx->getGlobalStorageEngine()->getRecoveryTimestamp();
+    return serviceCtx->getStorageEngine()->getRecoveryTimestamp();
 }
 
 Status StorageInterfaceImpl::isAdminDbValid(OperationContext* opCtx) {
@@ -1131,7 +1131,7 @@ boost::optional StorageInterfaceImpl::getLastStableCheckpointTimestam
         return boost::none;
     }
 
-    const auto ret = serviceCtx->getGlobalStorageEngine()->getLastStableCheckpointTimestamp();
+    const auto ret = serviceCtx->getStorageEngine()->getLastStableCheckpointTimestamp();
     if (ret == boost::none) {
         return Timestamp::min();
     }
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 40ace32b248..157ad291cdf 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -503,7 +503,7 @@ void scheduleWritesToOplog(OperationContext* opCtx,
     // there would be no way to take advantage of multiple threads if a storage engine doesn't
     // support document locking.
     if (!enoughToMultiThread ||
-        !opCtx->getServiceContext()->getGlobalStorageEngine()->supportsDocLocking()) {
+        !opCtx->getServiceContext()->getStorageEngine()->supportsDocLocking()) {
 
         invariant(threadPool->schedule(makeOplogWriterForRange(0, ops.size())));
         return;
@@ -576,7 +576,7 @@ void fillWriterVectors(OperationContext* opCtx,
                        std::vector<MultiApplier::OperationPtrs>* writerVectors,
                        std::vector<MultiApplier::Operations>* applyOpsOperations) {
     const auto serviceContext = opCtx->getServiceContext();
-    const auto storageEngine = serviceContext->getGlobalStorageEngine();
+    const auto storageEngine = serviceContext->getStorageEngine();
     const bool supportsDocLocking = storageEngine->supportsDocLocking();
     const uint32_t numWriters = writerVectors->size();
 
@@ -812,7 +812,7 @@ void SyncTail::oplogApplication(OplogBuffer* oplogBuffer, ReplicationCoordinator
     OpQueueBatcher batcher(this, _storageInterface, oplogBuffer);
 
     std::unique_ptr<ApplyBatchFinalizer> finalizer{
-        getGlobalServiceContext()->getGlobalStorageEngine()->isDurable()
+        getGlobalServiceContext()->getStorageEngine()->isDurable()
             ? new ApplyBatchFinalizerForJournal(replCoord)
             : new ApplyBatchFinalizer(replCoord)};
 
@@ -1388,7 +1388,7 @@ StatusWith SyncTail::multiApply(OperationContext* opCtx, MultiApplier::O
         // This means that all the writes associated with the oplog entries in the batch are
         // finished and no new writes with timestamps associated with those oplog entries will show
         // up in the future.
-        const auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+        const auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
         storageEngine->replicationBatchIsComplete();
     }
 
diff --git a/src/mongo/db/s/chunk_move_write_concern_options.cpp b/src/mongo/db/s/chunk_move_write_concern_options.cpp
index f9d6095229f..f2196d2ef4b 100644
--- a/src/mongo/db/s/chunk_move_write_concern_options.cpp
+++ b/src/mongo/db/s/chunk_move_write_concern_options.cpp
@@ -69,7 +69,7 @@ StatusWith ChunkMoveWriteConcernOptions::getEffectiveWriteC
     OperationContext* opCtx, const MigrationSecondaryThrottleOptions& options) {
     auto secondaryThrottle = options.getSecondaryThrottle();
     if (secondaryThrottle == MigrationSecondaryThrottleOptions::kDefault) {
-        if (opCtx->getServiceContext()->getGlobalStorageEngine()->supportsDocLocking()) {
+        if (opCtx->getServiceContext()->getStorageEngine()->supportsDocLocking()) {
             secondaryThrottle = MigrationSecondaryThrottleOptions::kOff;
         } else {
             secondaryThrottle = MigrationSecondaryThrottleOptions::kOn;
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index a61734e8cff..17193d9462a 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -93,7 +93,7 @@ bool supportsDocLocking() {
 }
 
 bool isMMAPV1() {
-    StorageEngine* globalStorageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+    StorageEngine* globalStorageEngine = getGlobalServiceContext()->getStorageEngine();
 
     invariant(globalStorageEngine);
     return globalStorageEngine->isMmapV1();
diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h
index 305a85c226f..1caa0fd0f05 100644
--- a/src/mongo/db/service_context.h
+++ b/src/mongo/db/service_context.h
@@ -278,13 +278,6 @@ public:
         return _storageEngine.get();
     }
 
-    /**
-     * Return the storage engine instance we're using.
-     */
-    StorageEngine* getGlobalStorageEngine() {
-        return getStorageEngine();
-    }
-
     //
     // Global operation management. This may not belong here and there may be too many methods
     // here.
diff --git a/src/mongo/db/service_context_d.cpp b/src/mongo/db/service_context_d.cpp
index 4162f7150ca..ca500ce6abe 100644
--- a/src/mongo/db/service_context_d.cpp
+++ b/src/mongo/db/service_context_d.cpp
@@ -287,7 +287,7 @@ std::unique_ptr ServiceContextMongoD::_newOpCtx(Client* client
         opCtx->setLockState(stdx::make_unique<DefaultLockerImpl>());
     }
 
-    opCtx->setRecoveryUnit(getGlobalStorageEngine()->newRecoveryUnit(),
+    opCtx->setRecoveryUnit(getStorageEngine()->newRecoveryUnit(),
                            WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
     return opCtx;
 }
diff --git a/src/mongo/db/service_context_d_test_fixture.cpp b/src/mongo/db/service_context_d_test_fixture.cpp
index df3811421d4..1b51ac9e966 100644
--- a/src/mongo/db/service_context_d_test_fixture.cpp
+++ b/src/mongo/db/service_context_d_test_fixture.cpp
@@ -59,7 +59,7 @@ void ServiceContextMongoDTest::setUp() {
     auto logicalClock = stdx::make_unique<LogicalClock>(serviceContext);
     LogicalClock::set(serviceContext, std::move(logicalClock));
 
-    if (!serviceContext->getGlobalStorageEngine()) {
+    if (!serviceContext->getStorageEngine()) {
         // When using the "ephemeralForTest" storage engine, it is fine for the temporary directory
         // to go away after the global storage engine is initialized.
         unittest::TempDir tempDir("service_context_d_test_fixture");
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index b75aeff8ac5..632d3e10f14 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -600,7 +600,7 @@ void execCommandDatabase(OperationContext* opCtx,
             request.body,
             command->requiresAuth(),
             replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet,
-            opCtx->getServiceContext()->getGlobalStorageEngine()->supportsDocLocking());
+            opCtx->getServiceContext()->getStorageEngine()->supportsDocLocking());
 
         // Session ids are forwarded in requests, so commands that require roundtrips between
         // servers may result in a deadlock when a server tries to check out a session it is already
diff --git a/src/mongo/db/session.cpp b/src/mongo/db/session.cpp
index 7d129b164ed..5a8c1cee67c 100644
--- a/src/mongo/db/session.cpp
+++ b/src/mongo/db/session.cpp
@@ -618,7 +618,7 @@ Session::TxnResources::TxnResources(OperationContext* opCtx) {
     _locker->unsetThreadId();
 
     _recoveryUnit = std::unique_ptr<RecoveryUnit>(opCtx->releaseRecoveryUnit());
-    opCtx->setRecoveryUnit(opCtx->getServiceContext()->getGlobalStorageEngine()->newRecoveryUnit(),
+    opCtx->setRecoveryUnit(opCtx->getServiceContext()->getStorageEngine()->newRecoveryUnit(),
                            WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
 
     _readConcernArgs = repl::ReadConcernArgs::get(opCtx);
@@ -837,9 +837,8 @@ void Session::abortActiveTransaction(OperationContext* opCtx) {
         }
         // We must clear the recovery unit so any post-transaction writes can run without
         // transactional settings such as a read timestamp.
-        opCtx->setRecoveryUnit(
-            opCtx->getServiceContext()->getGlobalStorageEngine()->newRecoveryUnit(),
-            WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
+        opCtx->setRecoveryUnit(opCtx->getServiceContext()->getStorageEngine()->newRecoveryUnit(),
+                               WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
     }
     if (canKillCursors) {
         _killTransactionCursors(opCtx, _sessionId, txnNumberAtStart);
     }
@@ -992,9 +991,8 @@ void Session::_commitTransaction(stdx::unique_lock lk, OperationCon
         }
         // We must clear the recovery unit so any post-transaction writes can run without
         // transactional settings such as a read timestamp.
-        opCtx->setRecoveryUnit(
-            opCtx->getServiceContext()->getGlobalStorageEngine()->newRecoveryUnit(),
-            WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
+        opCtx->setRecoveryUnit(opCtx->getServiceContext()->getStorageEngine()->newRecoveryUnit(),
+                               WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
         _commitcv.notify_all();
     });
     lk.unlock();
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
index 4d895c09b20..975b1c3413e 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
@@ -81,7 +81,7 @@ void DataFileSync::run() {
             auto opCtx = cc().makeOperationContext();
 
             Date_t start = jsTime();
-            StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+            StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine();
 
             dur::notifyPreDataFileFlush();
             int numFiles = storageEngine->flushAllFiles(opCtx.get(), true);
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 5ecca6b9c9b..211db208156 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -115,7 +115,7 @@ MmapV1ExtentManager::MmapV1ExtentManager(StringData dbname, StringData path, boo
       _path(path.toString()),
       _directoryPerDB(directoryPerDB),
       _rid(RESOURCE_METADATA, dbname) {
-    StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
+    StorageEngine* engine = getGlobalServiceContext()->getStorageEngine();
     invariant(engine->isMmapV1());
     MMAPV1Engine* mmapEngine = static_cast<MMAPV1Engine*>(engine);
     _recordAccessTracker = &mmapEngine->getRecordAccessTracker();
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index 1f24b6d8af9..6fdc7de4304 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -54,7 +54,7 @@ public:
 
     virtual BSONObj generateSection(OperationContext* opCtx,
                                     const BSONElement& configElement) const {
-        auto engine = opCtx->getClient()->getServiceContext()->getGlobalStorageEngine();
+        auto engine = opCtx->getClient()->getServiceContext()->getStorageEngine();
         return BSON("name" << storageGlobalParams.engine << "supportsCommittedReads"
                            << bool(engine->getSnapshotManager())
                            << "supportsSnapshotReadConcern"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
index 03934922aef..3112cbd48ab 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
@@ -72,7 +72,7 @@ public:
      * Returns true iff there was an oplog to delete from.
      */
     bool _deleteExcessDocuments() {
-        if (!getGlobalServiceContext()->getGlobalStorageEngine()) {
+        if (!getGlobalServiceContext()->getStorageEngine()) {
             LOG(2) << "no global storage engine yet";
             return false;
         }
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index 2237cdcbdf0..91e98404a53 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -98,7 +98,7 @@ StatusWith extractWriteConcern(OperationContext* opCtx,
 
 Status validateWriteConcern(OperationContext* opCtx, const WriteConcernOptions& writeConcern) {
     if (writeConcern.syncMode == WriteConcernOptions::SyncMode::JOURNAL &&
-        !opCtx->getServiceContext()->getGlobalStorageEngine()->isDurable()) {
+        !opCtx->getServiceContext()->getStorageEngine()->isDurable()) {
         return Status(ErrorCodes::BadValue,
                       "cannot use 'j' option when a host does not have journaling enabled");
     }
@@ -183,7 +183,7 @@ Status waitForWriteConcern(OperationContext* opCtx,
         case WriteConcernOptions::SyncMode::NONE:
             break;
         case WriteConcernOptions::SyncMode::FSYNC: {
-            StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+            StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine();
             if (!storageEngine->isDurable()) {
                 result->fsyncFiles = storageEngine->flushAllFiles(opCtx, true);
             } else {
-- 
cgit v1.2.1