author     Vishnu Kaushik <vishnu.kaushik@mongodb.com>        2020-09-11 19:46:48 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2020-09-11 20:28:52 +0000
commit     8911b6cdda36dac94d6e5ae8e5950452e0ebf778 (patch)
tree       2403a84f0d932fe639a3f07e737b74ad3e768307
parent     dfbcc00f25717beae48ca28eb6ab77c3589a1ef7 (diff)
SERVER-43385 remove MMAPv1 references
22 files changed, 19 insertions, 118 deletions
diff --git a/buildscripts/gdb/mongo_lock.py b/buildscripts/gdb/mongo_lock.py
index 09996428a91..448d3518f46 100644
--- a/buildscripts/gdb/mongo_lock.py
+++ b/buildscripts/gdb/mongo_lock.py
@@ -309,10 +309,7 @@ def find_mutex_holder(graph, thread_dict, show):
 
 def find_lock_manager_holders(graph, thread_dict, show):  # pylint: disable=too-many-locals
     """Find lock manager holders."""
-    # In versions of MongoDB 4.0 and older, the LockerImpl class is templatized with a boolean
-    # parameter. With the removal of the MMAPv1 storage engine in MongoDB 4.2, the LockerImpl class
-    # is no longer templatized.
-    frame = find_frame(r'mongo::LockerImpl(?:\<.*\>)?::')
+    frame = find_frame(r'mongo::LockerImpl::')
 
     if not frame:
         return
@@ -321,15 +318,7 @@ def find_lock_manager_holders(graph, thread_dict, show):  # pylint: disable=too-many-locals
 
     (_, lock_waiter_lwpid, _) = gdb.selected_thread().ptid
     lock_waiter = thread_dict[lock_waiter_lwpid]
 
-    try:
-        locker_ptr_type = gdb.lookup_type("mongo::LockerImpl<false>").pointer()
-    except gdb.error as err:
-        # If we don't find the templatized version of the LockerImpl class, then we try to find the
-        # non-templatized version.
-        if not err.args[0].startswith("No type named"):
-            raise
-
-        locker_ptr_type = gdb.lookup_type("mongo::LockerImpl").pointer()
+    locker_ptr_type = gdb.lookup_type("mongo::LockerImpl").pointer()
 
     lock_head = gdb.parse_and_eval(
         "mongo::getGlobalLockManager()->_getBucket(resId)->findOrInsert(resId)")
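For context, gdb.lookup_type raises gdb.error when the requested type does not exist, which is exactly what the deleted fallback caught. A minimal sketch of that pre-4.2 lookup-with-fallback pattern, meaningful only inside a GDB session with MongoDB debug symbols loaded:

    import gdb  # provided by GDB's embedded Python; not installable via pip

    def lookup_type_with_fallback(primary, fallback):
        """Return a pointer type for `primary`, falling back to `fallback`.

        Mirrors the deleted pre-4.2 logic; with MMAPv1 gone there is a single
        non-templatized mongo::LockerImpl, so the fallback is no longer needed.
        """
        try:
            return gdb.lookup_type(primary).pointer()
        except gdb.error as err:
            # Anything other than an unknown-type error is unexpected; re-raise.
            if not err.args[0].startswith("No type named"):
                raise
            return gdb.lookup_type(fallback).pointer()

    # Equivalent of the old call site:
    # locker_ptr_type = lookup_type_with_fallback("mongo::LockerImpl<false>",
    #                                             "mongo::LockerImpl")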
diff --git a/buildscripts/packaging/msi/mongod.yaml b/buildscripts/packaging/msi/mongod.yaml
index ddfc798e4b4..ba10e7517ef 100644
--- a/buildscripts/packaging/msi/mongod.yaml
+++ b/buildscripts/packaging/msi/mongod.yaml
@@ -9,7 +9,6 @@ storage:
   journal:
     enabled: true
 #  engine:
-#  mmapv1:
 #  wiredTiger:
 
 # where to write logging data.
diff --git a/buildscripts/tests/ciconfig/evergreen.yml b/buildscripts/tests/ciconfig/evergreen.yml
index eeca00586b2..cb20efc55ec 100644
--- a/buildscripts/tests/ciconfig/evergreen.yml
+++ b/buildscripts/tests/ciconfig/evergreen.yml
@@ -138,7 +138,7 @@ tasks:
   - func: "setup"
   - func: "run tests"
     vars:
-      resmoke_args: "--suites=somesuite --storageEngine=mmapv1"
+      resmoke_args: "--suites=somesuite --storageEngine=wiredTiger"
 
 modules:
diff --git a/buildscripts/tests/ciconfig/test_evergreen.py b/buildscripts/tests/ciconfig/test_evergreen.py
index 2132e5ecf91..e1734016023 100644
--- a/buildscripts/tests/ciconfig/test_evergreen.py
+++ b/buildscripts/tests/ciconfig/test_evergreen.py
@@ -458,7 +458,7 @@ class TestVariant(unittest.TestCase):
 
         # Check combined_resmoke_args when test_flags is set on the variant.
         resmoke_task = variant_ubuntu.get_task("resmoke_task")
-        self.assertEqual("--suites=somesuite --storageEngine=mmapv1 --param=value --ubuntu",
+        self.assertEqual("--suites=somesuite --storageEngine=wiredTiger --param=value --ubuntu",
                          resmoke_task.combined_resmoke_args)
 
         # Check combined_resmoke_args when the task doesn't have resmoke_args.
@@ -468,7 +468,7 @@ class TestVariant(unittest.TestCase):
         # Check combined_resmoke_args when test_flags is not set on the variant.
         variant_debian = self.conf.get_variant("debian")
         resmoke_task = variant_debian.get_task("resmoke_task")
-        self.assertEqual("--suites=somesuite --storageEngine=mmapv1",
+        self.assertEqual("--suites=somesuite --storageEngine=wiredTiger",
                          resmoke_task.combined_resmoke_args)
 
         # Check for tasks included in task_groups
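The updated assertions pin down the contract under test: a task's resmoke_args are space-joined with any variant-level test_flags, and wiredTiger is now the only storage engine exercised. A rough sketch of that contract (the function name, signature, and None handling here are illustrative assumptions, not the actual buildscripts/ciconfig/evergreen.py API):

    def combined_resmoke_args(resmoke_args, test_flags=None):
        """Sketch: merge a task's resmoke_args with a variant's test_flags."""
        if resmoke_args is None:
            # Assumed behavior for tasks that define no resmoke_args.
            return None
        return f"{resmoke_args} {test_flags}" if test_flags else resmoke_args

    assert (combined_resmoke_args("--suites=somesuite --storageEngine=wiredTiger",
                                  "--param=value --ubuntu") ==
            "--suites=somesuite --storageEngine=wiredTiger --param=value --ubuntu")
    assert (combined_resmoke_args("--suites=somesuite --storageEngine=wiredTiger") ==
            "--suites=somesuite --storageEngine=wiredTiger")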
delete m3result.parsed.net.transportLayer;
-delete m3result.parsed.storage.mmapv1;
 delete m3result.parsed.setParameter;
 delete m3result.parsed.storage.engine;
 delete m3result.parsed.storage.inMemory;
diff --git a/debian/mongod.conf b/debian/mongod.conf
index 4a82d14d95a..695b62ca9b5 100644
--- a/debian/mongod.conf
+++ b/debian/mongod.conf
@@ -9,7 +9,6 @@ storage:
   journal:
     enabled: true
 #  engine:
-#  mmapv1:
 #  wiredTiger:
 
 # where to write logging data.
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
index 19ba3f27ab9..0eebb29cb59 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
@@ -4,11 +4,9 @@
  * findAndModify_update_grow.js
  *
  * Each thread inserts a single document into a collection, and then
- * repeatedly performs the findAndModify command. Attempts to trigger
- * the same conditions that with MMAPv1 caused a document move,
- * by growing the size of the inserted document using the $set and $mul
- * update operators. Now checks that document moves don't happen and
- * that large changes in document size are handled correctly.
+ * repeatedly performs the findAndModify command. Checks that document
+ * moves don't happen and that large changes in document size are handled
+ * correctly.
  */
 
 load('jstests/concurrency/fsm_workload_helpers/server_types.js');  // for isMongod
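The workload comment now states the invariant directly: growing a document must not move it, and large size changes must be handled correctly. A standalone sketch of the same growth pattern using PyMongo (database and collection names are hypothetical; assumes a mongod listening on the default port):

    from pymongo import MongoClient
    from pymongo.collection import ReturnDocument

    coll = MongoClient().test.find_and_modify_grow  # hypothetical namespace
    coll.delete_many({})
    coll.insert_one({"_id": 0, "x": 1, "pad": ""})

    for i in range(10):
        # $mul doubles the counter while $set enlarges the padding string,
        # so the document's size grows on every findAndModify round trip.
        doc = coll.find_one_and_update(
            {"_id": 0},
            {"$mul": {"x": 2}, "$set": {"pad": "x" * 2**i}},
            return_document=ReturnDocument.AFTER)

    assert doc["x"] == 1024 and len(doc["pad"]) == 512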
diff --git a/jstests/noPassthrough/command_line_parsing.js b/jstests/noPassthrough/command_line_parsing.js
index 511aabf90dd..95ecdb6cb85 100644
--- a/jstests/noPassthrough/command_line_parsing.js
+++ b/jstests/noPassthrough/command_line_parsing.js
@@ -28,7 +28,6 @@ MongoRunner.stopMongod(m2);
 
 // remove variables that depend on the way the test is started.
 delete m2result.parsed.net.transportLayer;
-delete m2result.parsed.storage.mmapv1;
 delete m2result.parsed.setParameter;
 delete m2result.parsed.storage.engine;
 delete m2result.parsed.storage.inMemory;
@@ -56,7 +55,6 @@ MongoRunner.stopMongod(m3);
 
 // remove variables that depend on the way the test is started.
 delete m3result.parsed.net.transportLayer;
-delete m3result.parsed.storage.mmapv1;
 delete m3result.parsed.setParameter;
 delete m3result.parsed.storage.engine;
 delete m3result.parsed.storage.inMemory;
diff --git a/jstests/noPassthrough/hyphenated_database_name.js b/jstests/noPassthrough/hyphenated_database_name.js
index 0290e4444d9..d6060655118 100644
--- a/jstests/noPassthrough/hyphenated_database_name.js
+++ b/jstests/noPassthrough/hyphenated_database_name.js
@@ -5,8 +5,8 @@
  */
 (function() {
 "use strict";
-var isDirectoryPerDBSupported = jsTest.options().storageEngine == "wiredTiger" ||
-    jsTest.options().storageEngine == "mmapv1" || !jsTest.options().storageEngine;
+var isDirectoryPerDBSupported =
+    jsTest.options().storageEngine == "wiredTiger" || !jsTest.options().storageEngine;
 
 if (!isDirectoryPerDBSupported)
     return;
diff --git a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
index 4e35c394b21..5191fb47127 100644
--- a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
+++ b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
@@ -42,8 +42,7 @@ twoPhaseDropTest.createCollection(fromCollName);
 twoPhaseDropTest.createCollection(toCollName);
 
 // Collection renames with dropTarget set to true should handle long index names in the target
-// collection gracefully. MMAPv1 imposes a hard limit on index namespaces so we have to drop
-// indexes that are too long to store on disk after renaming the collection.
+// collection gracefully.
 const primary = replTest.getPrimary();
 const testDb = primary.getDB(dbName);
 const fromColl = testDb.getCollection(fromCollName);
diff --git a/pytests/powertest.py b/pytests/powertest.py
index 6c2904132ef..eeef8472fa5 100755
--- a/pytests/powertest.py
+++ b/pytests/powertest.py
@@ -1778,7 +1778,7 @@ Examples:
       --crashOption output1
       --sshCrashUserHost admin@10.4.100.2
       --sshCrashOption "-oKexAlgorithms=+diffie-hellman-group1-sha1 -i /Users/jonathan/.ssh/mFi.pem"
-      --mongodOptions "--storageEngine mmapv1"
+      --mongodOptions "--storageEngine wiredTiger"
 
 Linux server running in AWS, testing nojournal:
   python powertest.py
@@ -1978,9 +1978,7 @@ Examples:
     mongod_options.add_option(
         "--removeLockFile", dest="remove_lock_file",
         help="If specified, the mongod.lock file will be deleted after a"
-        " powercycle event, before mongod is started. This is a"
-        " workaround for mongod failing start with MMAPV1 (See"
-        " SERVER-15109).", action="store_true", default=False)
+        " powercycle event, before mongod is started.", action="store_true", default=False)
 
     # Client options
     mongo_path = distutils.spawn.find_executable("dist-test/bin/mongo",
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 2fef0686a47..1c74a34159f 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -251,7 +251,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
     // collection already exists under a different name. If so, rename it into place. As this is
     // done during replay of the oplog, the operations do not need to be atomic, just idempotent.
     // We need to do the renaming part in a separate transaction, as we cannot transactionally
-    // create a database on MMAPv1, which could result in createCollection failing if the database
+    // create a database, which could result in createCollection failing if the database
     // does not yet exist.
     if (ui) {
         // Return an optional, indicating whether we need to early return (if the collection already
@@ -291,11 +291,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
             // In the case of oplog replay, a future command may have created or renamed a
             // collection with that same name. In that case, renaming this future collection to
             // a random temporary name is correct: once all entries are replayed no temporary
-            // names will remain. On MMAPv1 the rename can result in index names that are too
-            // long. However this should only happen for initial sync and "resync collection"
-            // for rollback, so we can let the error propagate resulting in an abort and restart
-            // of the initial sync or result in rollback to fassert, requiring a resync of that
-            // node.
+            // names will remain.
             const bool stayTemp = true;
             auto futureColl = db
                 ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, newCollName)
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 08383c041bc..9272fd2e3ff 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -229,9 +229,6 @@ Status renameCollectionAndDropTarget(OperationContext* opCtx,
     // Target collection exists - drop it.
     invariant(options.dropTarget);
 
-    // If this rename collection is replicated, check for long index names in the target
-    // collection that may exceed the MMAPv1 namespace limit when the target collection
-    // is renamed with a drop-pending namespace.
     auto replCoord = repl::ReplicationCoordinator::get(opCtx);
     auto isOplogDisabledForNamespace = replCoord->isOplogDisabledFor(opCtx, target);
     if (!isOplogDisabledForNamespace) {
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index 1c9199b727f..8e9f5c4a89f 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -264,10 +264,9 @@ ProcessOplogResult processSessionOplog(const BSONObj& oplogBSON,
         NamespaceString::kSessionTransactionsTableNamespace.ns(),
         [&] {
             // Need to take global lock here so repl::logOp will not unlock it and trigger the
-            // invariant that disallows unlocking global lock while inside a WUOW. Grab a DBLock
-            // here instead of plain GlobalLock to make sure the MMAPV1 flush lock will be
-            // lock/unlocked correctly. Take the transaction table db lock to ensure the same lock
-            // ordering with normal replicated updates to the table.
+            // invariant that disallows unlocking global lock while inside a WUOW. Take the
+            // transaction table db lock to ensure the same lock ordering with normal replicated
+            // updates to the table.
             Lock::DBLock lk(
                 opCtx, NamespaceString::kSessionTransactionsTableNamespace.db(), MODE_IX);
             WriteUnitOfWork wunit(opCtx);
diff --git a/src/mongo/db/server_recovery.h b/src/mongo/db/server_recovery.h
index 3b9d87a8065..52f9858ce4d 100644
--- a/src/mongo/db/server_recovery.h
+++ b/src/mongo/db/server_recovery.h
@@ -37,8 +37,7 @@
 namespace mongo {
 
 /**
- * This class is for use with non-MMAPv1 storage engines that track record store sizes in catalog
- * metadata.
+ * This class is for use with storage engines that track record store sizes in catalog metadata.
  *
  * During normal server operation, we adjust the size metadata for all record stores. But when
 * performing replication recovery, we avoid doing so, as we trust that the size metadata on disk is
diff --git a/src/mongo/util/processinfo.h b/src/mongo/util/processinfo.h
index a65285ef134..732649d60b8 100644
--- a/src/mongo/util/processinfo.h
+++ b/src/mongo/util/processinfo.h
@@ -160,13 +160,6 @@ public:
     static bool blockInMemory(const void* start);
 
     /**
-     * Returns a positive floating point number between 0.0 and 1.0 to inform MMapV1 how much it
-     * must remap pages to bring the system page file implementation back below a certain
-     * threshold. A number of 1.0 means remap everything.
-     */
-    static double getSystemMemoryPressurePercentage();
-
-    /**
      * @return a pointer aligned to the start of the page the provided pointer belongs to.
      *
      * NOTE requires blockCheckSupported() == true
diff --git a/src/mongo/util/processinfo_freebsd.cpp b/src/mongo/util/processinfo_freebsd.cpp
index d7f1d4127ca..f47e13e2b92 100644
--- a/src/mongo/util/processinfo_freebsd.cpp
+++ b/src/mongo/util/processinfo_freebsd.cpp
@@ -120,10 +120,6 @@ int ProcessInfo::getResidentSize() {
     return rss;
 }
 
-double ProcessInfo::getSystemMemoryPressurePercentage() {
-    return 0.0;
-}
-
 void ProcessInfo::SystemInfo::collectSystemInfo() {
     osType = "BSD";
     osName = "FreeBSD";
diff --git a/src/mongo/util/processinfo_linux.cpp b/src/mongo/util/processinfo_linux.cpp
index 78c38ec996f..639f0f57e82 100644
--- a/src/mongo/util/processinfo_linux.cpp
+++ b/src/mongo/util/processinfo_linux.cpp
@@ -538,10 +538,6 @@ int ProcessInfo::getResidentSize() {
     return (int)((p.getResidentSizeInPages() * getPageSize()) / (1024.0 * 1024));
 }
 
-double ProcessInfo::getSystemMemoryPressurePercentage() {
-    return 0.0;
-}
-
 void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {
     struct rusage ru;
     getrusage(RUSAGE_SELF, &ru);
diff --git a/src/mongo/util/processinfo_openbsd.cpp b/src/mongo/util/processinfo_openbsd.cpp
index 5736504ce9c..260acc84a20 100644
--- a/src/mongo/util/processinfo_openbsd.cpp
+++ b/src/mongo/util/processinfo_openbsd.cpp
@@ -130,10 +130,6 @@ int ProcessInfo::getResidentSize() {
     return rss;
 }
 
-double ProcessInfo::getSystemMemoryPressurePercentage() {
-    return 0.0;
-}
-
 void ProcessInfo::SystemInfo::collectSystemInfo() {
     osType = "BSD";
     osName = "OpenBSD";
diff --git a/src/mongo/util/processinfo_osx.cpp b/src/mongo/util/processinfo_osx.cpp
index aab2f4e3fd7..4de98f203ca 100644
--- a/src/mongo/util/processinfo_osx.cpp
+++ b/src/mongo/util/processinfo_osx.cpp
@@ -116,10 +116,6 @@ int ProcessInfo::getResidentSize() {
     return (int)(ti.resident_size / (1024 * 1024));
 }
 
-double ProcessInfo::getSystemMemoryPressurePercentage() {
-    return 0.0;
-}
-
 void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {
     struct task_events_info taskInfo;
     mach_msg_type_number_t taskInfoCount = TASK_EVENTS_INFO_COUNT;
diff --git a/src/mongo/util/processinfo_solaris.cpp b/src/mongo/util/processinfo_solaris.cpp
index 54ae6184fe2..61e509a1089 100644
--- a/src/mongo/util/processinfo_solaris.cpp
+++ b/src/mongo/util/processinfo_solaris.cpp
@@ -124,10 +124,6 @@ int ProcessInfo::getResidentSize() {
     return static_cast<int>(p.psinfo.pr_rssize / 1024);
 }
 
-double ProcessInfo::getSystemMemoryPressurePercentage() {
-    return 0.0;
-}
-
 void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {
     ProcUsage p;
     info.appendNumber("page_faults", static_cast<long long>(p.prusage.pr_majf));
diff --git a/src/mongo/util/processinfo_unknown.cpp b/src/mongo/util/processinfo_unknown.cpp
index 05f84b7f22c..b77e79d8fae 100644
--- a/src/mongo/util/processinfo_unknown.cpp
+++ b/src/mongo/util/processinfo_unknown.cpp
@@ -51,10 +51,6 @@ int ProcessInfo::getResidentSize() {
     return -1;
 }
 
-double ProcessInfo::getSystemMemoryPressurePercentage() {
-    return 0.0;
-}
-
 bool ProcessInfo::checkNumaEnabled() {
     return false;
 }
diff --git a/src/mongo/util/processinfo_windows.cpp b/src/mongo/util/processinfo_windows.cpp
index 31c0908fb32..b8a976fabcd 100644
--- a/src/mongo/util/processinfo_windows.cpp
+++ b/src/mongo/util/processinfo_windows.cpp
@@ -97,45 +97,6 @@ int ProcessInfo::getResidentSize() {
     return _wconvertmtos(pmc.WorkingSetSize);
 }
 
-double ProcessInfo::getSystemMemoryPressurePercentage() {
-    MEMORYSTATUSEX mse;
-    mse.dwLength = sizeof(mse);
-    BOOL status = GlobalMemoryStatusEx(&mse);
-    if (!status) {
-        DWORD gle = GetLastError();
-        LOGV2_ERROR(23814,
-                    "GlobalMemoryStatusEx failed with {errnoWithDescription_gle}",
-                    "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
-        fassert(28623, status);
-    }
-
-    DWORDLONG totalPageFile = mse.ullTotalPageFile;
-    if (totalPageFile == 0) {
-        return false;
-    }
-
-    // If the page file is >= 50%, say we are low on system memory
-    // If the page file is >= 75%, we are running very low on system memory
-    //
-    DWORDLONG highWatermark = totalPageFile / 2;
-    DWORDLONG veryHighWatermark = 3 * (totalPageFile / 4);
-
-    DWORDLONG usedPageFile = mse.ullTotalPageFile - mse.ullAvailPageFile;
-
-    // Below the watermark, we are fine
-    // Also check we will not do a divide by zero below
-    if (usedPageFile < highWatermark || veryHighWatermark <= highWatermark) {
-        return 0.0;
-    }
-
-    // Above the high watermark, we tell MMapV1 how much to remap
-    // < 1.0, we have some pressure, but not much so do not be very aggressive
-    // 1.0 = we are at very high watermark, remap everything
-    // > 1.0, the user may run out of memory, remap everything
-    // i.e., Example (N - 50) / (75 - 50)
-    return static_cast<double>(usedPageFile - highWatermark) / (veryHighWatermark - highWatermark);
-}
-
 void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {
     MEMORYSTATUSEX mse;
     mse.dwLength = sizeof(mse);
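The deleted Windows-only heuristic mapped page-file usage onto a 0.0 to 1.0 pressure value: 0.0 below a 50% watermark, 1.0 at a 75% watermark, rising linearly in between. A small Python rendering of the same arithmetic, for reference:

    def memory_pressure_fraction(total_page_file, avail_page_file):
        """Sketch of the removed heuristic: (used% - 50) / (75 - 50)."""
        if total_page_file == 0:
            return 0.0
        high = total_page_file // 2              # 50% watermark
        very_high = 3 * (total_page_file // 4)   # 75% watermark
        used = total_page_file - avail_page_file
        # Below the high watermark there is no pressure; the second clause
        # guards the division against degenerate page-file sizes.
        if used < high or very_high <= high:
            return 0.0
        return (used - high) / (very_high - high)

    # 60% of the page file in use -> (60 - 50) / (75 - 50) = 0.4
    assert abs(memory_pressure_fraction(100, 40) - 0.4) < 1e-9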