author     vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>   2021-11-06 15:21:57 +0000
committer  vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>   2021-11-06 15:21:57 +0000
commit     88668fc5a61dd18a110fe2a1d711b99ab1aeb9a6 (patch)
tree       47f33093967bf0a34cb7dcd27fac5d59e316f708 /src/VBox/VMM
parent     1000169e2542e27a7cc510362ea32c21cbf27fa8 (diff)
download   VirtualBox-svn-88668fc5a61dd18a110fe2a1d711b99ab1aeb9a6.tar.gz
VMM/GMM: Removed all the legacy mode code (disabled everywhere since r146982). bugref:10093
git-svn-id: https://www.virtualbox.org/svn/vbox/trunk@92248 cfe28804-0f27-0410-a406-dd0f0b0b656f
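In rough outline, the commit leaves exactly one way to get a GMM chunk: ring-0 allocates it itself, instead of locking down ring-3 memory "seeded" via GMMR0SeedChunk. The following sketch is a condensation of the gmmR0AllocateChunkNew hunk further down (not a complete function; all identifiers are from the patched GMMR0.cpp):

```c
/* Sketch only: condensed from the gmmR0AllocateChunkNew hunk below. */
RTR0MEMOBJ hMemObj;
int        rc;
#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
if (pGMM->fHasWorkingAllocPhysNC)   /* probed once in GMMR0Init */
    rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
else
#endif
    rc = RTR0MemObjAllocPage(&hMemObj, GMM_CHUNK_SIZE, false /*fExecutable*/);
/* On success the chunk is handed to gmmR0RegisterChunk(); the ring-3
   seeding path (GMMR0SeedChunk / VERR_GMM_SEED_ME) no longer exists. */
```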
Diffstat (limited to 'src/VBox/VMM')
-rw-r--r--  src/VBox/VMM/VMMR0/GMMR0.cpp   236
-rw-r--r--  src/VBox/VMM/VMMR0/PGMR0.cpp     5
-rw-r--r--  src/VBox/VMM/VMMR0/VMMR0.cpp     6
-rw-r--r--  src/VBox/VMM/VMMR3/GMM.cpp      44
-rw-r--r--  src/VBox/VMM/VMMR3/PGM.cpp      27
-rw-r--r--  src/VBox/VMM/VMMR3/PGMPhys.cpp  14
6 files changed, 29 insertions, 303 deletions
diff --git a/src/VBox/VMM/VMMR0/GMMR0.cpp b/src/VBox/VMM/VMMR0/GMMR0.cpp
index 969a0d477fe..81747333a9e 100644
--- a/src/VBox/VMM/VMMR0/GMMR0.cpp
+++ b/src/VBox/VMM/VMMR0/GMMR0.cpp
@@ -193,11 +193,6 @@
 # define VBOX_USE_CRIT_SECT_FOR_GIANT
 #endif
 
-#if defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM) && !defined(RT_OS_DARWIN) && 0
-/** Enable the legacy mode code (will be dropped soon). */
-# define GMM_WITH_LEGACY_MODE
-#endif
-
 
 /*********************************************************************************************************************************
 *   Structures and Typedefs                                                                                                      *
@@ -464,10 +459,6 @@ typedef struct GMMCHUNK
  * @{ */
 /** Indicates that the chunk is a large page (2MB). */
 #define GMM_CHUNK_FLAGS_LARGE_PAGE  UINT16_C(0x0001)
-#ifdef GMM_WITH_LEGACY_MODE
-/** Indicates that the chunk was locked rather than allocated directly. */
-# define GMM_CHUNK_FLAGS_SEEDED     UINT16_C(0x0002)
-#endif
 /** @} */
 
@@ -578,17 +569,11 @@ typedef struct GMM
     /** The number of current ballooned pages. */
     uint64_t            cBalloonedPages;
 
-#ifndef GMM_WITH_LEGACY_MODE
-# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
     /** Whether #RTR0MemObjAllocPhysNC works. */
     bool                fHasWorkingAllocPhysNC;
-# else
-    bool                fPadding;
-# endif
 #else
-    /** The legacy allocation mode indicator.
-     * This is determined at initialization time. */
-    bool                fLegacyAllocationMode;
+    bool                fPadding;
 #endif
     /** The bound memory mode indicator.
      * When set, the memory will be bound to a specific VM and never
@@ -828,13 +813,12 @@ GMMR0DECL(int) GMMR0Init(void)
         rc = RTSpinlockCreate(&pGMM->hSpinLockTree, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "gmm-chunk-tree");
         if (RT_SUCCESS(rc))
         {
-#ifndef GMM_WITH_LEGACY_MODE
             /*
              * Figure out how we're going to allocate stuff (only applicable to
              * host with linear physical memory mappings).
              */
             pGMM->fBoundMemoryMode = false;
-# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
             pGMM->fHasWorkingAllocPhysNC = false;
 
             RTR0MEMOBJ hMemObj;
@@ -848,38 +832,6 @@ GMMR0DECL(int) GMMR0Init(void)
             else if (rc != VERR_NOT_SUPPORTED)
                 SUPR0Printf("GMMR0Init: Warning! RTR0MemObjAllocPhysNC(, %u, NIL_RTHCPHYS) -> %d!\n", GMM_CHUNK_SIZE, rc);
 # endif
-#else /* GMM_WITH_LEGACY_MODE */
-            /*
-             * Check and see if RTR0MemObjAllocPhysNC works.
-             */
-# if 0 /* later, see @bufref{3170}. */
-            RTR0MEMOBJ MemObj;
-            rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
-            if (RT_SUCCESS(rc))
-            {
-                rc = RTR0MemObjFree(MemObj, true);
-                AssertRC(rc);
-            }
-            else if (rc == VERR_NOT_SUPPORTED)
-                pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
-            else
-                SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
-# else
-# if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
-            pGMM->fLegacyAllocationMode = false;
-# if ARCH_BITS == 32
-            /* Don't reuse possibly partial chunks because of the virtual
-               address space limitation. */
-            pGMM->fBoundMemoryMode = true;
-# else
-            pGMM->fBoundMemoryMode = false;
-# endif
-# else
-            pGMM->fLegacyAllocationMode = true;
-            pGMM->fBoundMemoryMode = true;
-# endif
-# endif
-#endif /* GMM_WITH_LEGACY_MODE */
 
             /*
              * Query system page count and guess a reasonable cMaxPages value.
@@ -893,9 +845,7 @@ GMMR0DECL(int) GMMR0Init(void)
             pGMM->idFreeGeneration = UINT64_MAX / 4 - 128;
 
             g_pGMM = pGMM;
-#ifdef GMM_WITH_LEGACY_MODE
-            LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
-#elif defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
+#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
             LogFlow(("GMMInit: pGMM=%p fBoundMemoryMode=%RTbool fHasWorkingAllocPhysNC=%RTbool\n", pGMM, pGMM->fBoundMemoryMode, pGMM->fHasWorkingAllocPhysNC));
 #else
             LogFlow(("GMMInit: pGMM=%p fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fBoundMemoryMode));
@@ -2212,7 +2162,7 @@ static uint32_t gmmR0AllocatePagesFromChunk(PGMMCHUNK pChunk, uint16_t const hGV
 /**
  * Registers a new chunk of memory.
  *
- * This is called by both gmmR0AllocateOneChunk and GMMR0SeedChunk.
+ * This is called by gmmR0AllocateOneChunk.
  *
  * @returns VBox status code.  On success, the giant GMM lock will be held, the
  *          caller must release it (ugly).
@@ -2234,21 +2184,13 @@ static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ hMemO
 {
     Assert(pGMM->hMtxOwner != RTThreadNativeSelf());
     Assert(hGVM != NIL_GVM_HANDLE || pGMM->fBoundMemoryMode);
-#ifdef GMM_WITH_LEGACY_MODE
-    Assert(fChunkFlags == 0 || fChunkFlags == GMM_CHUNK_FLAGS_LARGE_PAGE || fChunkFlags == GMM_CHUNK_FLAGS_SEEDED);
-#else
     Assert(fChunkFlags == 0 || fChunkFlags == GMM_CHUNK_FLAGS_LARGE_PAGE);
-#endif
 
 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
     /*
      * Get a ring-0 mapping of the object.
      */
-# ifdef GMM_WITH_LEGACY_MODE
-    uint8_t *pbMapping = !(fChunkFlags & GMM_CHUNK_FLAGS_SEEDED) ? (uint8_t *)RTR0MemObjAddress(hMemObj) : NULL;
-# else
     uint8_t *pbMapping = (uint8_t *)RTR0MemObjAddress(hMemObj);
-# endif
     if (!pbMapping)
     {
         RTR0MEMOBJ hMapObj;
@@ -2359,17 +2301,13 @@ static int gmmR0AllocateChunkNew(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet, ui
     gmmR0MutexRelease(pGMM);
 
     RTR0MEMOBJ hMemObj;
-#ifndef GMM_WITH_LEGACY_MODE
     int rc;
-# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
     if (pGMM->fHasWorkingAllocPhysNC)
         rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
     else
-# endif
-        rc = RTR0MemObjAllocPage(&hMemObj, GMM_CHUNK_SIZE, false /*fExecutable*/);
-#else
-    int rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
 #endif
+        rc = RTR0MemObjAllocPage(&hMemObj, GMM_CHUNK_SIZE, false /*fExecutable*/);
     if (RT_SUCCESS(rc))
     {
         /** @todo Duplicate gmmR0RegisterChunk here so we can avoid chaining up the
@@ -2655,8 +2593,6 @@ static bool gmmR0ShouldAllocatePagesInOtherChunksBecauseOfLotsFree(PGMM pGMM)
  *
  * @returns VBox status code:
  * @retval  VINF_SUCCESS on success.
- * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk or
- *          gmmR0AllocateMoreChunks is necessary.
  * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
  * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
  *          that is we're trying to allocate more than we've reserved.
@@ -2720,19 +2656,6 @@ static int gmmR0AllocatePagesNew(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGE
             AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
     }
 
-#ifdef GMM_WITH_LEGACY_MODE
-    /*
-     * If we're in legacy memory mode, it's easy to figure if we have
-     * sufficient number of pages up-front.
-     */
-    if (   pGMM->fLegacyAllocationMode
-        && pGVM->gmm.s.Private.cFreePages < cPages)
-    {
-        Assert(pGMM->fBoundMemoryMode);
-        return VERR_GMM_SEED_ME;
-    }
-#endif
-
     /*
      * Update the accounts before we proceed because we might be leaving the
      * protection of the global mutex and thus run the risk of permitting
@@ -2748,18 +2671,6 @@ static int gmmR0AllocatePagesNew(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGE
     pGVM->gmm.s.Stats.cPrivatePages += cPages;
     pGMM->cAllocatedPages           += cPages;
 
-#ifdef GMM_WITH_LEGACY_MODE
-    /*
-     * Part two of it's-easy-in-legacy-memory-mode.
-     */
-    if (pGMM->fLegacyAllocationMode)
-    {
-        uint32_t iPage = gmmR0AllocatePagesInBoundMode(pGVM, 0, cPages, paPages);
-        AssertReleaseReturn(iPage == cPages, VERR_GMM_ALLOC_PAGES_IPE);
-        return VINF_SUCCESS;
-    }
-#endif
-
     /*
      * Bound mode is also relatively straightforward.
      */
@@ -2902,7 +2813,6 @@ static int gmmR0AllocatePagesNew(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGE
  *          shared page.
  * @retval  VERR_GMM_NOT_PAGE_OWNER if one of the pages to be updated wasn't
  *          owned by the VM.
- * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
  * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
  * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
  *          that is we're trying to allocate more than we've reserved.
@@ -3099,7 +3009,6 @@ GMMR0DECL(int) GMMR0AllocateHandyPages(PGVM pGVM, VMCPUID idCpu, uint32_t cPages
  * @returns VBox status code:
  * @retval  VINF_SUCCESS on success.
  * @retval  VERR_NOT_OWNER if the caller is not an EMT.
- * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
  * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
  * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
  *          that is we're trying to allocate more than we've reserved.
@@ -3227,12 +3136,6 @@ GMMR0DECL(int) GMMR0AllocateLargePage(PGVM pGVM, VMCPUID idCpu, uint32_t cbPage
     if (RT_FAILURE(rc))
         return rc;
 
-#ifdef GMM_WITH_LEGACY_MODE
-    // /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
-    // if (pGMM->fLegacyAllocationMode)
-    //     return VERR_NOT_SUPPORTED;
-#endif
-
     *pHCPhys = NIL_RTHCPHYS;
     *pIdPage = NIL_GMM_PAGEID;
 
@@ -3330,12 +3233,6 @@ GMMR0DECL(int) GMMR0FreeLargePage(PGVM pGVM, VMCPUID idCpu, uint32_t idPage)
     if (RT_FAILURE(rc))
         return rc;
 
-#ifdef GMM_WITH_LEGACY_MODE
-    // /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
-    // if (pGMM->fLegacyAllocationMode)
-    //     return VERR_NOT_SUPPORTED;
-#endif
-
     gmmR0MutexAcquire(pGMM);
     if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
     {
@@ -3477,11 +3374,7 @@ static bool gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxed
      * Cleanup hack! Unmap the chunk from the callers address space.
      * This shouldn't happen, so screw lock contention...
      */
-    if (   pChunk->cMappingsX
-#ifdef GMM_WITH_LEGACY_MODE
-        && (!pGMM->fLegacyAllocationMode || (pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
-#endif
-        && pGVM)
+    if (pChunk->cMappingsX && pGVM)
         gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);
 
     /*
@@ -3621,10 +3514,6 @@ static void gmmR0FreePageWorker(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, uint32_t
                       || pChunk->pFreeNext == NULL
                       || pChunk->pFreePrev == NULL /** @todo this is probably misfiring, see reset... */))
    { /* likely */ }
-#ifdef GMM_WITH_LEGACY_MODE
-    else if (RT_LIKELY(pGMM->fLegacyAllocationMode && !(pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE)))
-    { /* likely */ }
-#endif
     else
         gmmR0FreeChunk(pGMM, NULL, pChunk, false);
 
@@ -4149,9 +4038,6 @@ GMMR0DECL(int) GMMR0QueryMemoryStatsReq(PGVM pGVM, VMCPUID idCpu, PGMMMEMSTATSRE
 static int gmmR0UnmapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
 {
     RT_NOREF_PV(pGMM);
-#ifdef GMM_WITH_LEGACY_MODE
-    Assert(!pGMM->fLegacyAllocationMode || (pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE));
-#endif
 
     /*
      * Find the mapping and try unmapping it.
@@ -4197,31 +4083,18 @@ static int gmmR0UnmapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
  */
 static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem)
 {
-#ifdef GMM_WITH_LEGACY_MODE
-    if (!pGMM->fLegacyAllocationMode || (pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
+    /*
+     * Lock the chunk and if possible leave the giant GMM lock.
+     */
+    GMMR0CHUNKMTXSTATE MtxState;
+    int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk,
+                                    fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT);
+    if (RT_SUCCESS(rc))
     {
-#endif
-        /*
-         * Lock the chunk and if possible leave the giant GMM lock.
-         */
-        GMMR0CHUNKMTXSTATE MtxState;
-        int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk,
-                                        fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT);
-        if (RT_SUCCESS(rc))
-        {
-            rc = gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);
-            gmmR0ChunkMutexRelease(&MtxState, pChunk);
-        }
-        return rc;
-#ifdef GMM_WITH_LEGACY_MODE
+        rc = gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);
+        gmmR0ChunkMutexRelease(&MtxState, pChunk);
     }
-
-    if (pChunk->hGVM == pGVM->hSelf)
-        return VINF_SUCCESS;
-
-    Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x (legacy)\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
-    return VERR_GMM_CHUNK_NOT_MAPPED;
-#endif
+    return rc;
 }
 
 
@@ -4238,24 +4111,7 @@ static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxed
  */
 static int gmmR0MapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
 {
-#ifdef GMM_WITH_LEGACY_MODE
-    /*
-     * If we're in legacy mode this is simple.
-     */
-    if (pGMM->fLegacyAllocationMode && !(pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
-    {
-        if (pChunk->hGVM != pGVM->hSelf)
-        {
-            Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
-            return VERR_GMM_CHUNK_NOT_FOUND;
-        }
-
-        *ppvR3 = RTR0MemObjAddressR3(pChunk->hMemObj);
-        return VINF_SUCCESS;
-    }
-#else
     RT_NOREF(pGMM);
-#endif
 
     /*
      * Check to see if the chunk is already mapped.
@@ -4499,62 +4355,6 @@ GMMR0DECL(int) GMMR0MapUnmapChunkReq(PGVM pGVM, PGMMMAPUNMAPCHUNKREQ pReq)
 }
 
 
-/**
- * Legacy mode API for supplying pages.
- *
- * The specified user address points to a allocation chunk sized block that
- * will be locked down and used by the GMM when the GM asks for pages.
- *
- * @returns VBox status code.
- * @param   pGVM        The global (ring-0) VM structure.
- * @param   idCpu       The VCPU id.
- * @param   pvR3        Pointer to the chunk size memory block to lock down.
- */
-GMMR0DECL(int) GMMR0SeedChunk(PGVM pGVM, VMCPUID idCpu, RTR3PTR pvR3)
-{
-#ifdef GMM_WITH_LEGACY_MODE
-    /*
-     * Validate input and get the basics.
-     */
-    PGMM pGMM;
-    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
-    if (RT_FAILURE(rc))
-        return rc;
-
-    AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
-    AssertReturn(!(PAGE_OFFSET_MASK & pvR3), VERR_INVALID_POINTER);
-
-    if (!pGMM->fLegacyAllocationMode)
-    {
-        Log(("GMMR0SeedChunk: not in legacy allocation mode!\n"));
-        return VERR_NOT_SUPPORTED;
-    }
-
-    /*
-     * Lock the memory and add it as new chunk with our hGVM.
-     * (The GMM locking is done inside gmmR0RegisterChunk.)
-     */
-    RTR0MEMOBJ hMemObj;
-    rc = RTR0MemObjLockUser(&hMemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
-    if (RT_SUCCESS(rc))
-    {
-        rc = gmmR0RegisterChunk(pGMM, &pGVM->gmm.s.Private, hMemObj, pGVM->hSelf, pGVM->pSession, GMM_CHUNK_FLAGS_SEEDED, NULL);
-        if (RT_SUCCESS(rc))
-            gmmR0MutexRelease(pGMM);
-        else
-            RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
-    }
-
-    LogFlow(("GMMR0SeedChunk: rc=%d (pvR3=%p)\n", rc, pvR3));
-    return rc;
-#else
-    RT_NOREF(pGVM, idCpu, pvR3);
-    return VERR_NOT_SUPPORTED;
-#endif
-}
-
-
 #ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
 /**
  * Gets the ring-0 virtual address for the given page.
diff --git a/src/VBox/VMM/VMMR0/PGMR0.cpp b/src/VBox/VMM/VMMR0/PGMR0.cpp
index dc60cb18f72..58e17af3f61 100644
--- a/src/VBox/VMM/VMMR0/PGMR0.cpp
+++ b/src/VBox/VMM/VMMR0/PGMR0.cpp
@@ -176,7 +176,7 @@ VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu)
 
             pGVM->pgm.s.cHandyPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages);
         }
-        else if (rc != VERR_GMM_SEED_ME)
+        else
         {
             if (    (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                      || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
@@ -231,14 +231,13 @@ VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu)
                 }
             }
 
-            if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
+            if (RT_FAILURE(rc))
             {
                 LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
                 VM_FF_SET(pGVM, VM_FF_PGM_NO_MEMORY);
             }
         }
-
     LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
     return rc;
 }
diff --git a/src/VBox/VMM/VMMR0/VMMR0.cpp b/src/VBox/VMM/VMMR0/VMMR0.cpp
index e4ce411bbe6..0ac4520a6cd 100644
--- a/src/VBox/VMM/VMMR0/VMMR0.cpp
+++ b/src/VBox/VMM/VMMR0/VMMR0.cpp
@@ -1959,12 +1959,6 @@ DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OP
             rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
             break;
 
-        case VMMR0_DO_GMM_SEED_CHUNK:
-            if (pReqHdr)
-                return VERR_INVALID_PARAMETER;
-            rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
-            break;
-
         case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
             if (idCpu == NIL_VMCPUID)
                 return VERR_INVALID_CPU_ID;
diff --git a/src/VBox/VMM/VMMR3/GMM.cpp b/src/VBox/VMM/VMMR3/GMM.cpp
index 155f073c283..304d9235307 100644
--- a/src/VBox/VMM/VMMR3/GMM.cpp
+++ b/src/VBox/VMM/VMMR3/GMM.cpp
@@ -103,38 +103,17 @@ GMMR3DECL(int) GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, u
  */
 GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
 {
-    for (unsigned i = 0; ; i++)
+    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
+    if (RT_SUCCESS(rc))
     {
-        int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
-        if (RT_SUCCESS(rc))
-        {
 #ifdef LOG_ENABLED
-            for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
-                Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
-                      pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
+        for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
+            Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
+                  pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
 #endif
-            return rc;
-        }
-        if (rc != VERR_GMM_SEED_ME)
-            return VMSetError(pVM, rc, RT_SRC_POS,
-                              N_("GMMR0AllocatePages failed to allocate %u pages"),
-                              pReq->cPages);
-        Assert(i < pReq->cPages);
-
-        /*
-         * Seed another chunk.
-         */
-        void *pvChunk;
-        rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
-        if (RT_FAILURE(rc))
-            return VMSetError(pVM, rc, RT_SRC_POS,
-                              N_("Out of memory (SUPR3PageAlloc) seeding a %u pages allocation request"),
-                              pReq->cPages);
-
-        rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
-        if (RT_FAILURE(rc))
-            return VMSetError(pVM, rc, RT_SRC_POS, N_("GMM seeding failed"));
+        return rc;
     }
+    return VMSetError(pVM, rc, RT_SRC_POS, N_("GMMR0AllocatePages failed to allocate %u pages"), pReq->cPages);
 }
 
 
@@ -378,15 +357,6 @@ GMMR3DECL(int) GMMR3FreeLargePage(PVM pVM, uint32_t idPage)
 
 
 /**
- * @see GMMR0SeedChunk
- */
-GMMR3DECL(int) GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3)
-{
-    return VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvR3, NULL);
-}
-
-
-/**
  * @see GMMR0RegisterSharedModule
  */
 GMMR3DECL(int) GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq)
diff --git a/src/VBox/VMM/VMMR3/PGM.cpp b/src/VBox/VMM/VMMR3/PGM.cpp
index 2fe136068b9..efd65ccf5db 100644
--- a/src/VBox/VMM/VMMR3/PGM.cpp
+++ b/src/VBox/VMM/VMMR3/PGM.cpp
@@ -250,8 +250,8 @@
  *
  * @section sec_pgmPhys_Definitions       Definitions
  *
- * Allocation chunk - A RTR0MemObjAllocPhysNC object and the tracking
- * machinery associated with it.
+ * Allocation chunk - A RTR0MemObjAllocPhysNC or RTR0MemObjAllocPhys allocate
+ * memory object and the tracking machinery associated with it.
  *
  *
  *
@@ -582,29 +582,6 @@
  *      -# Leave the critsect.
  *
  *
- * @section sec_pgmPhys_Fallback            Fallback
- *
- * Current all the "second tier" hosts will not support the RTR0MemObjAllocPhysNC
- * API and thus require a fallback.
- *
- * So, when RTR0MemObjAllocPhysNC returns VERR_NOT_SUPPORTED the page allocator
- * will return to the ring-3 caller (and later ring-0) and asking it to seed
- * the page allocator with some fresh pages (VERR_GMM_SEED_ME). Ring-3 will
- * then perform an SUPR3PageAlloc(cbChunk >> PAGE_SHIFT) call and make a
- * "SeededAllocPages" call to ring-0.
- *
- * The first time ring-0 sees the VERR_NOT_SUPPORTED failure it will disable
- * all page sharing (zero page detection will continue). It will also force
- * all allocations to come from the VM which seeded the page. Both these
- * measures are taken to make sure that there will never be any need for
- * mapping anything into ring-3 - everything will be mapped already.
- *
- * Whether we'll continue to use the current MM locked memory management
- * for this I don't quite know (I'd prefer not to and just ditch that all
- * together), we'll see what's simplest to do.
- *
- *
- *
  * @section sec_pgmPhys_Changes             Changes
  *
  * Breakdown of the changes involved?
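The ring-3 side collapses accordingly. As the GMM.cpp hunk above shows, GMMR3AllocatePagesPerform becomes a single ring-0 call with no retry loop; a condensed view of the patched function, with the LOG_ENABLED block elided:

```c
/* Condensed from the patched GMMR3AllocatePagesPerform (logging elided). */
GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
{
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
    if (RT_SUCCESS(rc))
        return rc;
    /* Any failure is final now; VERR_GMM_SEED_ME no longer triggers a
       SUPR3PageAlloc + VMMR0_DO_GMM_SEED_CHUNK retry. */
    return VMSetError(pVM, rc, RT_SRC_POS,
                      N_("GMMR0AllocatePages failed to allocate %u pages"), pReq->cPages);
}
```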
diff --git a/src/VBox/VMM/VMMR3/PGMPhys.cpp b/src/VBox/VMM/VMMR3/PGMPhys.cpp
index 9b8265f07c1..819245b0d39 100644
--- a/src/VBox/VMM/VMMR3/PGMPhys.cpp
+++ b/src/VBox/VMM/VMMR3/PGMPhys.cpp
@@ -5961,20 +5961,6 @@ VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
     int rcAlloc = VINF_SUCCESS;
     int rcSeed  = VINF_SUCCESS;
     int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
-    while (rc == VERR_GMM_SEED_ME)
-    {
-        void *pvChunk;
-        rcAlloc = rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
-        if (RT_SUCCESS(rc))
-        {
-            rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
-            if (RT_FAILURE(rc))
-                SUPR3PageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
-        }
-        if (RT_SUCCESS(rc))
-            rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
-    }
-
     /** @todo we should split this up into an allocate and flush operation. sometimes you want to flush and not allocate more (which will trigger the vm account limit error) */
     if (    rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT
         &&  pVM->pgm.s.cHandyPages > 0)
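The handy-page path in PGMPhys.cpp follows the same pattern: the seeding loop is deleted and the function proceeds directly from the single ring-0 call to the existing limit handling. A sketch of the resulting control flow, condensed from the hunk above (note the rcAlloc/rcSeed locals survive in the context lines and presumably are now vestigial):

```c
/* Sketch of the opening of PGMR3PhysAllocateHandyPages after this commit. */
int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
/* No while (rc == VERR_GMM_SEED_ME) loop any more: ring-0 either succeeds or
   fails with a definitive status such as VERR_GMM_HIT_VM_ACCOUNT_LIMIT, which
   the unchanged code below turns into flush/VM_FF_PGM_NO_MEMORY handling. */
if (   rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT
    && pVM->pgm.s.cHandyPages > 0)
{
    /* ...existing flush handling continues unchanged... */
}
```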