From 4944e76d81801b8e60ed3e7789443f210c16ed65 Mon Sep 17 00:00:00 2001
From: Paolo 'Blaisorblade' Giarrusso
Date: Sat, 3 Sep 2005 15:54:56 -0700
Subject: [PATCH] mm: remove implied vm_ops check

If !vma->vm_ops we already BUG above, so retesting it is useless.  The
compiler cannot optimize this away, because BUG is a macro and is thus
not marked noreturn; that should possibly be fixed.

Signed-off-by: Paolo 'Blaisorblade' Giarrusso
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index a596c1172248..b25f5e58a14c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1955,7 +1955,7 @@ static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
 	 * Fall back to the linear mapping if the fs does not support
 	 * ->populate:
 	 */
-	if (!vma->vm_ops || !vma->vm_ops->populate ||
+	if (!vma->vm_ops->populate ||
 			(write_access && !(vma->vm_flags & VM_SHARED))) {
 		pte_clear(mm, address, pte);
 		return do_no_page(mm, vma, address, write_access, pte, pmd);
--
cgit v1.2.1

From a600388d28419305aad3c4c0af52c223cf6fa0af Mon Sep 17 00:00:00 2001
From: Zachary Amsden
Date: Sat, 3 Sep 2005 15:55:04 -0700
Subject: [PATCH] x86: ptep_clear optimization

Add a new accessor for PTEs, which passes the full hint from the
mmu_gather struct; this allows architectures with hardware pagetables
to optimize away atomic PTE operations when destroying an address
space.  Removing the locked operation should allow better pipelining of
memory access in this loop.  I measured an average savings of 30-35
cycles per zap_pte_range on the first 500 destructions on Pentium-M,
but I believe the optimization would win more on older processors which
still assert the bus lock on xchg for an exclusive cacheline.

Update: I made some new measurements, and this saves exactly 26 cycles
over ptep_get_and_clear on Pentium M.  On P4, with a PAE kernel, this
saves 180 cycles per ptep_get_and_clear, for a whopping 92160 cycles
saved over a full address space destruction.

pte_clear_full is not yet used, but is provided for future
optimizations (in particular, when running inside of a hypervisor that
queues page table updates, the full hint allows us to avoid queueing
unnecessary page table updates for an address space in the process of
being destroyed).

This is not a huge win, but it does help a bit, and sets the stage for
further hypervisor optimization of the mm layer on all architectures.

Signed-off-by: Zachary Amsden
Cc: Christoph Lameter
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index b25f5e58a14c..788a62810340 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -562,7 +562,8 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 				     page->index > details->last_index))
 					continue;
 			}
-			ptent = ptep_get_and_clear(tlb->mm, addr, pte);
+			ptent = ptep_get_and_clear_full(tlb->mm, addr, pte,
+							tlb->fullmm);
 			tlb_remove_tlb_entry(tlb, pte, addr);
 			if (unlikely(!page))
 				continue;
@@ -590,7 +591,7 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 			continue;
 		if (!pte_file(ptent))
 			free_swap_and_cache(pte_to_swp_entry(ptent));
-		pte_clear(tlb->mm, addr, pte);
+		pte_clear_full(tlb->mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap(pte - 1);
 }
--
cgit v1.2.1
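
A note on the noreturn remark in the first patch: the effect is easy to
reproduce outside the kernel.  Below is a minimal, self-contained C
sketch, not kernel code; bug_plain() and bug_noreturn() are hypothetical
stand-ins for BUG(), showing why the compiler can only delete a retest
when it knows the failure path never returns:

#include <stdlib.h>

/* Hypothetical stand-ins for the kernel's BUG() macro. */
static void bug_noreturn(void) __attribute__((noreturn));
static void bug_noreturn(void)
{
	abort();
}

static void bug_plain(void)	/* deliberately not marked noreturn */
{
	abort();
}

int deref_with_noreturn_check(const int *p)
{
	if (!p)
		bug_noreturn();
	/* The compiler now knows p != NULL on every path reaching this
	 * point, so a second "if (!p)" test here would be folded away
	 * as dead code. */
	return *p;
}

int deref_with_plain_check(const int *p)
{
	if (!p)
		bug_plain();
	/* As far as the compiler knows, bug_plain() may return, so p
	 * could still be NULL here and a later "!p" retest cannot be
	 * proven redundant. */
	return *p;
}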
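
On the second patch: the new ptep_get_and_clear_full()/pte_clear_full()
accessors only pay off on architectures that implement them, so every
other architecture needs a fallback that ignores the hint.  A sketch of
the assumed generic fallbacks follows, modeled on the
include/asm-generic/pgtable.h convention of that era; the exact guard
names are an assumption, since the full patch touches more than the
mm/memory.c fragment shown above:

/* Assumed generic fallbacks: an architecture that can exploit the
 * "full" hint (the whole mm is being torn down, so nothing else can
 * fault on these PTEs) defines its own variant plus the matching
 * __HAVE_ARCH_* guard; all other architectures silently drop the hint
 * and keep their existing behavior. */
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
	ptep_get_and_clear(__mm, __address, __ptep)
#endif

#ifndef __HAVE_ARCH_PTE_CLEAR_FULL
#define pte_clear_full(__mm, __address, __ptep, __full)			\
	pte_clear(__mm, __address, __ptep)
#endif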