author     Chris Metcalf <cmetcalf@tilera.com>   2012-04-01 14:04:21 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>   2012-05-25 12:48:27 -0400
commit     621b19551507c8fd9d721f4038509c5bb155a983 (patch)
tree       62d8d5e7a783364940153b4523fcfba821cee241 /arch/tile/mm/pgtable.c
parent     d9ed9faac283a3be73f0e11a2ef49ee55aece4db (diff)
download   linux-next-621b19551507c8fd9d721f4038509c5bb155a983.tar.gz
arch/tile: support multiple huge page sizes dynamically
This change adds support for a new "super" bit in the PTE, using the new arch_make_huge_pte() method. The Tilera hypervisor sees the bit set at a given level of the page table and gangs together 4, 16, or 64 consecutive pages from that level of the hierarchy to create a larger TLB entry.

One extra "super" page size can be specified at each of the three levels of the page table hierarchy on tilegx, using the "hugepagesz" argument on the boot command line. A new hypervisor API is added to allow Linux to tell the hypervisor how many PTEs to gang together at each level of the page table.

To allow pre-allocating huge pages larger than the buddy allocator can handle, this change modifies the Tilera bootmem support to put all of memory on tilegx platforms into bootmem.

As part of this change I eliminate the vestigial CONFIG_HIGHPTE support, which never worked anyway, and eliminate the hv_page_size() API in favor of the standard vma_kernel_pagesize() API.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
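[Editor's note] For illustration, here is a minimal sketch of how an arch_make_huge_pte() hook of this era (the hook is introduced by the parent commit) can tag such a PTE. The pte_mksuper() helper and the exact set of natively supported sizes are assumptions for the sketch, not necessarily the code this commit adds:

#include <linux/mm.h>
#include <linux/hugetlb.h>

/* Sketch only: pte_mksuper() is assumed to set the new "super" PTE bit. */
pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writable)
{
	unsigned long pagesize = huge_page_size(hstate_vma(vma));

	/*
	 * Sizes that map directly onto a page-table level (PMD_SIZE,
	 * PUD_SIZE) need no extra bit; any other configured size is
	 * built by ganging consecutive entries at some level, which
	 * the hypervisor recognizes via the "super" bit.
	 */
	if (pagesize != PMD_SIZE && pagesize != PUD_SIZE)
		entry = pte_mksuper(entry);
	return entry;
}

On such a kernel the extra sizes would then be requested at boot time, e.g. "hugepagesz=1M hugepages=16" (the size shown is illustrative; the values actually accepted depend on the base page size and the page-table level, per the message above).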
Diffstat (limited to 'arch/tile/mm/pgtable.c')
-rw-r--r--  arch/tile/mm/pgtable.c | 13 -------------
1 file changed, 0 insertions(+), 13 deletions(-)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 3d7074347e6d..345edfed9fcd 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -132,15 +132,6 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
 }
 
-#if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
-{
-	pte_t *pte = kmap_atomic(pmd_page(*dir)) +
-		(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
-	return &pte[pte_index(address)];
-}
-#endif
-
 /**
  * shatter_huge_page() - ensure a given address is mapped by a small page.
  *
@@ -296,10 +287,6 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
 	struct page *p;
 	int i;
 
-#ifdef CONFIG_HIGHPTE
-	flags |= __GFP_HIGHMEM;
-#endif
-
 	p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
 	if (p == NULL)
 		return NULL;
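[Editor's note] The commit message's replacement of hv_page_size() with the generic vma_kernel_pagesize() can be pictured with a small sketch of a typical caller; flush_one() here is a hypothetical per-page callback, not a function from this patch:

#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * Sketch only. vma_kernel_pagesize() is the generic mm helper adopted
 * in place of the Tilera-specific hv_page_size(): it returns PAGE_SIZE
 * for ordinary VMAs and the hstate's huge page size for hugetlb VMAs,
 * so the stepping logic needs no hypervisor-specific lookup.
 */
static void flush_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	unsigned long step = vma_kernel_pagesize(vma);
	unsigned long addr;

	for (addr = start; addr < end; addr += step)
		flush_one(vma, addr);	/* hypothetical per-page callback */
}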