Diffstat (limited to 'arch/sh/mm')
 -rw-r--r--  arch/sh/mm/Kconfig     | 11
 -rw-r--r--  arch/sh/mm/Makefile    |  2
 -rw-r--r--  arch/sh/mm/cache-sh4.c | 13
 -rw-r--r--  arch/sh/mm/cache.c     |  8
 -rw-r--r--  arch/sh/mm/nommu.c     |  4
 -rw-r--r--  arch/sh/mm/pgtable.c   | 57
 6 files changed, 72 insertions, 23 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index b3f6c1a30b22..358c860aeb9b 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -82,8 +82,7 @@ config 32BIT
 
 config PMB_ENABLE
         bool "Support 32-bit physical addressing through PMB"
-        depends on MMU && EXPERIMENTAL && CPU_SH4A
-        default y
+        depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
         help
           If you say Y here, physical addressing will be extended to
           32-bits through the SH-4A PMB. If this is not set, legacy
@@ -96,7 +95,7 @@ choice
 
 config PMB
         bool "PMB"
-        depends on MMU && EXPERIMENTAL && CPU_SH4A
+        depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
         help
           If you say Y here, physical addressing will be extended to
           32-bits through the SH-4A PMB. If this is not set, legacy
@@ -104,7 +103,7 @@ config PMB
 
 config PMB_FIXED
         bool "fixed PMB"
-        depends on MMU && EXPERIMENTAL && CPU_SH4A
+        depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
         select 32BIT
         help
           If this option is enabled, fixed PMB mappings are inherited
@@ -220,7 +219,7 @@ config PAGE_SIZE_4KB
 
 config PAGE_SIZE_8KB
         bool "8kB"
-        depends on !MMU || X2TLB && !PGTABLE_LEVELS_3
+        depends on !MMU || X2TLB
         help
           This enables 8kB pages as supported by SH-X2 and later MMUs.
 
@@ -232,7 +231,7 @@ config PAGE_SIZE_16KB
 
 config PAGE_SIZE_64KB
         bool "64kB"
-        depends on !MMU || CPU_SH4 && !PGTABLE_LEVELS_3 || CPU_SH5
+        depends on !MMU || CPU_SH4 || CPU_SH5
         help
           This enables support for 64kB pages, possible on all SH-4
           CPUs and later.
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 8a70535fa7ce..dd5010c708e0 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -15,7 +15,7 @@ obj-y                   += $(cacheops-y)
 
 mmu-y                   := nommu.o extable_32.o
 mmu-$(CONFIG_MMU)       := extable_$(BITS).o fault_$(BITS).o \
-                           ioremap_$(BITS).o kmap.o tlbflush_$(BITS).o
+                           ioremap_$(BITS).o kmap.o pgtable.o tlbflush_$(BITS).o
 
 obj-y                   += $(mmu-y)
 obj-$(CONFIG_DEBUG_FS)  += asids-debugfs.o
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 560ddb6bc8a7..a2301daeefa3 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -109,6 +109,7 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys)
 static void sh4_flush_dcache_page(void *arg)
 {
         struct page *page = arg;
+        unsigned long addr = (unsigned long)page_address(page);
 #ifndef CONFIG_SMP
         struct address_space *mapping = page_mapping(page);
 
@@ -116,16 +117,8 @@ static void sh4_flush_dcache_page(void *arg)
                 set_bit(PG_dcache_dirty, &page->flags);
         else
 #endif
-        {
-                unsigned long phys = page_to_phys(page);
-                unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
-                int i, n;
-
-                /* Loop all the D-cache */
-                n = boot_cpu_data.dcache.n_aliases;
-                for (i = 0; i < n; i++, addr += PAGE_SIZE)
-                        flush_cache_one(addr, phys);
-        }
+        flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+                        (addr & shm_align_mask), page_to_phys(page));
 
         wmb();
 }
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index e9415d3ea94a..b8607fa7ae12 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -133,12 +133,8 @@ void __update_cache(struct vm_area_struct *vma,
         page = pfn_to_page(pfn);
         if (pfn_valid(pfn)) {
                 int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-                if (dirty) {
-                        unsigned long addr = (unsigned long)page_address(page);
-
-                        if (pages_do_alias(addr, address & PAGE_MASK))
-                                __flush_purge_region((void *)addr, PAGE_SIZE);
-                }
+                if (dirty)
+                        __flush_purge_region(page_address(page), PAGE_SIZE);
         }
 }
 
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index ac16c05917ef..7694f50c9034 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -94,3 +94,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 }
+
+void pgtable_cache_init(void)
+{
+}
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
new file mode 100644
index 000000000000..e1bc5483cc07
--- /dev/null
+++ b/arch/sh/mm/pgtable.c
@@ -0,0 +1,57 @@
+#include <linux/mm.h>
+
+#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+
+static struct kmem_cache *pgd_cachep;
+
+#ifdef CONFIG_PGTABLE_LEVELS_3
+static struct kmem_cache *pmd_cachep;
+#endif
+
+void pgd_ctor(void *x)
+{
+        pgd_t *pgd = x;
+
+        memcpy(pgd + USER_PTRS_PER_PGD,
+               swapper_pg_dir + USER_PTRS_PER_PGD,
+               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
+void pgtable_cache_init(void)
+{
+        pgd_cachep = kmem_cache_create("pgd_cache",
+                                       PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
+                                       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
+#ifdef CONFIG_PGTABLE_LEVELS_3
+        pmd_cachep = kmem_cache_create("pmd_cache",
+                                       PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
+                                       PAGE_SIZE, SLAB_PANIC, NULL);
+#endif
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+        return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+        kmem_cache_free(pgd_cachep, pgd);
+}
+
+#ifdef CONFIG_PGTABLE_LEVELS_3
+void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+        set_pud(pud, __pud((unsigned long)pmd));
+}
+
+pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+        return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
+}
+
+void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+        kmem_cache_free(pmd_cachep, pmd);
+}
+#endif /* CONFIG_PGTABLE_LEVELS_3 */
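The new arch/sh/mm/pgtable.c hands out page directories from a dedicated kmem cache whose constructor, pgd_ctor(), copies the kernel half of swapper_pg_dir into every object, so each new address space starts out with the kernel mappings already present. As a rough standalone illustration of that constructor pattern (plain user-space C, not kernel code; the names template_table, table_alloc and the entry counts are invented for the sketch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NENTRIES 2048   /* stand-in for PTRS_PER_PGD */
#define NUSER    1536   /* stand-in for USER_PTRS_PER_PGD */

/* Plays the role of swapper_pg_dir: the reference table holding kernel entries. */
static unsigned long template_table[NENTRIES];

/* Constructor: copy the kernel portion of the template into a fresh table,
 * mirroring what pgd_ctor() does with swapper_pg_dir. */
static void table_ctor(unsigned long *table)
{
        memcpy(table + NUSER, template_table + NUSER,
               (NENTRIES - NUSER) * sizeof(unsigned long));
}

/* Allocate a zeroed table and run the constructor on it; a slab cache
 * created with a ctor argument would do this for every new object. */
static unsigned long *table_alloc(void)
{
        unsigned long *t = calloc(NENTRIES, sizeof(unsigned long));
        if (t)
                table_ctor(t);
        return t;
}

int main(void)
{
        template_table[NENTRIES - 1] = 0xc0000000UL;    /* pretend kernel mapping */

        unsigned long *pgd = table_alloc();
        if (!pgd)
                return 1;

        /* The user half is empty, the kernel half was inherited. */
        printf("user entry 0:      %#lx\n", pgd[0]);
        printf("last kernel entry: %#lx\n", pgd[NENTRIES - 1]);

        free(pgd);
        return 0;
}

The real code relies on the slab allocator to keep freed objects in constructed state, so the kernel entries only have to be copied when a pgd is first created, not on every pgd_alloc().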