From 09cfefb7fa70c3af011b0db0a513fd80b2f18abc Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Tue, 31 May 2022 18:04:11 +0800
Subject: LoongArch: Add memory management

Add memory management support for LoongArch, including cache and TLB
management, page fault handling, and ioremap/mmap support.

Reviewed-by: WANG Xuerui
Reviewed-by: Jiaxun Yang
Signed-off-by: Huacai Chen
---
 arch/loongarch/mm/cache.c       | 141 ++++++++++++
 arch/loongarch/mm/extable.c     |  22 ++
 arch/loongarch/mm/fault.c       | 261 ++++++++++++++++++++++
 arch/loongarch/mm/hugetlbpage.c |  87 ++++++++
 arch/loongarch/mm/init.c        | 165 ++++++++++++++
 arch/loongarch/mm/ioremap.c     |  27 +++
 arch/loongarch/mm/maccess.c     |  10 +
 arch/loongarch/mm/mmap.c        | 125 +++++++++++
 arch/loongarch/mm/page.S        |  84 +++++++
 arch/loongarch/mm/pgtable.c     | 130 +++++++++++
 arch/loongarch/mm/tlb.c         | 282 ++++++++++++++++++++++++
 arch/loongarch/mm/tlbex.S       | 477 ++++++++++++++++++++++++++++++++++++++++
 12 files changed, 1811 insertions(+)
 create mode 100644 arch/loongarch/mm/cache.c
 create mode 100644 arch/loongarch/mm/extable.c
 create mode 100644 arch/loongarch/mm/fault.c
 create mode 100644 arch/loongarch/mm/hugetlbpage.c
 create mode 100644 arch/loongarch/mm/init.c
 create mode 100644 arch/loongarch/mm/ioremap.c
 create mode 100644 arch/loongarch/mm/maccess.c
 create mode 100644 arch/loongarch/mm/mmap.c
 create mode 100644 arch/loongarch/mm/page.S
 create mode 100644 arch/loongarch/mm/pgtable.c
 create mode 100644 arch/loongarch/mm/tlb.c
 create mode 100644 arch/loongarch/mm/tlbex.S

diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c
new file mode 100644
index 000000000000..9e5ce5aa73f7
--- /dev/null
+++ b/arch/loongarch/mm/cache.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * LoongArch maintains ICache/DCache coherency by hardware, so we just
+ * need "ibar" to avoid an instruction hazard here.
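+ * ("ibar 0" is an instruction-fetch barrier: because the I-cache is
+ * kept coherent with the D-cache, newly written code only needs the
+ * fetch pipeline synchronized, not the line-by-line writeback and
+ * invalidate loops that MIPS-style caches require.)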
+ */ +void local_flush_icache_range(unsigned long start, unsigned long end) +{ + asm volatile ("\tibar 0\n"::); +} +EXPORT_SYMBOL(local_flush_icache_range); + +void cache_error_setup(void) +{ + extern char __weak except_vec_cex; + set_merr_handler(0x0, &except_vec_cex, 0x80); +} + +static unsigned long icache_size __read_mostly; +static unsigned long dcache_size __read_mostly; +static unsigned long vcache_size __read_mostly; +static unsigned long scache_size __read_mostly; + +static char *way_string[] = { NULL, "direct mapped", "2-way", + "3-way", "4-way", "5-way", "6-way", "7-way", "8-way", + "9-way", "10-way", "11-way", "12-way", + "13-way", "14-way", "15-way", "16-way", +}; + +static void probe_pcache(void) +{ + struct cpuinfo_loongarch *c = ¤t_cpu_data; + unsigned int lsize, sets, ways; + unsigned int config; + + config = read_cpucfg(LOONGARCH_CPUCFG17); + lsize = 1 << ((config & CPUCFG17_L1I_SIZE_M) >> CPUCFG17_L1I_SIZE); + sets = 1 << ((config & CPUCFG17_L1I_SETS_M) >> CPUCFG17_L1I_SETS); + ways = ((config & CPUCFG17_L1I_WAYS_M) >> CPUCFG17_L1I_WAYS) + 1; + + c->icache.linesz = lsize; + c->icache.sets = sets; + c->icache.ways = ways; + icache_size = sets * ways * lsize; + c->icache.waysize = icache_size / c->icache.ways; + + config = read_cpucfg(LOONGARCH_CPUCFG18); + lsize = 1 << ((config & CPUCFG18_L1D_SIZE_M) >> CPUCFG18_L1D_SIZE); + sets = 1 << ((config & CPUCFG18_L1D_SETS_M) >> CPUCFG18_L1D_SETS); + ways = ((config & CPUCFG18_L1D_WAYS_M) >> CPUCFG18_L1D_WAYS) + 1; + + c->dcache.linesz = lsize; + c->dcache.sets = sets; + c->dcache.ways = ways; + dcache_size = sets * ways * lsize; + c->dcache.waysize = dcache_size / c->dcache.ways; + + c->options |= LOONGARCH_CPU_PREFETCH; + + pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n", + icache_size >> 10, way_string[c->icache.ways], "VIPT", c->icache.linesz); + + pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n", + dcache_size >> 10, way_string[c->dcache.ways], "VIPT", "no aliases", c->dcache.linesz); +} + +static void probe_vcache(void) +{ + struct cpuinfo_loongarch *c = ¤t_cpu_data; + unsigned int lsize, sets, ways; + unsigned int config; + + config = read_cpucfg(LOONGARCH_CPUCFG19); + lsize = 1 << ((config & CPUCFG19_L2_SIZE_M) >> CPUCFG19_L2_SIZE); + sets = 1 << ((config & CPUCFG19_L2_SETS_M) >> CPUCFG19_L2_SETS); + ways = ((config & CPUCFG19_L2_WAYS_M) >> CPUCFG19_L2_WAYS) + 1; + + c->vcache.linesz = lsize; + c->vcache.sets = sets; + c->vcache.ways = ways; + vcache_size = lsize * sets * ways; + c->vcache.waysize = vcache_size / c->vcache.ways; + + pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", + vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); +} + +static void probe_scache(void) +{ + struct cpuinfo_loongarch *c = ¤t_cpu_data; + unsigned int lsize, sets, ways; + unsigned int config; + + config = read_cpucfg(LOONGARCH_CPUCFG20); + lsize = 1 << ((config & CPUCFG20_L3_SIZE_M) >> CPUCFG20_L3_SIZE); + sets = 1 << ((config & CPUCFG20_L3_SETS_M) >> CPUCFG20_L3_SETS); + ways = ((config & CPUCFG20_L3_WAYS_M) >> CPUCFG20_L3_WAYS) + 1; + + c->scache.linesz = lsize; + c->scache.sets = sets; + c->scache.ways = ways; + /* 4 cores. 
scaches are shared */ + scache_size = lsize * sets * ways; + c->scache.waysize = scache_size / c->scache.ways; + + pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", + scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); +} + +void cpu_cache_init(void) +{ + probe_pcache(); + probe_vcache(); + probe_scache(); + + shm_align_mask = PAGE_SIZE - 1; +} diff --git a/arch/loongarch/mm/extable.c b/arch/loongarch/mm/extable.c new file mode 100644 index 000000000000..bc20988f2b87 --- /dev/null +++ b/arch/loongarch/mm/extable.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include +#include +#include +#include + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(exception_era(regs)); + if (fixup) { + regs->csr_era = fixup->fixup; + + return 1; + } + + return 0; +} diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c new file mode 100644 index 000000000000..605579b19a00 --- /dev/null +++ b/arch/loongarch/mm/fault.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1995 - 2000 by Ralf Baechle + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +int show_unhandled_signals = 1; + +static void __kprobes no_context(struct pt_regs *regs, unsigned long address) +{ + const int field = sizeof(unsigned long) * 2; + + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) + return; + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + bust_spinlocks(1); + + pr_alert("CPU %d Unable to handle kernel paging request at " + "virtual address %0*lx, era == %0*lx, ra == %0*lx\n", + raw_smp_processor_id(), field, address, field, regs->csr_era, + field, regs->regs[1]); + die("Oops", regs); +} + +static void __kprobes do_out_of_memory(struct pt_regs *regs, unsigned long address) +{ + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). + */ + if (!user_mode(regs)) { + no_context(regs, address); + return; + } + pagefault_out_of_memory(); +} + +static void __kprobes do_sigbus(struct pt_regs *regs, + unsigned long write, unsigned long address, int si_code) +{ + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) { + no_context(regs, address); + return; + } + + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + current->thread.csr_badvaddr = address; + current->thread.trap_nr = read_csr_excode(); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); +} + +static void __kprobes do_sigsegv(struct pt_regs *regs, + unsigned long write, unsigned long address, int si_code) +{ + const int field = sizeof(unsigned long) * 2; + static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); + + /* Kernel mode? 
Handle exceptions or die */ + if (!user_mode(regs)) { + no_context(regs, address); + return; + } + + /* User mode accesses just cause a SIGSEGV */ + current->thread.csr_badvaddr = address; + if (!write) + current->thread.error_code = 1; + else + current->thread.error_code = 2; + current->thread.trap_nr = read_csr_excode(); + + if (show_unhandled_signals && + unhandled_signal(current, SIGSEGV) && __ratelimit(&ratelimit_state)) { + pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n", + current->comm, + write ? "write access to" : "read access from", + field, address); + pr_info("era = %0*lx in", field, + (unsigned long) regs->csr_era); + print_vma_addr(KERN_CONT " ", regs->csr_era); + pr_cont("\n"); + pr_info("ra = %0*lx in", field, + (unsigned long) regs->regs[1]); + print_vma_addr(KERN_CONT " ", regs->regs[1]); + pr_cont("\n"); + } + force_sig_fault(SIGSEGV, si_code, (void __user *)address); +} + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate + * routines. + */ +static void __kprobes __do_page_fault(struct pt_regs *regs, + unsigned long write, unsigned long address) +{ + int si_code = SEGV_MAPERR; + unsigned int flags = FAULT_FLAG_DEFAULT; + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + struct vm_area_struct *vma = NULL; + vm_fault_t fault; + + /* + * We fault-in kernel-space virtual memory on-demand. The + * 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. + */ + if (address & __UA_LIMIT) { + if (!user_mode(regs)) + no_context(regs, address); + else + do_sigsegv(regs, write, address, si_code); + return; + } + + /* + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ + if (faulthandler_disabled() || !mm) { + do_sigsegv(regs, write, address, si_code); + return; + } + + if (user_mode(regs)) + flags |= FAULT_FLAG_USER; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +retry: + mmap_read_lock(mm); + vma = find_vma(mm, address); + if (!vma) + goto bad_area; + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (!expand_stack(vma, address)) + goto good_area; +/* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ +bad_area: + mmap_read_unlock(mm); + do_sigsegv(regs, write, address, si_code); + return; + +/* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ +good_area: + si_code = SEGV_ACCERR; + + if (write) { + flags |= FAULT_FLAG_WRITE; + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { + if (!(vma->vm_flags & VM_READ) && address != exception_era(regs)) + goto bad_area; + if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) + goto bad_area; + } + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. 
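+	 * handle_mm_fault() may drop the mmap lock internally and return
+	 * VM_FAULT_RETRY (e.g. while waiting for page I/O); the retry
+	 * path below then repeats the VMA lookup with FAULT_FLAG_TRIED
+	 * set.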
+ */ + fault = handle_mm_fault(vma, address, flags, regs); + + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + no_context(regs, address); + return; + } + + if (unlikely(fault & VM_FAULT_RETRY)) { + flags |= FAULT_FLAG_TRIED; + + /* + * No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + goto retry; + } + if (unlikely(fault & VM_FAULT_ERROR)) { + mmap_read_unlock(mm); + if (fault & VM_FAULT_OOM) { + do_out_of_memory(regs, address); + return; + } else if (fault & VM_FAULT_SIGSEGV) { + do_sigsegv(regs, write, address, si_code); + return; + } else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { + do_sigbus(regs, write, address, si_code); + return; + } + BUG(); + } + + mmap_read_unlock(mm); +} + +asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, + unsigned long write, unsigned long address) +{ + irqentry_state_t state = irqentry_enter(regs); + + /* Enable interrupt if enabled in parent context */ + if (likely(regs->csr_prmd & CSR_PRMD_PIE)) + local_irq_enable(); + + __do_page_fault(regs, write, address); + + local_irq_disable(); + + irqentry_exit(regs, state); +} diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c new file mode 100644 index 000000000000..ba138117b124 --- /dev/null +++ b/arch/loongarch/mm/hugetlbpage.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); + if (pud) + pte = (pte_t *)pmd_alloc(mm, pud, addr); + + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd_present(*pgd)) { + p4d = p4d_offset(pgd, addr); + if (p4d_present(*p4d)) { + pud = pud_offset(p4d, addr); + if (pud_present(*pud)) + pmd = pmd_offset(pud, addr); + } + } + return (pte_t *) pmd; +} + +/* + * This function checks for proper alignment of input addr and len parameters. + */ +int is_aligned_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + +int pmd_huge(pmd_t pmd) +{ + return (pmd_val(pmd) & _PAGE_HUGE) != 0; +} + +int pud_huge(pud_t pud) +{ + return (pud_val(pud) & _PAGE_HUGE) != 0; +} + +uint64_t pmd_to_entrylo(unsigned long pmd_val) +{ + uint64_t val; + /* PMD as PTE. 
Must be huge page */ + if (!pmd_huge(__pmd(pmd_val))) + panic("%s", __func__); + + val = pmd_val ^ _PAGE_HUGE; + val |= ((val & _PAGE_HGLOBAL) >> + (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)); + + return val; +} diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c new file mode 100644 index 000000000000..afd6634ce171 --- /dev/null +++ b/arch/loongarch/mm/init.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * We have up to 8 empty zeroed pages so we can map one of the right colour + * when needed. Since page is never written to after the initialization we + * don't have to care about aliases on other CPUs. + */ +unsigned long empty_zero_page, zero_page_mask; +EXPORT_SYMBOL_GPL(empty_zero_page); +EXPORT_SYMBOL(zero_page_mask); + +void setup_zero_pages(void) +{ + unsigned int order, i; + struct page *page; + + order = 0; + + empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!empty_zero_page) + panic("Oh boy, that early out of memory?"); + + page = virt_to_page((void *)empty_zero_page); + split_page(page, order); + for (i = 0; i < (1 << order); i++, page++) + mark_page_reserved(page); + + zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; +} + +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + void *vfrom, *vto; + + vto = kmap_atomic(to); + vfrom = kmap_atomic(from); + copy_page(vto, vfrom); + kunmap_atomic(vfrom); + kunmap_atomic(vto); + /* Make sure this page is cleared on other CPU's too before using it */ + smp_wmb(); +} + +int __ref page_is_ram(unsigned long pfn) +{ + unsigned long addr = PFN_PHYS(pfn); + + return memblock_is_memory(addr) && !memblock_is_reserved(addr); +} + +void __init paging_init(void) +{ + unsigned long max_zone_pfns[MAX_NR_ZONES]; + +#ifdef CONFIG_ZONE_DMA + max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; +#endif +#ifdef CONFIG_ZONE_DMA32 + max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; +#endif + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; + + free_area_init(max_zone_pfns); +} + +void __init mem_init(void) +{ + max_mapnr = max_low_pfn; + high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); + + memblock_free_all(); + setup_zero_pages(); /* Setup zeroed pages. 
*/ +} + +void __ref free_initmem(void) +{ + free_initmem_default(POISON_FREE_INITMEM); +} + +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + int ret; + + ret = __add_pages(nid, start_pfn, nr_pages, params); + + if (ret) + pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n", + __func__, ret); + + return ret; +} + +#ifdef CONFIG_MEMORY_HOTREMOVE +void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + struct page *page = pfn_to_page(start_pfn); + + /* With altmap the first mapped page is offset from @start */ + if (altmap) + page += vmem_altmap_offset(altmap); + __remove_pages(start_pfn, nr_pages, altmap); +} +#endif +#endif + +/* + * Align swapper_pg_dir in to 64K, allows its address to be loaded + * with a single LUI instruction in the TLB handlers. If we used + * __aligned(64K), its size would get rounded up to the alignment + * size, and waste space. So we place it in its own section and align + * it in the linker script. + */ +pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir"); + +pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss; +#ifndef __PAGETABLE_PUD_FOLDED +pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss; +#endif +#ifndef __PAGETABLE_PMD_FOLDED +pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss; +EXPORT_SYMBOL_GPL(invalid_pmd_table); +#endif +pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; +EXPORT_SYMBOL(invalid_pte_table); diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c new file mode 100644 index 000000000000..73b0980ab6f5 --- /dev/null +++ b/arch/loongarch/mm/ioremap.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include + +void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size) +{ + return ((void __iomem *)TO_CACHE(phys_addr)); +} + +void __init early_iounmap(void __iomem *addr, unsigned long size) +{ + +} + +void *early_memremap_ro(resource_size_t phys_addr, unsigned long size) +{ + return early_memremap(phys_addr, size); +} + +void *early_memremap_prot(resource_size_t phys_addr, unsigned long size, + unsigned long prot_val) +{ + return early_memremap(phys_addr, size); +} diff --git a/arch/loongarch/mm/maccess.c b/arch/loongarch/mm/maccess.c new file mode 100644 index 000000000000..58173842c6be --- /dev/null +++ b/arch/loongarch/mm/maccess.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) +{ + /* highest bit set means kernel space */ + return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1); +} diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c new file mode 100644 index 000000000000..52e40f0ba732 --- /dev/null +++ b/arch/loongarch/mm/mmap.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ +EXPORT_SYMBOL(shm_align_mask); + +#define COLOUR_ALIGN(addr, pgoff) \ + ((((addr) + shm_align_mask) & ~shm_align_mask) + \ + (((pgoff) << PAGE_SHIFT) & 
shm_align_mask)) + +enum mmap_allocation_direction {UP, DOWN}; + +static unsigned long arch_get_unmapped_area_common(struct file *filp, + unsigned long addr0, unsigned long len, unsigned long pgoff, + unsigned long flags, enum mmap_allocation_direction dir) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long addr = addr0; + int do_color_align; + struct vm_unmapped_area_info info; + + if (unlikely(len > TASK_SIZE)) + return -ENOMEM; + + if (flags & MAP_FIXED) { + /* Even MAP_FIXED mappings must reside within TASK_SIZE */ + if (TASK_SIZE - len < addr) + return -EINVAL; + + /* + * We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ + if ((flags & MAP_SHARED) && + ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) + return -EINVAL; + return addr; + } + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + + /* requesting a specific address */ + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + + info.length = len; + info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + + if (dir == DOWN) { + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.low_limit = PAGE_SIZE; + info.high_limit = mm->mmap_base; + addr = vm_unmapped_area(&info); + + if (!(addr & ~PAGE_MASK)) + return addr; + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + } + + info.flags = 0; + info.low_limit = mm->mmap_base; + info.high_limit = TASK_SIZE; + return vm_unmapped_area(&info); +} + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + return arch_get_unmapped_area_common(filp, + addr0, len, pgoff, flags, UP); +} + +/* + * There is no need to export this but sched.h declares the function as + * extern so making it static here results in an error. 
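+ * Both entry points share arch_get_unmapped_area_common(): the
+ * bottom-up variant scans upward starting at mm->mmap_base, while the
+ * top-down variant first searches the window below mm->mmap_base and
+ * falls back to a bottom-up scan if that window is exhausted.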
+ */ +unsigned long arch_get_unmapped_area_topdown(struct file *filp, + unsigned long addr0, unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + return arch_get_unmapped_area_common(filp, + addr0, len, pgoff, flags, DOWN); +} + +int __virt_addr_valid(volatile void *kaddr) +{ + unsigned long vaddr = (unsigned long)kaddr; + + if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base)) + return 0; + + return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); +} +EXPORT_SYMBOL_GPL(__virt_addr_valid); diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S new file mode 100644 index 000000000000..ddc78ab33c7b --- /dev/null +++ b/arch/loongarch/mm/page.S @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include +#include +#include +#include +#include + + .align 5 +SYM_FUNC_START(clear_page) + lu12i.w t0, 1 << (PAGE_SHIFT - 12) + add.d t0, t0, a0 +1: + st.d zero, a0, 0 + st.d zero, a0, 8 + st.d zero, a0, 16 + st.d zero, a0, 24 + st.d zero, a0, 32 + st.d zero, a0, 40 + st.d zero, a0, 48 + st.d zero, a0, 56 + addi.d a0, a0, 128 + st.d zero, a0, -64 + st.d zero, a0, -56 + st.d zero, a0, -48 + st.d zero, a0, -40 + st.d zero, a0, -32 + st.d zero, a0, -24 + st.d zero, a0, -16 + st.d zero, a0, -8 + bne t0, a0, 1b + + jirl $r0, ra, 0 +SYM_FUNC_END(clear_page) +EXPORT_SYMBOL(clear_page) + +.align 5 +SYM_FUNC_START(copy_page) + lu12i.w t8, 1 << (PAGE_SHIFT - 12) + add.d t8, t8, a0 +1: + ld.d t0, a1, 0 + ld.d t1, a1, 8 + ld.d t2, a1, 16 + ld.d t3, a1, 24 + ld.d t4, a1, 32 + ld.d t5, a1, 40 + ld.d t6, a1, 48 + ld.d t7, a1, 56 + + st.d t0, a0, 0 + st.d t1, a0, 8 + ld.d t0, a1, 64 + ld.d t1, a1, 72 + st.d t2, a0, 16 + st.d t3, a0, 24 + ld.d t2, a1, 80 + ld.d t3, a1, 88 + st.d t4, a0, 32 + st.d t5, a0, 40 + ld.d t4, a1, 96 + ld.d t5, a1, 104 + st.d t6, a0, 48 + st.d t7, a0, 56 + ld.d t6, a1, 112 + ld.d t7, a1, 120 + addi.d a0, a0, 128 + addi.d a1, a1, 128 + + st.d t0, a0, -64 + st.d t1, a0, -56 + st.d t2, a0, -48 + st.d t3, a0, -40 + st.d t4, a0, -32 + st.d t5, a0, -24 + st.d t6, a0, -16 + st.d t7, a0, -8 + + bne t8, a0, 1b + jirl $r0, ra, 0 +SYM_FUNC_END(copy_page) +EXPORT_SYMBOL(copy_page) diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c new file mode 100644 index 000000000000..0569647152e9 --- /dev/null +++ b/arch/loongarch/mm/pgtable.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include +#include +#include +#include +#include +#include + +pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret, *init; + + ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + if (ret) { + init = pgd_offset(&init_mm, 0UL); + pgd_init((unsigned long)ret); + memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + } + + return ret; +} +EXPORT_SYMBOL_GPL(pgd_alloc); + +void pgd_init(unsigned long page) +{ + unsigned long *p, *end; + unsigned long entry; + +#if !defined(__PAGETABLE_PUD_FOLDED) + entry = (unsigned long)invalid_pud_table; +#elif !defined(__PAGETABLE_PMD_FOLDED) + entry = (unsigned long)invalid_pmd_table; +#else + entry = (unsigned long)invalid_pte_table; +#endif + + p = (unsigned long *) page; + end = p + PTRS_PER_PGD; + + do { + p[0] = entry; + p[1] = entry; + p[2] = entry; + p[3] = entry; + p[4] = entry; + p += 8; + p[-3] = entry; + p[-2] = entry; + p[-1] = entry; + } while (p != end); +} +EXPORT_SYMBOL_GPL(pgd_init); + +#ifndef 
__PAGETABLE_PMD_FOLDED +void pmd_init(unsigned long addr, unsigned long pagetable) +{ + unsigned long *p, *end; + + p = (unsigned long *) addr; + end = p + PTRS_PER_PMD; + + do { + p[0] = pagetable; + p[1] = pagetable; + p[2] = pagetable; + p[3] = pagetable; + p[4] = pagetable; + p += 8; + p[-3] = pagetable; + p[-2] = pagetable; + p[-1] = pagetable; + } while (p != end); +} +EXPORT_SYMBOL_GPL(pmd_init); +#endif + +#ifndef __PAGETABLE_PUD_FOLDED +void pud_init(unsigned long addr, unsigned long pagetable) +{ + unsigned long *p, *end; + + p = (unsigned long *)addr; + end = p + PTRS_PER_PUD; + + do { + p[0] = pagetable; + p[1] = pagetable; + p[2] = pagetable; + p[3] = pagetable; + p[4] = pagetable; + p += 8; + p[-3] = pagetable; + p[-2] = pagetable; + p[-1] = pagetable; + } while (p != end); +} +#endif + +pmd_t mk_pmd(struct page *page, pgprot_t prot) +{ + pmd_t pmd; + + pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot); + + return pmd; +} + +void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ + *pmdp = pmd; + flush_tlb_all(); +} + +void __init pagetable_init(void) +{ + /* Initialize the entire pgd. */ + pgd_init((unsigned long)swapper_pg_dir); + pgd_init((unsigned long)invalid_pg_dir); +#ifndef __PAGETABLE_PUD_FOLDED + pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table); +#endif +#ifndef __PAGETABLE_PMD_FOLDED + pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table); +#endif +} diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c new file mode 100644 index 000000000000..78dd328cec77 --- /dev/null +++ b/arch/loongarch/mm/tlb.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +void local_flush_tlb_all(void) +{ + invtlb_all(INVTLB_CURRENT_ALL, 0, 0); +} +EXPORT_SYMBOL(local_flush_tlb_all); + +void local_flush_tlb_user(void) +{ + invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0); +} +EXPORT_SYMBOL(local_flush_tlb_user); + +void local_flush_tlb_kernel(void) +{ + invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0); +} +EXPORT_SYMBOL(local_flush_tlb_kernel); + +/* + * All entries common to a mm share an asid. To effectively flush + * these entries, we just bump the asid. + */ +void local_flush_tlb_mm(struct mm_struct *mm) +{ + int cpu; + + preempt_disable(); + + cpu = smp_processor_id(); + + if (asid_valid(mm, cpu)) + drop_mmu_context(mm, cpu); + else + cpumask_clear_cpu(cpu, mm_cpumask(mm)); + + preempt_enable(); +} + +void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + int cpu = smp_processor_id(); + + if (asid_valid(mm, cpu)) { + unsigned long size, flags; + + local_irq_save(flags); + start = round_down(start, PAGE_SIZE << 1); + end = round_up(end, PAGE_SIZE << 1); + size = (end - start) >> (PAGE_SHIFT + 1); + if (size <= (current_cpu_data.tlbsizestlbsets ? 
+ current_cpu_data.tlbsize / 8 : + current_cpu_data.tlbsize / 2)) { + int asid = cpu_asid(cpu, mm); + + while (start < end) { + invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, start); + start += (PAGE_SIZE << 1); + } + } else { + drop_mmu_context(mm, cpu); + } + local_irq_restore(flags); + } else { + cpumask_clear_cpu(cpu, mm_cpumask(mm)); + } +} + +void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + unsigned long size, flags; + + local_irq_save(flags); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + size = (size + 1) >> 1; + if (size <= (current_cpu_data.tlbsizestlbsets ? + current_cpu_data.tlbsize / 8 : + current_cpu_data.tlbsize / 2)) { + + start &= (PAGE_MASK << 1); + end += ((PAGE_SIZE << 1) - 1); + end &= (PAGE_MASK << 1); + + while (start < end) { + invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start); + start += (PAGE_SIZE << 1); + } + } else { + local_flush_tlb_kernel(); + } + local_irq_restore(flags); +} + +void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + int cpu = smp_processor_id(); + + if (asid_valid(vma->vm_mm, cpu)) { + int newpid; + + newpid = cpu_asid(cpu, vma->vm_mm); + page &= (PAGE_MASK << 1); + invtlb(INVTLB_ADDR_GFALSE_AND_ASID, newpid, page); + } else { + cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm)); + } +} + +/* + * This one is only used for pages with the global bit set so we don't care + * much about the ASID. + */ +void local_flush_tlb_one(unsigned long page) +{ + page &= (PAGE_MASK << 1); + invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page); +} + +static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +{ +#ifdef CONFIG_HUGETLB_PAGE + int idx; + unsigned long lo; + unsigned long flags; + + local_irq_save(flags); + + address &= (PAGE_MASK << 1); + write_csr_entryhi(address); + tlb_probe(); + idx = read_csr_tlbidx(); + write_csr_pagesize(PS_HUGE_SIZE); + lo = pmd_to_entrylo(pte_val(*ptep)); + write_csr_entrylo0(lo); + write_csr_entrylo1(lo + (HPAGE_SIZE >> 1)); + + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + write_csr_pagesize(PS_DEFAULT_SIZE); + + local_irq_restore(flags); +#endif +} + +void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +{ + int idx; + unsigned long flags; + + /* + * Handle debugger faulting in for debugee. 
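+	 * If current->active_mm is not the vma's mm (as when a debugger
+	 * pokes a tracee's memory via ptrace), the fault was taken on
+	 * another task's mm; preloading this CPU's TLB under the current
+	 * ASID would be wrong, so return and let the hardware refill
+	 * path resolve the access later.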
+ */ + if (current->active_mm != vma->vm_mm) + return; + + if (pte_val(*ptep) & _PAGE_HUGE) { + __update_hugetlb(vma, address, ptep); + return; + } + + local_irq_save(flags); + + if ((unsigned long)ptep & sizeof(pte_t)) + ptep--; + + address &= (PAGE_MASK << 1); + write_csr_entryhi(address); + tlb_probe(); + idx = read_csr_tlbidx(); + write_csr_pagesize(PS_DEFAULT_SIZE); + write_csr_entrylo0(pte_val(*ptep++)); + write_csr_entrylo1(pte_val(*ptep)); + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + + local_irq_restore(flags); +} + +static void setup_ptwalker(void) +{ + unsigned long pwctl0, pwctl1; + unsigned long pgd_i = 0, pgd_w = 0; + unsigned long pud_i = 0, pud_w = 0; + unsigned long pmd_i = 0, pmd_w = 0; + unsigned long pte_i = 0, pte_w = 0; + + pgd_i = PGDIR_SHIFT; + pgd_w = PAGE_SHIFT - 3; +#if CONFIG_PGTABLE_LEVELS > 3 + pud_i = PUD_SHIFT; + pud_w = PAGE_SHIFT - 3; +#endif +#if CONFIG_PGTABLE_LEVELS > 2 + pmd_i = PMD_SHIFT; + pmd_w = PAGE_SHIFT - 3; +#endif + pte_i = PAGE_SHIFT; + pte_w = PAGE_SHIFT - 3; + + pwctl0 = pte_i | pte_w << 5 | pmd_i << 10 | pmd_w << 15 | pud_i << 20 | pud_w << 25; + pwctl1 = pgd_i | pgd_w << 6; + + csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0); + csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1); + csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH); + csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID); +} + +static void output_pgtable_bits_defines(void) +{ +#define pr_define(fmt, ...) \ + pr_debug("#define " fmt, ##__VA_ARGS__) + + pr_debug("#include \n"); + pr_debug("#include \n"); + pr_debug("\n"); + + pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); + pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); + pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); + pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); + pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); + pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); + pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); + pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); + pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT); + pr_debug("\n"); +} + +void setup_tlb_handler(void) +{ + static int run_once = 0; + + setup_ptwalker(); + output_pgtable_bits_defines(); + + /* The tlb handlers are generated only once */ + if (!run_once) { + memcpy((void *)tlbrentry, handle_tlb_refill, 0x80); + local_flush_icache_range(tlbrentry, tlbrentry + 0x80); + set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE); + set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE); + set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE); + set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE); + set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE); + set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE); + set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE); + run_once++; + } +} + +void tlb_init(void) +{ + write_csr_pagesize(PS_DEFAULT_SIZE); + write_csr_stlbpgsize(PS_DEFAULT_SIZE); + write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE); + setup_tlb_handler(); + local_flush_tlb_all(); +} diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S new file mode 100644 index 000000000000..bef740710a3b --- /dev/null +++ b/arch/loongarch/mm/tlbex.S @@ -0,0 +1,477 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include +#include +#include +#include +#include +#include +#include + + 
.macro tlb_do_page_fault, write + SYM_FUNC_START(tlb_do_page_fault_\write) + SAVE_ALL + csrrd a2, LOONGARCH_CSR_BADV + move a0, sp + REG_S a2, sp, PT_BVADDR + li.w a1, \write + la.abs t0, do_page_fault + jirl ra, t0, 0 + RESTORE_ALL_AND_RET + SYM_FUNC_END(tlb_do_page_fault_\write) + .endm + + tlb_do_page_fault 0 + tlb_do_page_fault 1 + +SYM_FUNC_START(handle_tlb_protect) + BACKUP_T0T1 + SAVE_ALL + move a0, sp + move a1, zero + csrrd a2, LOONGARCH_CSR_BADV + REG_S a2, sp, PT_BVADDR + la.abs t0, do_page_fault + jirl ra, t0, 0 + RESTORE_ALL_AND_RET +SYM_FUNC_END(handle_tlb_protect) + +SYM_FUNC_START(handle_tlb_load) + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 + + /* + * The vmalloc handling is not in the hotpath. + */ + csrrd t0, LOONGARCH_CSR_BADV + blt t0, $r0, vmalloc_load + csrrd t1, LOONGARCH_CSR_PGDL + +vmalloc_done_load: + /* Get PGD offset in bytes */ + srli.d t0, t0, PGDIR_SHIFT + andi t0, t0, (PTRS_PER_PGD - 1) + slli.d t0, t0, 3 + add.d t1, t1, t0 +#if CONFIG_PGTABLE_LEVELS > 3 + csrrd t0, LOONGARCH_CSR_BADV + ld.d t1, t1, 0 + srli.d t0, t0, PUD_SHIFT + andi t0, t0, (PTRS_PER_PUD - 1) + slli.d t0, t0, 3 + add.d t1, t1, t0 +#endif +#if CONFIG_PGTABLE_LEVELS > 2 + csrrd t0, LOONGARCH_CSR_BADV + ld.d t1, t1, 0 + srli.d t0, t0, PMD_SHIFT + andi t0, t0, (PTRS_PER_PMD - 1) + slli.d t0, t0, 3 + add.d t1, t1, t0 +#endif + ld.d ra, t1, 0 + + /* + * For huge tlb entries, pmde doesn't contain an address but + * instead contains the tlb pte. Check the PAGE_HUGE bit and + * see if we need to jump to huge tlb processing. + */ + andi t0, ra, _PAGE_HUGE + bne t0, $r0, tlb_huge_update_load + + csrrd t0, LOONGARCH_CSR_BADV + srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER) + andi t0, t0, (PTRS_PER_PTE - 1) + slli.d t0, t0, _PTE_T_LOG2 + add.d t1, ra, t0 + + ld.d t0, t1, 0 + tlbsrch + + srli.d ra, t0, _PAGE_PRESENT_SHIFT + andi ra, ra, 1 + beq ra, $r0, nopage_tlb_load + + ori t0, t0, _PAGE_VALID + st.d t0, t1, 0 + ori t1, t1, 8 + xori t1, t1, 8 + ld.d t0, t1, 0 + ld.d t1, t1, 8 + csrwr t0, LOONGARCH_CSR_TLBELO0 + csrwr t1, LOONGARCH_CSR_TLBELO1 + tlbwr +leave_load: + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn +#ifdef CONFIG_64BIT +vmalloc_load: + la.abs t1, swapper_pg_dir + b vmalloc_done_load +#endif + + /* + * This is the entry point when build_tlbchange_handler_head + * spots a huge page. + */ +tlb_huge_update_load: + ld.d t0, t1, 0 + srli.d ra, t0, _PAGE_PRESENT_SHIFT + andi ra, ra, 1 + beq ra, $r0, nopage_tlb_load + tlbsrch + + ori t0, t0, _PAGE_VALID + st.d t0, t1, 0 + addu16i.d t1, $r0, -(CSR_TLBIDX_EHINV >> 16) + addi.d ra, t1, 0 + csrxchg ra, t1, LOONGARCH_CSR_TLBIDX + tlbwr + + csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX + + /* + * A huge PTE describes an area the size of the + * configured huge page size. This is twice the + * of the large TLB entry size we intend to use. + * A TLB entry half the size of the configured + * huge page size is configured into entrylo0 + * and entrylo1 to cover the contiguous huge PTE + * address space. 
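+	 * (Example, assuming the default 16KB base page size: a PMD-level
+	 * huge page is then 32MB, and each of ENTRYLO0/ENTRYLO1 maps one
+	 * contiguous 16MB half of it.)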
+ */ + /* Huge page: Move Global bit */ + xori t0, t0, _PAGE_HUGE + lu12i.w t1, _PAGE_HGLOBAL >> 12 + and t1, t0, t1 + srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + or t0, t0, t1 + + addi.d ra, t0, 0 + csrwr t0, LOONGARCH_CSR_TLBELO0 + addi.d t0, ra, 0 + + /* Convert to entrylo1 */ + addi.d t1, $r0, 1 + slli.d t1, t1, (HPAGE_SHIFT - 1) + add.d t0, t0, t1 + csrwr t0, LOONGARCH_CSR_TLBELO1 + + /* Set huge page tlb entry size */ + addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + + tlbfill + + addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + +nopage_tlb_load: + csrrd ra, EXCEPTION_KS2 + la.abs t0, tlb_do_page_fault_0 + jirl $r0, t0, 0 +SYM_FUNC_END(handle_tlb_load) + +SYM_FUNC_START(handle_tlb_store) + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 + + /* + * The vmalloc handling is not in the hotpath. + */ + csrrd t0, LOONGARCH_CSR_BADV + blt t0, $r0, vmalloc_store + csrrd t1, LOONGARCH_CSR_PGDL + +vmalloc_done_store: + /* Get PGD offset in bytes */ + srli.d t0, t0, PGDIR_SHIFT + andi t0, t0, (PTRS_PER_PGD - 1) + slli.d t0, t0, 3 + add.d t1, t1, t0 + +#if CONFIG_PGTABLE_LEVELS > 3 + csrrd t0, LOONGARCH_CSR_BADV + ld.d t1, t1, 0 + srli.d t0, t0, PUD_SHIFT + andi t0, t0, (PTRS_PER_PUD - 1) + slli.d t0, t0, 3 + add.d t1, t1, t0 +#endif +#if CONFIG_PGTABLE_LEVELS > 2 + csrrd t0, LOONGARCH_CSR_BADV + ld.d t1, t1, 0 + srli.d t0, t0, PMD_SHIFT + andi t0, t0, (PTRS_PER_PMD - 1) + slli.d t0, t0, 3 + add.d t1, t1, t0 +#endif + ld.d ra, t1, 0 + + /* + * For huge tlb entries, pmde doesn't contain an address but + * instead contains the tlb pte. Check the PAGE_HUGE bit and + * see if we need to jump to huge tlb processing. + */ + andi t0, ra, _PAGE_HUGE + bne t0, $r0, tlb_huge_update_store + + csrrd t0, LOONGARCH_CSR_BADV + srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER) + andi t0, t0, (PTRS_PER_PTE - 1) + slli.d t0, t0, _PTE_T_LOG2 + add.d t1, ra, t0 + + ld.d t0, t1, 0 + tlbsrch + + srli.d ra, t0, _PAGE_PRESENT_SHIFT + andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT) + xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT) + bne ra, $r0, nopage_tlb_store + + ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + st.d t0, t1, 0 + + ori t1, t1, 8 + xori t1, t1, 8 + ld.d t0, t1, 0 + ld.d t1, t1, 8 + csrwr t0, LOONGARCH_CSR_TLBELO0 + csrwr t1, LOONGARCH_CSR_TLBELO1 + tlbwr +leave_store: + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn +#ifdef CONFIG_64BIT +vmalloc_store: + la.abs t1, swapper_pg_dir + b vmalloc_done_store +#endif + + /* + * This is the entry point when build_tlbchange_handler_head + * spots a huge page. + */ +tlb_huge_update_store: + ld.d t0, t1, 0 + srli.d ra, t0, _PAGE_PRESENT_SHIFT + andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT) + xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT) + bne ra, $r0, nopage_tlb_store + + tlbsrch + ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + + st.d t0, t1, 0 + addu16i.d t1, $r0, -(CSR_TLBIDX_EHINV >> 16) + addi.d ra, t1, 0 + csrxchg ra, t1, LOONGARCH_CSR_TLBIDX + tlbwr + + csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX + /* + * A huge PTE describes an area the size of the + * configured huge page size. This is twice the + * of the large TLB entry size we intend to use. 
+ * A TLB entry half the size of the configured + * huge page size is configured into entrylo0 + * and entrylo1 to cover the contiguous huge PTE + * address space. + */ + /* Huge page: Move Global bit */ + xori t0, t0, _PAGE_HUGE + lu12i.w t1, _PAGE_HGLOBAL >> 12 + and t1, t0, t1 + srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + or t0, t0, t1 + + addi.d ra, t0, 0 + csrwr t0, LOONGARCH_CSR_TLBELO0 + addi.d t0, ra, 0 + + /* Convert to entrylo1 */ + addi.d t1, $r0, 1 + slli.d t1, t1, (HPAGE_SHIFT - 1) + add.d t0, t0, t1 + csrwr t0, LOONGARCH_CSR_TLBELO1 + + /* Set huge page tlb entry size */ + addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + + tlbfill + + /* Reset default page size */ + addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + +nopage_tlb_store: + csrrd ra, EXCEPTION_KS2 + la.abs t0, tlb_do_page_fault_1 + jirl $r0, t0, 0 +SYM_FUNC_END(handle_tlb_store) + +SYM_FUNC_START(handle_tlb_modify) + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 + + /* + * The vmalloc handling is not in the hotpath. + */ + csrrd t0, LOONGARCH_CSR_BADV + blt t0, $r0, vmalloc_modify + csrrd t1, LOONGARCH_CSR_PGDL + +vmalloc_done_modify: + /* Get PGD offset in bytes */ + srli.d t0, t0, PGDIR_SHIFT + andi t0, t0, (PTRS_PER_PGD - 1) + slli.d t0, t0, 3 + add.d t1, t1, t0 +#if CONFIG_PGTABLE_LEVELS > 3 + csrrd t0, LOONGARCH_CSR_BADV + ld.d t1, t1, 0 + srli.d t0, t0, PUD_SHIFT + andi t0, t0, (PTRS_PER_PUD - 1) + slli.d t0, t0, 3 + add.d t1, t1, t0 +#endif +#if CONFIG_PGTABLE_LEVELS > 2 + csrrd t0, LOONGARCH_CSR_BADV + ld.d t1, t1, 0 + srli.d t0, t0, PMD_SHIFT + andi t0, t0, (PTRS_PER_PMD - 1) + slli.d t0, t0, 3 + add.d t1, t1, t0 +#endif + ld.d ra, t1, 0 + + /* + * For huge tlb entries, pmde doesn't contain an address but + * instead contains the tlb pte. Check the PAGE_HUGE bit and + * see if we need to jump to huge tlb processing. + */ + andi t0, ra, _PAGE_HUGE + bne t0, $r0, tlb_huge_update_modify + + csrrd t0, LOONGARCH_CSR_BADV + srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER) + andi t0, t0, (PTRS_PER_PTE - 1) + slli.d t0, t0, _PTE_T_LOG2 + add.d t1, ra, t0 + + ld.d t0, t1, 0 + tlbsrch + + srli.d ra, t0, _PAGE_WRITE_SHIFT + andi ra, ra, 1 + beq ra, $r0, nopage_tlb_modify + + ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + st.d t0, t1, 0 + ori t1, t1, 8 + xori t1, t1, 8 + ld.d t0, t1, 0 + ld.d t1, t1, 8 + csrwr t0, LOONGARCH_CSR_TLBELO0 + csrwr t1, LOONGARCH_CSR_TLBELO1 + tlbwr +leave_modify: + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn +#ifdef CONFIG_64BIT +vmalloc_modify: + la.abs t1, swapper_pg_dir + b vmalloc_done_modify +#endif + + /* + * This is the entry point when + * build_tlbchange_handler_head spots a huge page. + */ +tlb_huge_update_modify: + ld.d t0, t1, 0 + + srli.d ra, t0, _PAGE_WRITE_SHIFT + andi ra, ra, 1 + beq ra, $r0, nopage_tlb_modify + + tlbsrch + ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + + st.d t0, t1, 0 + /* + * A huge PTE describes an area the size of the + * configured huge page size. This is twice the + * of the large TLB entry size we intend to use. + * A TLB entry half the size of the configured + * huge page size is configured into entrylo0 + * and entrylo1 to cover the contiguous huge PTE + * address space. 
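+	 * Unlike the load/store paths, which invalidate the old entry and
+	 * then use "tlbfill", the modify path already hit a matching entry
+	 * in the "tlbsrch" above, so the "tlbwr" below rewrites that
+	 * indexed entry in place.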
+ */ + /* Huge page: Move Global bit */ + xori t0, t0, _PAGE_HUGE + lu12i.w t1, _PAGE_HGLOBAL >> 12 + and t1, t0, t1 + srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + or t0, t0, t1 + + addi.d ra, t0, 0 + csrwr t0, LOONGARCH_CSR_TLBELO0 + addi.d t0, ra, 0 + + /* Convert to entrylo1 */ + addi.d t1, $r0, 1 + slli.d t1, t1, (HPAGE_SHIFT - 1) + add.d t0, t0, t1 + csrwr t0, LOONGARCH_CSR_TLBELO1 + + /* Set huge page tlb entry size */ + addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + + tlbwr + + /* Reset default page size */ + addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16) + addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + +nopage_tlb_modify: + csrrd ra, EXCEPTION_KS2 + la.abs t0, tlb_do_page_fault_1 + jirl $r0, t0, 0 +SYM_FUNC_END(handle_tlb_modify) + +SYM_FUNC_START(handle_tlb_refill) + csrwr t0, LOONGARCH_CSR_TLBRSAVE + csrrd t0, LOONGARCH_CSR_PGD + lddir t0, t0, 3 +#if CONFIG_PGTABLE_LEVELS > 3 + lddir t0, t0, 2 +#endif +#if CONFIG_PGTABLE_LEVELS > 2 + lddir t0, t0, 1 +#endif + ldpte t0, 0 + ldpte t0, 1 + tlbfill + csrrd t0, LOONGARCH_CSR_TLBRSAVE + ertn +SYM_FUNC_END(handle_tlb_refill) -- cgit v1.2.1