author     Linus Torvalds <torvalds@linux-foundation.org>	2021-11-05 08:15:46 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2021-11-05 08:15:46 -0700
commit     5c0b0c676ac2d84f69568715af91e45b610fe17a (patch)
tree       b417c3b16ec559f58182be398ba9193432400ed4 /arch/powerpc/mm
parent     a3f36773802d44d1e50e7c4c09b3e17018581d11 (diff)
parent     c12ab8dbc492b992e1ea717db933cee568780c47 (diff)
download   linux-stable-5c0b0c676ac2d84f69568715af91e45b610fe17a.tar.gz
Merge tag 'powerpc-5.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
- Enable STRICT_KERNEL_RWX for Freescale 85xx platforms (see the
  illustrative sketch after this list).
- Activate CONFIG_STRICT_KERNEL_RWX by default, while still allowing it
to be disabled.
- Add support for out-of-line static calls on 32-bit.
- Fix oopses doing bpf-to-bpf calls when STRICT_KERNEL_RWX is enabled.
- Fix boot hangs on e5500 due to stale value in ESR passed to
do_page_fault().
- Fix several bugs on pseries in handling of device tree cache
  information for hotplugged CPUs and during partition migration.
- Various other small features and fixes.
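The 85xx STRICT_KERNEL_RWX support in this pull works by mapping the linear
range in two pieces: everything up to the end of kernel text is mapped
read-only + execute, and the remainder read-write + no-execute (see
map_mem_in_cams_addr() in the diff below). Here is a minimal, standalone C
sketch of the resulting layout; the sizes, the map_block() helper, and the
permission constants are invented for illustration and are not kernel API:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's pgprot constants. */
#define PAGE_KERNEL_ROX  1	/* read + execute, no write */
#define PAGE_KERNEL      2	/* read + write, no execute */

/* Pretend layout: 16 MB of lowmem, of which the first 4 MB is kernel text. */
static const unsigned long ram_size  = 16UL << 20;
static const unsigned long text_size = 4UL << 20;

/* Stand-in for settlbcam(): just report what would be mapped. */
static void map_block(int idx, unsigned long start, unsigned long size, int prot)
{
	printf("CAM %d: 0x%08lx +%2lu MB  %s\n", idx, start, size >> 20,
	       prot == PAGE_KERNEL_ROX ? "ROX" : "RW+NX");
}

int main(void)
{
	unsigned long addr = 0;
	int i = 0;

	/* Phase 1: cover the text region, executable but read-only. */
	map_block(i++, addr, text_size, PAGE_KERNEL_ROX);
	addr += text_size;

	/* Phase 2: cover the remaining RAM, writable but non-executable. */
	map_block(i++, addr, ram_size - text_size, PAGE_KERNEL);
	return 0;
}

Note that, per the diff, the initial boot-time pass still maps everything
PAGE_KERNEL_X; the ROX/RW split sketched above is only applied when
mmu_mark_initmem_nx() re-runs the CAM mapping with init = false.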
Thanks to Alexey Kardashevskiy, Alistair Popple, Anatolij Gustschin,
Andrew Donnellan, Athira Rajeev, Bixuan Cui, Bjorn Helgaas, Cédric Le
Goater, Christophe Leroy, Daniel Axtens, Daniel Henrique Barboza, Denis
Kirjanov, Fabiano Rosas, Frederic Barrat, Gustavo A. R. Silva, Hari
Bathini, Jacques de Laval, Joel Stanley, Kai Song, Kajol Jain, Laurent
Vivier, Leonardo Bras, Madhavan Srinivasan, Nathan Chancellor, Nathan
Lynch, Naveen N. Rao, Nicholas Piggin, Nick Desaulniers, Niklas
Schnelle, Oliver O'Halloran, Rob Herring, Russell Currey, Srikar
Dronamraju, Stan Johnson, Tyrel Datwyler, Uwe Kleine-König, Vasant
Hegde, Wan Jiabing, and Xiaoming Ni.
* tag 'powerpc-5.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (73 commits)
powerpc/8xx: Fix Oops with STRICT_KERNEL_RWX without DEBUG_RODATA_TEST
powerpc/32e: Ignore ESR in instruction storage interrupt handler
powerpc/powernv/prd: Unregister OPAL_MSG_PRD2 notifier during module unload
powerpc: Don't provide __kernel_map_pages() without ARCH_SUPPORTS_DEBUG_PAGEALLOC
MAINTAINERS: Update powerpc KVM entry
powerpc/xmon: fix task state output
powerpc/44x/fsp2: add missing of_node_put
powerpc/dcr: Use cmplwi instead of 3-argument cmpli
KVM: PPC: Tick accounting should defer vtime accounting 'til after IRQ handling
powerpc/security: Use a mutex for interrupt exit code patching
powerpc/83xx/mpc8349emitx: Make mcu_gpiochip_remove() return void
powerpc/fsl_booke: Fix setting of exec flag when setting TLBCAMs
powerpc/book3e: Fix set_memory_x() and set_memory_nx()
powerpc/nohash: Fix __ptep_set_access_flags() and ptep_set_wrprotect()
powerpc/bpf: Fix write protecting JIT code
selftests/powerpc: Use date instead of EPOCHSECONDS in mitigation-patching.sh
powerpc/64s/interrupt: Fix check_return_regs_valid() false positive
powerpc/boot: Set LC_ALL=C in wrapper script
powerpc/64s: Default to 64K pages for 64 bit book3s
Revert "powerpc/audit: Convert powerpc to AUDIT_ARCH_COMPAT_GENERIC"
...
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c                                        |  2
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c                                     |  7
-rw-r--r--  arch/powerpc/mm/mem.c                                                        |  4
-rw-r--r--  arch/powerpc/mm/mmu_decl.h                                                   |  4
-rw-r--r--  arch/powerpc/mm/nohash/Makefile                                              |  4
-rw-r--r--  arch/powerpc/mm/nohash/fsl_book3e.c (renamed from arch/powerpc/mm/nohash/fsl_booke.c) | 76
-rw-r--r--  arch/powerpc/mm/nohash/kaslr_booke.c                                         |  2
-rw-r--r--  arch/powerpc/mm/nohash/tlb.c                                                 |  6
-rw-r--r--  arch/powerpc/mm/nohash/tlb_low.S                                             |  8
-rw-r--r--  arch/powerpc/mm/nohash/tlb_low_64e.S                                         |  8
-rw-r--r--  arch/powerpc/mm/pgtable.c                                                    |  2
-rw-r--r--  arch/powerpc/mm/pgtable_32.c                                                 |  2
12 files changed, 92 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index c145776d3ae5..cfd45245d009 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1988,7 +1988,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 		     mmu_kernel_ssize, 0);
 }
 
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long flags, vaddr, lmi;
 	int i;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index ae20add7954a..3a600bd7fbc6 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -920,6 +920,13 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
 #endif
 #endif
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void radix__kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index c3c4e31462ec..bd5d91a31183 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -20,8 +20,8 @@
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include <asm/kasan.h>
-#include <asm/sparsemem.h>
 #include <asm/svm.h>
+#include <asm/mmzone.h>
 
 #include <mm/mmu_decl.h>
 
@@ -256,7 +256,7 @@ void __init mem_init(void)
 #endif
 
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-	set_max_mapnr(max_pfn);
+	set_max_mapnr(max_low_pfn);
 
 	kasan_late_init();
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index dd1cabc2ea0f..0dd4c18f8363 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -126,7 +126,7 @@ unsigned long mmu_mapin_ram(unsigned long base, unsigned long top);
 
 #ifdef CONFIG_PPC_FSL_BOOK3E
 extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx,
-				     bool dryrun);
+				     bool dryrun, bool init);
 extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
 				 phys_addr_t phys);
 #ifdef CONFIG_PPC32
@@ -168,7 +168,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; }
 static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
 #endif
 
-#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_FSL_BOOK3E)
 void mmu_mark_initmem_nx(void);
 void mmu_mark_rodata_ro(void);
 #else
diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
index 0424f6ce5bd8..b1f630d423d8 100644
--- a/arch/powerpc/mm/nohash/Makefile
+++ b/arch/powerpc/mm/nohash/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_PPC_BOOK3E_64)	+= tlb_low_64e.o book3e_pgtable.o
 obj-$(CONFIG_40x)		+= 40x.o
 obj-$(CONFIG_44x)		+= 44x.o
 obj-$(CONFIG_PPC_8xx)		+= 8xx.o
-obj-$(CONFIG_PPC_FSL_BOOK3E)	+= fsl_booke.o
+obj-$(CONFIG_PPC_FSL_BOOK3E)	+= fsl_book3e.o
 obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr_booke.o
 ifdef CONFIG_HUGETLB_PAGE
 obj-$(CONFIG_PPC_FSL_BOOK3E)	+= book3e_hugetlbpage.o
@@ -16,4 +16,4 @@ endif
 # Disable kcov instrumentation on sensitive code
 # This is necessary for booting with kcov enabled on book3e machines
 KCOV_INSTRUMENT_tlb.o := n
-KCOV_INSTRUMENT_fsl_booke.o := n
+KCOV_INSTRUMENT_fsl_book3e.o := n
diff --git a/arch/powerpc/mm/nohash/fsl_booke.c b/arch/powerpc/mm/nohash/fsl_book3e.c
index 03dacbe940e5..b231a54f540c 100644
--- a/arch/powerpc/mm/nohash/fsl_booke.c
+++ b/arch/powerpc/mm/nohash/fsl_book3e.c
@@ -122,15 +122,18 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
 	TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
 	TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
 
-	TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SX | MAS3_SR;
-	TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
+	TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
+	TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
 	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
 		TLBCAM[index].MAS7 = (u64)phys >> 32;
 
 	/* Below is unlikely -- only for large user pages or similar */
 	if (pte_user(__pte(flags))) {
-		TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
-		TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+		TLBCAM[index].MAS3 |= MAS3_UR;
+		TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
+		TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
+	} else {
+		TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
 	}
 
 	tlbcam_addrs[index].start = virt;
@@ -165,19 +168,38 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
 static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
 					unsigned long ram, int max_cam_idx,
-					bool dryrun)
+					bool dryrun, bool init)
 {
 	int i;
 	unsigned long amount_mapped = 0;
+	unsigned long boundary;
+
+	if (strict_kernel_rwx_enabled())
+		boundary = (unsigned long)(_sinittext - _stext);
+	else
+		boundary = ram;
 
 	/* Calculate CAM values */
-	for (i = 0; ram && i < max_cam_idx; i++) {
+	for (i = 0; boundary && i < max_cam_idx; i++) {
+		unsigned long cam_sz;
+		pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX;
+
+		cam_sz = calc_cam_sz(boundary, virt, phys);
+		if (!dryrun)
+			settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
+
+		boundary -= cam_sz;
+		amount_mapped += cam_sz;
+		virt += cam_sz;
+		phys += cam_sz;
+	}
+	for (ram -= amount_mapped; ram && i < max_cam_idx; i++) {
 		unsigned long cam_sz;
+		pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL;
 
 		cam_sz = calc_cam_sz(ram, virt, phys);
 		if (!dryrun)
-			settlbcam(i, virt, phys, cam_sz,
-				  pgprot_val(PAGE_KERNEL_X), 0);
+			settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
 
 		ram -= cam_sz;
 		amount_mapped += cam_sz;
@@ -188,8 +210,13 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
 	if (dryrun)
 		return amount_mapped;
 
-	loadcam_multi(0, i, max_cam_idx);
-	tlbcam_index = i;
+	if (init) {
+		loadcam_multi(0, i, max_cam_idx);
+		tlbcam_index = i;
+	} else {
+		loadcam_multi(0, i, 0);
+		WARN_ON(i > tlbcam_index);
+	}
 
 #ifdef CONFIG_PPC64
 	get_paca()->tcd.esel_next = i;
@@ -200,12 +227,12 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
 	return amount_mapped;
 }
 
-unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun)
+unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init)
 {
 	unsigned long virt = PAGE_OFFSET;
 	phys_addr_t phys = memstart_addr;
 
-	return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun);
+	return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init);
 }
 
 #ifdef CONFIG_PPC32
@@ -246,7 +273,7 @@ void __init adjust_total_lowmem(void)
 	ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
 
 	i = switch_to_as1();
-	__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false);
+	__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
 	restore_to_as0(i, 0, 0, 1);
 
 	pr_info("Memory CAM mapping: ");
@@ -258,6 +285,25 @@ void __init adjust_total_lowmem(void)
 	memblock_set_current_limit(memstart_addr + __max_low_memory);
 }
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mmu_mark_rodata_ro(void)
+{
+	/* Everything is done in mmu_mark_initmem_nx() */
+}
+#endif
+
+void mmu_mark_initmem_nx(void)
+{
+	unsigned long remapped;
+
+	if (!strict_kernel_rwx_enabled())
+		return;
+
+	remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
+
+	WARN_ON(__max_low_memory != remapped);
+}
+
 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				phys_addr_t first_memblock_size)
 {
@@ -317,11 +363,11 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
 		/* map a 64M area for the second relocation */
 		if (memstart_addr > start)
 			map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
-					false);
+					false, true);
 		else
 			map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
 					0x4000000, CONFIG_LOWMEM_CAM_NUM,
-					false);
+					false, true);
 		restore_to_as0(n, offset, __va(dt_ptr), 1);
 		/* We should never reach here */
 		panic("Relocation error");
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 4c74e8a5482b..8fc49b1b4a91 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -314,7 +314,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
 		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");
 
 	ram = min_t(phys_addr_t, __max_low_memory, size);
-	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true);
+	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false);
 	linear_sz = min_t(unsigned long, ram, SZ_512M);
 
 	/* If the linear size is smaller than 64M, do not randmize */
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index 5872f69141d5..89353d4f5604 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -185,6 +185,7 @@ EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
  * processor
  */
 
+#ifndef CONFIG_PPC_8xx
 /*
  * These are the base non-SMP variants of page and mm flushing
  */
@@ -218,6 +219,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 			       mmu_get_tsize(mmu_virtual_psize), 0);
 }
 EXPORT_SYMBOL(local_flush_tlb_page);
+#endif
 
 /*
  * And here are the SMP non-local implementations
@@ -643,7 +645,7 @@ static void early_init_this_mmu(void)
 
 		if (map)
 			linear_map_top = map_mem_in_cams(linear_map_top,
-							 num_cams, false);
+							 num_cams, true, true);
 	}
 #endif
 
@@ -764,7 +766,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
 
 		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
-					    true);
+					    false, true);
 		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
 	} else
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index 5add4a51e51f..dd39074de9af 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -369,7 +369,7 @@ _GLOBAL(_tlbivax_bcast)
  * extern void loadcam_entry(unsigned int index)
  *
  * Load TLBCAM[index] entry in to the L2 CAM MMU
- * Must preserve r7, r8, r9, r10 and r11
+ * Must preserve r7, r8, r9, r10, r11, r12
  */
 _GLOBAL(loadcam_entry)
 	mflr	r5
@@ -401,7 +401,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
  *
  * r3 = first entry to write
  * r4 = number of entries to write
- * r5 = temporary tlb entry
+ * r5 = temporary tlb entry (0 means no switch to AS1)
  */
 _GLOBAL(loadcam_multi)
 	mflr	r8
@@ -409,6 +409,8 @@ _GLOBAL(loadcam_multi)
 	mfmsr	r11
 	andi.	r11,r11,MSR_IS
 	bne	10f
+	mr.	r12, r5
+	beq	10f
 
 	/*
 	 * Set up temporary TLB entry that is the same as what we're
@@ -446,6 +448,8 @@ _GLOBAL(loadcam_multi)
 	/* Don't return to AS=0 if we were in AS=1 at function start */
 	andi.	r11,r11,MSR_IS
 	bne	3f
+	cmpwi	r12, 0
+	beq	3f
 
 	/* Return to AS=0 and clear the temporary entry */
 	mfmsr	r6
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index bf24451f3e71..9235e720e357 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -222,7 +222,7 @@ tlb_miss_kernel_bolted:
 
 tlb_miss_fault_bolted:
 	/* We need to check if it was an instruction miss */
-	andi.	r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
+	andi.	r10,r11,_PAGE_BAP_UX|_PAGE_BAP_SX
 	bne	itlb_miss_fault_bolted
 dtlb_miss_fault_bolted:
 	tlb_epilog_bolted
@@ -239,7 +239,7 @@ itlb_miss_fault_bolted:
 	srdi	r15,r16,60		/* get region */
 	bne-	itlb_miss_fault_bolted
 
-	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */
+	li	r11,_PAGE_PRESENT|_PAGE_BAP_UX	/* Base perm */
 
 	/* We do the user/kernel test for the PID here along with the RW test
 	 */
@@ -614,7 +614,7 @@ itlb_miss_fault_e6500:
 
 	/* We do the user/kernel test for the PID here along with the RW test
 	 */
-	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */
+	li	r11,_PAGE_PRESENT|_PAGE_BAP_UX	/* Base perm */
 	oris	r11,r11,_PAGE_ACCESSED@h
 
 	cmpldi	cr0,r15,0			/* Check for user region */
@@ -734,7 +734,7 @@ normal_tlb_miss_done:
 
 normal_tlb_miss_access_fault:
 	/* We need to check if it was an instruction miss */
-	andi.	r10,r11,_PAGE_EXEC
+	andi.	r10,r11,_PAGE_BAP_UX
 	bne	1f
 	ld	r14,EX_TLB_DEAR(r12)
 	ld	r15,EX_TLB_ESR(r12)
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index cd16b407f47e..ce9482383144 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -271,7 +271,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_
 {
 	pmd_t *pmd = pmd_off(mm, addr);
 	pte_basic_t val;
-	pte_basic_t *entry = &ptep->pte;
+	pte_basic_t *entry = (pte_basic_t *)ptep;
 	int num, i;
 
 	/*
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index dcf5ecca19d9..fde1ed445ca4 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -173,7 +173,7 @@ void mark_rodata_ro(void)
 }
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long addr = (unsigned long)page_address(page);
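A note on the settlbcam() change above: execute permission in the TLBCAM's
MAS3 field is now derived from _PAGE_EXEC instead of being granted
unconditionally via MAS3_SX, which is what allows kernel text to be mapped
ROX while data stays RW and non-executable. A standalone C sketch of that
permission derivation follows; the bit values are placeholders rather than
the real MAS3/_PAGE_* definitions, and the user test is simplified to a
flag check where the kernel actually uses pte_user():

#include <stdio.h>

/* Hypothetical bit values standing in for the kernel's macros. */
#define _PAGE_RW    0x001
#define _PAGE_EXEC  0x002
#define _PAGE_USER  0x004

#define MAS3_SR  0x01	/* supervisor read */
#define MAS3_SW  0x02	/* supervisor write */
#define MAS3_SX  0x04	/* supervisor execute */
#define MAS3_UR  0x08	/* user read */
#define MAS3_UW  0x10	/* user write */
#define MAS3_UX  0x20	/* user execute */

/* Mirrors the reworked settlbcam() logic: execute bits follow _PAGE_EXEC. */
static unsigned int mas3_perms(unsigned long flags)
{
	unsigned int mas3 = MAS3_SR;

	mas3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
	if (flags & _PAGE_USER) {
		mas3 |= MAS3_UR;
		mas3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
		mas3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
	} else {
		mas3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
	}
	return mas3;
}

int main(void)
{
	printf("kernel text (ROX): 0x%x\n", mas3_perms(_PAGE_EXEC));
	printf("kernel data (RW):  0x%x\n", mas3_perms(_PAGE_RW));
	return 0;
}

With the old code, the kernel-data case would also have carried MAS3_SX,
leaving every CAM-mapped region executable; dropping that default is the
core of the set_memory_nx()/strict-RWX fixes in this series.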