path: root/arch/powerpc/kernel
author     Linus Torvalds <torvalds@linux-foundation.org>  2022-08-06 16:38:17 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-08-06 16:38:17 -0700
commit     cae4199f9319f42534ee2e2e4aadf183b9bb7f73 (patch)
tree       b0028cbba73deaaf842fcb72e7c0dc0a0e0a9923 /arch/powerpc/kernel
parent     4d1044fcb996e8de9b9ab392f4a767890e45202d (diff)
parent     4cfa6ff24a9744ba484521c38bea613134fbfcb3 (diff)
download   linux-cae4199f9319f42534ee2e2e4aadf183b9bb7f73.tar.gz
Merge tag 'powerpc-6.0-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:

 - Add support for syscall stack randomization

 - Add support for atomic operations to the 32 & 64-bit BPF JIT

 - Full support for KASAN on 64-bit Book3E

 - Add a watchdog driver for the new PowerVM hypervisor watchdog

 - Add a number of new selftests for the Power10 PMU support

 - Add a driver for the PowerVM Platform KeyStore

 - Increase the NMI watchdog timeout during live partition migration,
   to avoid timeouts due to increased memory access latency

 - Add support for using the 'linux,pci-domain' device tree property
   for PCI domain assignment

 - Many other small features and fixes

Thanks to Alexey Kardashevskiy, Andy Shevchenko, Arnd Bergmann, Athira
Rajeev, Bagas Sanjaya, Christophe Leroy, Erhard Furtner, Fabiano Rosas,
Greg Kroah-Hartman, Greg Kurz, Haowen Bai, Hari Bathini, Jason A.
Donenfeld, Jason Wang, Jiang Jian, Joel Stanley, Juerg Haefliger, Kajol
Jain, Kees Cook, Laurent Dufour, Madhavan Srinivasan, Masahiro Yamada,
Maxime Bizon, Miaoqian Lin, Murilo Opsfelder Araújo, Nathan Lynch,
Naveen N. Rao, Nayna Jain, Nicholas Piggin, Ning Qiang, Pali Rohár,
Petr Mladek, Rashmica Gupta, Sachin Sant, Scott Cheloha, Segher
Boessenkool, Stephen Rothwell, Uwe Kleine-König, Wolfram Sang, Xiu
Jianfeng, and Zhouyi Zhou.

* tag 'powerpc-6.0-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (191 commits)
  powerpc/64e: Fix kexec build error
  EDAC/ppc_4xx: Include required of_irq header directly
  powerpc/pci: Fix PHB numbering when using opal-phbid
  powerpc/64: Init jump labels before parse_early_param()
  selftests/powerpc: Avoid GCC 12 uninitialised variable warning
  powerpc/cell/axon_msi: Fix refcount leak in setup_msi_msg_address
  powerpc/xive: Fix refcount leak in xive_get_max_prio
  powerpc/spufs: Fix refcount leak in spufs_init_isolated_loader
  powerpc/perf: Include caps feature for power10 DD1 version
  powerpc: add support for syscall stack randomization
  powerpc: Move system_call_exception() to syscall.c
  powerpc/powernv: rename remaining rng powernv_ functions to pnv_
  powerpc/powernv/kvm: Use darn for H_RANDOM on Power9
  powerpc/powernv: Avoid crashing if rng is NULL
  selftests/powerpc: Fix matrix multiply assist test
  powerpc/signal: Update comment for clarity
  powerpc: make facility_unavailable_exception 64s
  powerpc/platforms/83xx/suspend: Remove write-only global variable
  powerpc/platforms/83xx/suspend: Prevent unloading the driver
  powerpc/platforms/83xx/suspend: Reorder to get rid of a forward declaration
  ...
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile             |  11
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c        |   2
-rw-r--r--  arch/powerpc/kernel/btext.c              |   2
-rw-r--r--  arch/powerpc/kernel/cputable.c           |  67
-rw-r--r--  arch/powerpc/kernel/dawr.c               |   1
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c        |   4
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c         |   2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S     |   2
-rw-r--r--  arch/powerpc/kernel/head_64.S            |   3
-rw-r--r--  arch/powerpc/kernel/head_book3s_32.S     |   4
-rw-r--r--  arch/powerpc/kernel/interrupt.c          | 161
-rw-r--r--  arch/powerpc/kernel/iommu.c              |   5
-rw-r--r--  arch/powerpc/kernel/irq.c                | 496
-rw-r--r--  arch/powerpc/kernel/irq_64.c             | 466
-rw-r--r--  arch/powerpc/kernel/kprobes.c            |   2
-rw-r--r--  arch/powerpc/kernel/mce.c                |   2
-rw-r--r--  arch/powerpc/kernel/pci-common.c         |  32
-rw-r--r--  arch/powerpc/kernel/pci_32.c             |  27
-rw-r--r--  arch/powerpc/kernel/pci_64.c             |   2
-rw-r--r--  arch/powerpc/kernel/pci_dn.c             |   2
-rw-r--r--  arch/powerpc/kernel/prom.c               |  10
-rw-r--r--  arch/powerpc/kernel/prom_init.c          |   2
-rw-r--r--  arch/powerpc/kernel/ptrace/ptrace-vsx.c  |   2
-rw-r--r--  arch/powerpc/kernel/setup_64.c           |   1
-rw-r--r--  arch/powerpc/kernel/signal_64.c          |   9
-rw-r--r--  arch/powerpc/kernel/smp.c                |  29
-rw-r--r--  arch/powerpc/kernel/syscall.c            | 190
-rw-r--r--  arch/powerpc/kernel/trace/ftrace.c       |  30
-rw-r--r--  arch/powerpc/kernel/traps.c              |   2
-rw-r--r--  arch/powerpc/kernel/vdso/cacheflush.S    |   1
-rw-r--r--  arch/powerpc/kernel/watchdog.c           |  23
31 files changed, 832 insertions, 760 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index c8cf924bf9c0..06d2d1f78f71 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -54,6 +54,13 @@ CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
endif
+ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+# Remove stack protector to avoid triggering unneeded stack canary
+# checks due to randomize_kstack_offset.
+CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
+CFLAGS_syscall.o += -fno-stack-protector
+endif
+
obj-y := cputable.o syscalls.o \
irq.o align.o signal_$(BITS).o pmc.o vdso.o \
process.o systbl.o idle.o \
@@ -62,9 +69,9 @@ obj-y := cputable.o syscalls.o \
udbg.o misc.o io.o misc_$(BITS).o \
of_platform.o prom_parse.o firmware.o \
hw_breakpoint_constraints.o interrupt.o \
- kdebugfs.o stacktrace.o
+ kdebugfs.o stacktrace.o syscall.o
obj-y += ptrace/
-obj-$(CONFIG_PPC64) += setup_64.o \
+obj-$(CONFIG_PPC64) += setup_64.o irq_64.o \
paca.o nvram_64.o note.o
obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o
obj-$(CONFIG_VDSO32) += vdso32_wrapper.o
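
The stack-protector gymnastics above are needed because randomize_kstack_offset grows the syscall handler's stack frame by a per-call pseudo-random amount, which would otherwise trip GCC's stack-canary heuristics on syscall.o. For orientation, a minimal sketch of how the generic helpers from include/linux/randomize_kstack.h are typically consumed by an arch syscall path — hypothetical_syscall_entry() and dispatch_syscall() are illustrative names, not the powerpc code:

    #include <linux/randomize_kstack.h>
    #include <asm/ptrace.h>
    #include <asm/time.h>                           /* mftb() */

    extern long dispatch_syscall(struct pt_regs *regs);    /* hypothetical */

    static long hypothetical_syscall_entry(struct pt_regs *regs)
    {
            long ret;

            /* Grow the stack by this CPU's previously chosen random amount */
            add_random_kstack_offset();

            ret = dispatch_syscall(regs);

            /* Bank fresh entropy (timebase bits here) for the next syscall */
            choose_random_kstack_offset(mftb());

            return ret;
    }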
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index eec536aef83a..8c10f536e478 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -379,7 +379,7 @@ int main(void)
OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2);
OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3);
#endif
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry);
OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr);
OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit);
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 8f69bb07e500..2769889219bf 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -73,7 +73,7 @@ static inline void rmci_maybe_off(void)
* the display during identify_machine() and MMU_Init()
*
* The display is mapped to virtual address 0xD0000000, rather
- * than 1:1, because some some CHRP machines put the frame buffer
+ * than 1:1, because some CHRP machines put the frame buffer
* in the region starting at 0xC0000000 (PAGE_OFFSET).
* This mapping is temporary and will disappear as soon as the
* setup done by MMU_Init() is applied.
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index a5dbfccd2047..d8e42ef750f1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -149,7 +149,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970",
.platform = "ppc970",
},
{ /* PPC970FX */
@@ -166,7 +165,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970",
.platform = "ppc970",
},
{ /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@@ -183,7 +181,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970MP",
.platform = "ppc970",
},
{ /* PPC970MP */
@@ -200,7 +197,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970MP,
.cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970MP",
.platform = "ppc970",
},
{ /* PPC970GX */
@@ -216,7 +212,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 8,
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970",
.platform = "ppc970",
},
{ /* Power5 GR */
@@ -230,7 +225,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power5",
.platform = "power5",
},
{ /* Power5++ */
@@ -243,7 +237,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .oprofile_cpu_type = "ppc64/power5++",
.platform = "power5+",
},
{ /* Power5 GS */
@@ -257,7 +250,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power5+",
.platform = "power5+",
},
{ /* POWER6 in P5+ mode; 2.04-compliant processor */
@@ -269,7 +261,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.platform = "power5+",
},
{ /* Power6 */
@@ -284,7 +275,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power6",
.platform = "power6x",
},
{ /* 2.05-compliant processor, i.e. Power6 "architected" mode */
@@ -296,7 +286,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.platform = "power6",
},
{ /* 2.06-compliant processor, i.e. Power7 "architected" mode */
@@ -309,7 +298,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
.machine_check_early = __machine_check_early_realmode_p7,
@@ -325,7 +313,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -341,7 +328,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER9,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.platform = "power9",
@@ -356,7 +342,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER10,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power10,
.cpu_restore = __restore_cpu_power10,
.platform = "power10",
@@ -373,7 +358,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power7",
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
.machine_check_early = __machine_check_early_realmode_p7,
@@ -391,7 +375,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power7",
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
.machine_check_early = __machine_check_early_realmode_p7,
@@ -409,7 +392,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -427,7 +409,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -445,7 +426,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -463,7 +443,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -481,7 +460,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -499,7 +477,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -517,7 +494,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -535,7 +511,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power10",
.cpu_setup = __setup_cpu_power10,
.cpu_restore = __restore_cpu_power10,
.machine_check_early = __machine_check_early_realmode_p10,
@@ -554,7 +529,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 4,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/cell-be",
.platform = "ppc-cell-be",
},
{ /* PA Semi PA6T */
@@ -570,7 +544,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_PA6T,
.cpu_setup = __setup_cpu_pa6t,
.cpu_restore = __restore_cpu_pa6t,
- .oprofile_cpu_type = "ppc64/pa6t",
.platform = "pa6t",
},
{ /* default match */
@@ -734,7 +707,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750,
.machine_check = machine_check_generic,
.platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
},
{ /* 745/755 */
.pvr_mask = 0xfffff000,
@@ -765,7 +737,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750,
.machine_check = machine_check_generic,
.platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
},
{ /* 750FX rev 2.0 must disable HID0[DPM] */
.pvr_mask = 0xffffffff,
@@ -781,7 +752,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750,
.machine_check = machine_check_generic,
.platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
},
{ /* 750FX (All revs except 2.0) */
.pvr_mask = 0xffff0000,
@@ -797,7 +767,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750fx,
.machine_check = machine_check_generic,
.platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
},
{ /* 750GX */
.pvr_mask = 0xffff0000,
@@ -813,7 +782,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750fx,
.machine_check = machine_check_generic,
.platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
},
{ /* 740/750 (L2CR bit need fixup for 740) */
.pvr_mask = 0xffff0000,
@@ -891,7 +859,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -908,7 +875,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -925,7 +891,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -942,7 +907,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -959,7 +923,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -976,7 +939,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -993,7 +955,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1010,7 +971,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1026,7 +986,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1043,7 +1002,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1060,7 +1018,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1172,7 +1129,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_603,
.machine_check = machine_check_83xx,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e300",
.platform = "ppc603",
},
{ /* e300c4 (e300c1, plus one IU) */
@@ -1188,7 +1144,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_603,
.machine_check = machine_check_83xx,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e300",
.platform = "ppc603",
},
#endif
@@ -1884,7 +1839,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500",
.cpu_setup = __setup_cpu_e500v1,
.machine_check = machine_check_e500,
.platform = "ppc8540",
@@ -1903,7 +1857,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500",
.cpu_setup = __setup_cpu_e500v2,
.machine_check = machine_check_e500,
.platform = "ppc8548",
@@ -1922,7 +1875,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500mc",
.cpu_setup = __setup_cpu_e500mc,
.machine_check = machine_check_e500mc,
.platform = "ppce500mc",
@@ -1943,7 +1895,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500mc",
.cpu_setup = __setup_cpu_e5500,
#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e5500,
@@ -1965,7 +1916,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 6,
- .oprofile_cpu_type = "ppc/e6500",
.cpu_setup = __setup_cpu_e6500,
#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e6500,
@@ -2033,23 +1983,10 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
t->pmc_type = old.pmc_type;
/*
- * If we have passed through this logic once before and
- * have pulled the default case because the real PVR was
- * not found inside cpu_specs[], then we are possibly
- * running in compatibility mode. In that case, let the
- * oprofiler know which set of compatibility counters to
- * pull from by making sure the oprofile_cpu_type string
- * is set to that of compatibility mode. If the
- * oprofile_cpu_type already has a value, then we are
- * possibly overriding a real PVR with a logical one,
- * and, in that case, keep the current value for
- * oprofile_cpu_type. Furthermore, let's ensure that the
+ * Let's ensure that the
* fix for the PMAO bug is enabled on compatibility mode.
*/
- if (old.oprofile_cpu_type != NULL) {
- t->oprofile_cpu_type = old.oprofile_cpu_type;
- t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
- }
+ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
}
*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c
index 30d4eca88d17..909a05cd2809 100644
--- a/arch/powerpc/kernel/dawr.c
+++ b/arch/powerpc/kernel/dawr.c
@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <asm/machdep.h>
#include <asm/hvcall.h>
+#include <asm/firmware.h>
bool dawr_force_enable;
EXPORT_SYMBOL_GPL(dawr_force_enable);
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 2ad365c21afa..fc800a9fb2c4 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -102,7 +102,6 @@ static struct cpu_spec __initdata base_cpu_spec = {
.dcache_bsize = 32, /* cache info init. */
.num_pmcs = 0,
.pmc_type = PPC_PMC_DEFAULT,
- .oprofile_cpu_type = NULL,
.cpu_setup = NULL,
.cpu_restore = __restore_cpu_cpufeatures,
.machine_check_early = NULL,
@@ -387,7 +386,6 @@ static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
- cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";
return 1;
}
@@ -423,7 +421,6 @@ static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
- cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";
return 1;
}
@@ -449,7 +446,6 @@ static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
- cur_cpu_spec->oprofile_cpu_type = "ppc64/power10";
return 1;
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 260273e56431..f279295179bd 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -750,7 +750,7 @@ static void eeh_pe_cleanup(struct eeh_pe *pe)
* @pdev: pci_dev to check
*
* This function may return a false positive if we can't determine the slot's
- * presence state. This might happen for for PCIe slots if the PE containing
+ * presence state. This might happen for PCIe slots if the PE containing
* the upstream bridge is also frozen, or the bridge is part of the same PE
* as the device.
*
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index b66dd6f775a4..3d0dc133a9ae 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -2779,7 +2779,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
- * - If it was a decrementer interrupt, we bump the dec to max and and return.
+ * - If it was a decrementer interrupt, we bump the dec to max and return.
* - If it was a doorbell we return immediately since doorbells are edge
* triggered and won't automatically refire.
* - If it was a HMI we return immediately since we handled it in realmode
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d3eea633d11a..cf2c08902c05 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -965,6 +965,9 @@ start_here_multiplatform:
* and SLB setup before we turn on relocation.
*/
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
/* Restore parameters passed from prom_init/kexec */
mr r3,r31
LOAD_REG_ADDR(r12, DOTSYM(early_setup))
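
The three lines added above make 64-bit boot call kasan_early_init before early_setup(), so the first instrumented C code already has shadow memory behind it. A rough, hedged sketch of what such an early hook generally does — map_kernel_page_early() is a hypothetical helper; kasan_early_shadow_page and the KASAN_SHADOW_* bounds are the generic KASAN symbols:

    #include <linux/kasan.h>
    #include <asm/page.h>

    void __init kasan_early_init(void)
    {
            unsigned long addr;

            /* Point the whole shadow range at one zeroed page so every
             * instrumented access hits valid, unpoisoned shadow until the
             * real shadow is allocated later in boot. (Sketch only; the
             * actual implementation lives under arch/powerpc/mm/kasan/.) */
            for (addr = KASAN_SHADOW_START; addr < KASAN_SHADOW_END; addr += PAGE_SIZE)
                    map_kernel_page_early(addr, __pa(kasan_early_shadow_page),
                                          PAGE_KERNEL);
    }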
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 6c739beb938c..519b60695167 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -418,14 +418,14 @@ InstructionTLBMiss:
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_IMISS
-#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+#ifdef CONFIG_MODULES
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SDR1
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
rlwinm r2, r2, 28, 0xfffff000
-#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+#ifdef CONFIG_MODULES
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 784ea3289c84..0e75cb03244a 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -24,8 +24,6 @@
unsigned long global_dbcr0[NR_CPUS];
#endif
-typedef long (*syscall_fn)(long, long, long, long, long, long);
-
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
@@ -73,165 +71,6 @@ static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
return true;
}
-/* Has to run notrace because it is entered not completely "reconciled" */
-notrace long system_call_exception(long r3, long r4, long r5,
- long r6, long r7, long r8,
- unsigned long r0, struct pt_regs *regs)
-{
- syscall_fn f;
-
- kuap_lock();
-
- regs->orig_gpr3 = r3;
-
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
-
- trace_hardirqs_off(); /* finish reconciling */
-
- CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
- user_exit_irqoff();
-
- BUG_ON(regs_is_unrecoverable(regs));
- BUG_ON(!(regs->msr & MSR_PR));
- BUG_ON(arch_irq_disabled_regs(regs));
-
-#ifdef CONFIG_PPC_PKEY
- if (mmu_has_feature(MMU_FTR_PKEY)) {
- unsigned long amr, iamr;
- bool flush_needed = false;
- /*
- * When entering from userspace we mostly have the AMR/IAMR
- * different from kernel default values. Hence don't compare.
- */
- amr = mfspr(SPRN_AMR);
- iamr = mfspr(SPRN_IAMR);
- regs->amr = amr;
- regs->iamr = iamr;
- if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
- mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
- flush_needed = true;
- }
- if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
- mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
- flush_needed = true;
- }
- if (flush_needed)
- isync();
- } else
-#endif
- kuap_assert_locked();
-
- booke_restore_dbcr0();
-
- account_cpu_user_entry();
-
- account_stolen_time();
-
- /*
- * This is not required for the syscall exit path, but makes the
- * stack frame look nicer. If this was initialised in the first stack
- * frame, or if the unwinder was taught the first stack frame always
- * returns to user with IRQS_ENABLED, this store could be avoided!
- */
- irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
-
- /*
- * If system call is called with TM active, set _TIF_RESTOREALL to
- * prevent RFSCV being used to return to userspace, because POWER9
- * TM implementation has problems with this instruction returning to
- * transactional state. Final register values are not relevant because
- * the transaction will be aborted upon return anyway. Or in the case
- * of unsupported_scv SIGILL fault, the return state does not much
- * matter because it's an edge case.
- */
- if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
- unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
- set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);
-
- /*
- * If the system call was made with a transaction active, doom it and
- * return without performing the system call. Unless it was an
- * unsupported scv vector, in which case it's treated like an illegal
- * instruction.
- */
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
- !trap_is_unsupported_scv(regs)) {
- /* Enable TM in the kernel, and disable EE (for scv) */
- hard_irq_disable();
- mtmsr(mfmsr() | MSR_TM);
-
- /* tabort, this dooms the transaction, nothing else */
- asm volatile(".long 0x7c00071d | ((%0) << 16)"
- :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
-
- /*
- * Userspace will never see the return value. Execution will
- * resume after the tbegin. of the aborted transaction with the
- * checkpointed register state. A context switch could occur
- * or signal delivered to the process before resuming the
- * doomed transaction context, but that should all be handled
- * as expected.
- */
- return -ENOSYS;
- }
-#endif // CONFIG_PPC_TRANSACTIONAL_MEM
-
- local_irq_enable();
-
- if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
- if (unlikely(trap_is_unsupported_scv(regs))) {
- /* Unsupported scv vector */
- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- return regs->gpr[3];
- }
- /*
- * We use the return value of do_syscall_trace_enter() as the
- * syscall number. If the syscall was rejected for any reason
- * do_syscall_trace_enter() returns an invalid syscall number
- * and the test against NR_syscalls will fail and the return
- * value to be used is in regs->gpr[3].
- */
- r0 = do_syscall_trace_enter(regs);
- if (unlikely(r0 >= NR_syscalls))
- return regs->gpr[3];
- r3 = regs->gpr[3];
- r4 = regs->gpr[4];
- r5 = regs->gpr[5];
- r6 = regs->gpr[6];
- r7 = regs->gpr[7];
- r8 = regs->gpr[8];
-
- } else if (unlikely(r0 >= NR_syscalls)) {
- if (unlikely(trap_is_unsupported_scv(regs))) {
- /* Unsupported scv vector */
- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- return regs->gpr[3];
- }
- return -ENOSYS;
- }
-
- /* May be faster to do array_index_nospec? */
- barrier_nospec();
-
- if (unlikely(is_compat_task())) {
- f = (void *)compat_sys_call_table[r0];
-
- r3 &= 0x00000000ffffffffULL;
- r4 &= 0x00000000ffffffffULL;
- r5 &= 0x00000000ffffffffULL;
- r6 &= 0x00000000ffffffffULL;
- r7 &= 0x00000000ffffffffULL;
- r8 &= 0x00000000ffffffffULL;
-
- } else {
- f = (void *)sys_call_table[r0];
- }
-
- return f(r3, r4, r5, r6, r7, r8);
-}
-
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
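
Note that the large removal above is a move, not a deletion: system_call_exception() and the syscall_fn typedef now live in the new arch/powerpc/kernel/syscall.c added to the Makefile earlier (190 lines in the diffstat). Condensed from the removed lines, the shape of the dispatch tail, for orientation — the real function also handles tracing, compat tasks, KUAP and transactional memory:

    typedef long (*syscall_fn)(long, long, long, long, long, long);

    /* Condensed sketch of the tail of system_call_exception() above;
     * relies on the same headers and symbols as the original. */
    static long sketch_dispatch(unsigned long r0, long r3, long r4, long r5,
                                long r6, long r7, long r8)
    {
            syscall_fn f;

            if (unlikely(r0 >= NR_syscalls))
                    return -ENOSYS;

            barrier_nospec();       /* don't speculate past the bounds check */
            f = (void *)sys_call_table[r0];

            return f(r3, r4, r5, r6, r7, r8);
    }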
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 7e56ddb3e0b9..caebe1431596 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -775,6 +775,11 @@ bool iommu_table_in_use(struct iommu_table *tbl)
/* ignore reserved bit0 */
if (tbl->it_offset == 0)
start = 1;
+
+ /* Simple case with no reserved MMIO32 region */
+ if (!tbl->it_reserved_start && !tbl->it_reserved_end)
+ return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;
+
end = tbl->it_reserved_start - tbl->it_offset;
if (find_next_bit(tbl->it_map, end, start) != end)
return true;
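
The early return added above leans on the find_next_bit() contract: it returns its size argument when no set bit exists at or after start. Restated as a self-contained sketch (names are illustrative):

    #include <linux/bitmap.h>

    /* A bitmap region is "in use" iff some bit is set at or after
     * 'start'; find_next_bit() returns 'size' when nothing is found. */
    static bool region_in_use(const unsigned long *map, unsigned long size,
                              unsigned long start)
    {
            return find_next_bit(map, size, start) != size;
    }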
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0822a274a549..0f17268c1f0b 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -65,13 +65,8 @@
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>
+#include <asm/ppc_asm.h>
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-#include <asm/firmware.h>
-#include <asm/lv1call.h>
-#include <asm/dbell.h>
-#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
@@ -88,411 +83,6 @@ u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_PPC64
-
-int distribute_irqs = 1;
-
-static inline notrace unsigned long get_irq_happened(void)
-{
- unsigned long happened;
-
- __asm__ __volatile__("lbz %0,%1(13)"
- : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));
-
- return happened;
-}
-
-void replay_soft_interrupts(void)
-{
- struct pt_regs regs;
-
- /*
- * Be careful here, calling these interrupt handlers can cause
- * softirqs to be raised, which they may run when calling irq_exit,
- * which will cause local_irq_enable() to be run, which can then
- * recurse into this function. Don't keep any state across
- * interrupt handler calls which may change underneath us.
- *
- * We use local_paca rather than get_paca() to avoid all the
- * debug_smp_processor_id() business in this low level function.
- */
-
- ppc_save_regs(&regs);
- regs.softe = IRQS_ENABLED;
- regs.msr |= MSR_EE;
-
-again:
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(mfmsr() & MSR_EE);
-
- /*
- * Force the delivery of pending soft-disabled interrupts on PS3.
- * Any HV call will have this side effect.
- */
- if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
- u64 tmp, tmp2;
- lv1_get_version_info(&tmp, &tmp2);
- }
-
- /*
- * Check if an hypervisor Maintenance interrupt happened.
- * This is a higher priority interrupt than the others, so
- * replay it first.
- */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
- local_paca->irq_happened &= ~PACA_IRQ_HMI;
- regs.trap = INTERRUPT_HMI;
- handle_hmi_exception(&regs);
- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
- hard_irq_disable();
- }
-
- if (local_paca->irq_happened & PACA_IRQ_DEC) {
- local_paca->irq_happened &= ~PACA_IRQ_DEC;
- regs.trap = INTERRUPT_DECREMENTER;
- timer_interrupt(&regs);
- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
- hard_irq_disable();
- }
-
- if (local_paca->irq_happened & PACA_IRQ_EE) {
- local_paca->irq_happened &= ~PACA_IRQ_EE;
- regs.trap = INTERRUPT_EXTERNAL;
- do_IRQ(&regs);
- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
- hard_irq_disable();
- }
-
- if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
- local_paca->irq_happened &= ~PACA_IRQ_DBELL;
- regs.trap = INTERRUPT_DOORBELL;
- doorbell_exception(&regs);
- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
- hard_irq_disable();
- }
-
- /* Book3E does not support soft-masking PMI interrupts */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
- local_paca->irq_happened &= ~PACA_IRQ_PMI;
- regs.trap = INTERRUPT_PERFMON;
- performance_monitor_exception(&regs);
- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
- hard_irq_disable();
- }
-
- if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
- /*
- * We are responding to the next interrupt, so interrupt-off
- * latencies should be reset here.
- */
- trace_hardirqs_on();
- trace_hardirqs_off();
- goto again;
- }
-}
-
-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
-static inline void replay_soft_interrupts_irqrestore(void)
-{
- unsigned long kuap_state = get_kuap();
-
- /*
- * Check if anything calls local_irq_enable/restore() when KUAP is
- * disabled (user access enabled). We handle that case here by saving
- * and re-locking AMR but we shouldn't get here in the first place,
- * hence the warning.
- */
- kuap_assert_locked();
-
- if (kuap_state != AMR_KUAP_BLOCKED)
- set_kuap(AMR_KUAP_BLOCKED);
-
- replay_soft_interrupts();
-
- if (kuap_state != AMR_KUAP_BLOCKED)
- set_kuap(kuap_state);
-}
-#else
-#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
-#endif
-
-notrace void arch_local_irq_restore(unsigned long mask)
-{
- unsigned char irq_happened;
-
- /* Write the new soft-enabled value if it is a disable */
- if (mask) {
- irq_soft_mask_set(mask);
- return;
- }
-
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(in_nmi() || in_hardirq());
-
- /*
- * After the stb, interrupts are unmasked and there are no interrupts
- * pending replay. The restart sequence makes this atomic with
- * respect to soft-masked interrupts. If this was just a simple code
- * sequence, a soft-masked interrupt could become pending right after
- * the comparison and before the stb.
- *
- * This allows interrupts to be unmasked without hard disabling, and
- * also without new hard interrupts coming in ahead of pending ones.
- */
- asm_volatile_goto(
-"1: \n"
-" lbz 9,%0(13) \n"
-" cmpwi 9,0 \n"
-" bne %l[happened] \n"
-" stb 9,%1(13) \n"
-"2: \n"
- RESTART_TABLE(1b, 2b, 1b)
- : : "i" (offsetof(struct paca_struct, irq_happened)),
- "i" (offsetof(struct paca_struct, irq_soft_mask))
- : "cr0", "r9"
- : happened);
-
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
-
- return;
-
-happened:
- irq_happened = get_irq_happened();
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(!irq_happened);
-
- if (irq_happened == PACA_IRQ_HARD_DIS) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(mfmsr() & MSR_EE);
- irq_soft_mask_set(IRQS_ENABLED);
- local_paca->irq_happened = 0;
- __hard_irq_enable();
- return;
- }
-
- /* Have interrupts to replay, need to hard disable first */
- if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- if (!(mfmsr() & MSR_EE)) {
- /*
- * An interrupt could have come in and cleared
- * MSR[EE] and set IRQ_HARD_DIS, so check
- * IRQ_HARD_DIS again and warn if it is still
- * clear.
- */
- irq_happened = get_irq_happened();
- WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
- }
- }
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- } else {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- if (WARN_ON_ONCE(mfmsr() & MSR_EE))
- __hard_irq_disable();
- }
- }
-
- /*
- * Disable preempt here, so that the below preempt_enable will
- * perform resched if required (a replayed interrupt may set
- * need_resched).
- */
- preempt_disable();
- irq_soft_mask_set(IRQS_ALL_DISABLED);
- trace_hardirqs_off();
-
- replay_soft_interrupts_irqrestore();
- local_paca->irq_happened = 0;
-
- trace_hardirqs_on();
- irq_soft_mask_set(IRQS_ENABLED);
- __hard_irq_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(arch_local_irq_restore);
-
-/*
- * This is a helper to use when about to go into idle low-power
- * when the latter has the side effect of re-enabling interrupts
- * (such as calling H_CEDE under pHyp).
- *
- * You call this function with interrupts soft-disabled (this is
- * already the case when ppc_md.power_save is called). The function
- * will return whether to enter power save or just return.
- *
- * In the former case, it will have notified lockdep of interrupts
- * being re-enabled and generally sanitized the lazy irq state,
- * and in the latter case it will leave with interrupts hard
- * disabled and marked as such, so the local_irq_enable() call
- * in arch_cpu_idle() will properly re-enable everything.
- */
-bool prep_irq_for_idle(void)
-{
- /*
- * First we need to hard disable to ensure no interrupt
- * occurs before we effectively enter the low power state
- */
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- /*
- * If anything happened while we were soft-disabled,
- * we return now and do not enter the low power state.
- */
- if (lazy_irq_pending())
- return false;
-
- /* Tell lockdep we are about to re-enable */
- trace_hardirqs_on();
-
- /*
- * Mark interrupts as soft-enabled and clear the
- * PACA_IRQ_HARD_DIS from the pending mask since we
- * are about to hard enable as well as a side effect
- * of entering the low power state.
- */
- local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
- irq_soft_mask_set(IRQS_ENABLED);
-
- /* Tell the caller to enter the low power state */
- return true;
-}
-
-#ifdef CONFIG_PPC_BOOK3S
-/*
- * This is for idle sequences that return with IRQs off, but the
- * idle state itself wakes on interrupt. Tell the irq tracer that
- * IRQs are enabled for the duration of idle so it does not get long
- * off times. Must be paired with fini_irq_for_idle_irqsoff.
- */
-bool prep_irq_for_idle_irqsoff(void)
-{
- WARN_ON(!irqs_disabled());
-
- /*
- * First we need to hard disable to ensure no interrupt
- * occurs before we effectively enter the low power state
- */
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- /*
- * If anything happened while we were soft-disabled,
- * we return now and do not enter the low power state.
- */
- if (lazy_irq_pending())
- return false;
-
- /* Tell lockdep we are about to re-enable */
- trace_hardirqs_on();
-
- return true;
-}
-
-/*
- * Take the SRR1 wakeup reason, index into this table to find the
- * appropriate irq_happened bit.
- *
- * Sytem reset exceptions taken in idle state also come through here,
- * but they are NMI interrupts so do not need to wait for IRQs to be
- * restored, and should be taken as early as practical. These are marked
- * with 0xff in the table. The Power ISA specifies 0100b as the system
- * reset interrupt reason.
- */
-#define IRQ_SYSTEM_RESET 0xff
-
-static const u8 srr1_to_lazyirq[0x10] = {
- 0, 0, 0,
- PACA_IRQ_DBELL,
- IRQ_SYSTEM_RESET,
- PACA_IRQ_DBELL,
- PACA_IRQ_DEC,
- 0,
- PACA_IRQ_EE,
- PACA_IRQ_EE,
- PACA_IRQ_HMI,
- 0, 0, 0, 0, 0 };
-
-void replay_system_reset(void)
-{
- struct pt_regs regs;
-
- ppc_save_regs(&regs);
- regs.trap = 0x100;
- get_paca()->in_nmi = 1;
- system_reset_exception(&regs);
- get_paca()->in_nmi = 0;
-}
-EXPORT_SYMBOL_GPL(replay_system_reset);
-
-void irq_set_pending_from_srr1(unsigned long srr1)
-{
- unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
- u8 reason = srr1_to_lazyirq[idx];
-
- /*
- * Take the system reset now, which is immediately after registers
- * are restored from idle. It's an NMI, so interrupts need not be
- * re-enabled before it is taken.
- */
- if (unlikely(reason == IRQ_SYSTEM_RESET)) {
- replay_system_reset();
- return;
- }
-
- if (reason == PACA_IRQ_DBELL) {
- /*
- * When doorbell triggers a system reset wakeup, the message
- * is not cleared, so if the doorbell interrupt is replayed
- * and the IPI handled, the doorbell interrupt would still
- * fire when EE is enabled.
- *
- * To avoid taking the superfluous doorbell interrupt,
- * execute a msgclr here before the interrupt is replayed.
- */
- ppc_msgclr(PPC_DBELL_MSGTYPE);
- }
-
- /*
- * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
- * so this can be called unconditionally with the SRR1 wake
- * reason as returned by the idle code, which uses 0 to mean no
- * interrupt.
- *
- * If a future CPU was to designate this as an interrupt reason,
- * then a new index for no interrupt must be assigned.
- */
- local_paca->irq_happened |= reason;
-}
-#endif /* CONFIG_PPC_BOOK3S */
-
-/*
- * Force a replay of the external interrupt handler on this CPU.
- */
-void force_external_irq_replay(void)
-{
- /*
- * This must only be called with interrupts soft-disabled,
- * the replay will happen when re-enabling.
- */
- WARN_ON(!arch_irqs_disabled());
-
- /*
- * Interrupts must always be hard disabled before irq_happened is
- * modified (to prevent lost update in case of interrupt between
- * load and store).
- */
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- /* Indicate in the PACA that we have an interrupt to replay */
- local_paca->irq_happened |= PACA_IRQ_EE;
-}
-
-#endif /* CONFIG_PPC64 */
-
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
@@ -595,17 +185,15 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
return sum;
}
-static inline void check_stack_overflow(void)
+static inline void check_stack_overflow(unsigned long sp)
{
- long sp;
-
if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
return;
- sp = current_stack_pointer & (THREAD_SIZE - 1);
+ sp &= THREAD_SIZE - 1;
- /* check for stack overflow: is there less than 2KB free? */
- if (unlikely(sp < 2048)) {
+ /* check for stack overflow: is there less than 1/4th free? */
+ if (unlikely(sp < THREAD_SIZE / 4)) {
pr_err("do_IRQ: stack overflow: %ld\n", sp);
dump_stack();
}
@@ -632,36 +220,16 @@ static __always_inline void call_do_softirq(const void *sp)
}
#endif
-static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
-{
- register unsigned long r3 asm("r3") = (unsigned long)regs;
-
- /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
- asm volatile (
- PPC_STLU " %%r1, %[offset](%[sp]) ;"
- "mr %%r1, %[sp] ;"
- "bl %[callee] ;"
- PPC_LL " %%r1, 0(%%r1) ;"
- : // Outputs
- "+r" (r3)
- : // Inputs
- [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
- [callee] "i" (__do_irq)
- : // Clobbers
- "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
- "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
- "r11", "r12"
- );
-}
-
DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
-void __do_irq(struct pt_regs *regs)
+static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
{
unsigned int irq;
trace_irq_entry(regs);
+ check_stack_overflow(oldsp);
+
/*
* Query the platform PIC for the interrupt & ack it.
*
@@ -682,6 +250,29 @@ void __do_irq(struct pt_regs *regs)
trace_irq_exit(regs);
}
+static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
+{
+ register unsigned long r3 asm("r3") = (unsigned long)regs;
+
+ /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
+ asm volatile (
+ PPC_STLU " %%r1, %[offset](%[sp]) ;"
+ "mr %%r4, %%r1 ;"
+ "mr %%r1, %[sp] ;"
+ "bl %[callee] ;"
+ PPC_LL " %%r1, 0(%%r1) ;"
+ : // Outputs
+ "+r" (r3)
+ : // Inputs
+ [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+ [callee] "i" (__do_irq)
+ : // Clobbers
+ "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+ "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12"
+ );
+}
+
void __do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -692,16 +283,11 @@ void __do_IRQ(struct pt_regs *regs)
irqsp = hardirq_ctx[raw_smp_processor_id()];
sirqsp = softirq_ctx[raw_smp_processor_id()];
- check_stack_overflow();
-
- /* Already there ? */
- if (unlikely(cursp == irqsp || cursp == sirqsp)) {
- __do_irq(regs);
- set_irq_regs(old_regs);
- return;
- }
- /* Switch stack and call */
- call_do_irq(regs, irqsp);
+ /* Already there ? If not switch stack and call */
+ if (unlikely(cursp == irqsp || cursp == sirqsp))
+ __do_irq(regs, current_stack_pointer);
+ else
+ call_do_irq(regs, irqsp);
set_irq_regs(old_regs);
}
@@ -798,13 +384,3 @@ int irq_choose_cpu(const struct cpumask *mask)
return hard_smp_processor_id();
}
#endif
-
-#ifdef CONFIG_PPC64
-static int __init setup_noirqdistrib(char *str)
-{
- distribute_irqs = 0;
- return 1;
-}
-
-__setup("noirqdistrib", setup_noirqdistrib);
-#endif /* CONFIG_PPC64 */
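
Two things are worth noting before the new irq_64.c below. First, check_stack_overflow() now receives the pre-switch stack pointer — loaded into r4 by the extra mr in the reworked call_do_irq() — and warns when less than a quarter of the stack remains rather than a fixed 2KB. The arithmetic works because kernel stacks are THREAD_SIZE-aligned and grow down, so the stack pointer's offset within its region is exactly the free space; a sketch with an illustrative stack size:

    #define EXAMPLE_THREAD_SIZE 0x4000UL    /* illustrative: 16K stacks */

    /* Stacks are THREAD_SIZE-aligned and grow downward, so masking the
     * stack pointer with THREAD_SIZE - 1 yields the bytes still free. */
    static inline unsigned long stack_bytes_left(unsigned long sp)
    {
            return sp & (EXAMPLE_THREAD_SIZE - 1);
    }

    /* e.g. an sp ending in 0xb800 -> 0x3800 bytes left, above the
     * EXAMPLE_THREAD_SIZE / 4 == 0x1000 warning threshold used above. */

Second, all of the 64-bit-only code removed above reappears essentially verbatim in irq_64.c.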
diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c
new file mode 100644
index 000000000000..01645e03e9f0
--- /dev/null
+++ b/arch/powerpc/kernel/irq_64.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Derived from arch/i386/kernel/irq.c
+ * Copyright (C) 1992 Linus Torvalds
+ * Adapted from arch/i386 by Gary Thomas
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Updated and modified by Cort Dougan <cort@fsmlabs.com>
+ * Copyright (C) 1996-2001 Cort Dougan
+ * Adapted for Power Macintosh by Paul Mackerras
+ * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+#undef DEBUG
+
+#include <linux/export.h>
+#include <linux/threads.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/cpumask.h>
+#include <linux/profile.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
+#include <linux/static_call.h>
+
+#include <linux/uaccess.h>
+#include <asm/interrupt.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/cache.h>
+#include <asm/ptrace.h>
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/smp.h>
+#include <asm/hw_irq.h>
+#include <asm/softirq_stack.h>
+#include <asm/ppc_asm.h>
+
+#include <asm/paca.h>
+#include <asm/firmware.h>
+#include <asm/lv1call.h>
+#include <asm/dbell.h>
+#include <asm/trace.h>
+#include <asm/cpu_has_feature.h>
+
+int distribute_irqs = 1;
+
+void replay_soft_interrupts(void)
+{
+ struct pt_regs regs;
+
+ /*
+ * Be careful here, calling these interrupt handlers can cause
+ * softirqs to be raised, which they may run when calling irq_exit,
+ * which will cause local_irq_enable() to be run, which can then
+ * recurse into this function. Don't keep any state across
+ * interrupt handler calls which may change underneath us.
+ *
+ * We use local_paca rather than get_paca() to avoid all the
+ * debug_smp_processor_id() business in this low level function.
+ */
+
+ ppc_save_regs(&regs);
+ regs.softe = IRQS_ENABLED;
+ regs.msr |= MSR_EE;
+
+again:
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ /*
+ * Force the delivery of pending soft-disabled interrupts on PS3.
+ * Any HV call will have this side effect.
+ */
+ if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
+ u64 tmp, tmp2;
+ lv1_get_version_info(&tmp, &tmp2);
+ }
+
+ /*
+	 * Check if a hypervisor Maintenance interrupt happened.
+ * This is a higher priority interrupt than the others, so
+ * replay it first.
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
+ local_paca->irq_happened &= ~PACA_IRQ_HMI;
+ regs.trap = INTERRUPT_HMI;
+ handle_hmi_exception(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
+
+ if (local_paca->irq_happened & PACA_IRQ_DEC) {
+ local_paca->irq_happened &= ~PACA_IRQ_DEC;
+ regs.trap = INTERRUPT_DECREMENTER;
+ timer_interrupt(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
+
+ if (local_paca->irq_happened & PACA_IRQ_EE) {
+ local_paca->irq_happened &= ~PACA_IRQ_EE;
+ regs.trap = INTERRUPT_EXTERNAL;
+ do_IRQ(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
+ local_paca->irq_happened &= ~PACA_IRQ_DBELL;
+ regs.trap = INTERRUPT_DOORBELL;
+ doorbell_exception(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
+
+ /* Book3E does not support soft-masking PMI interrupts */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
+ local_paca->irq_happened &= ~PACA_IRQ_PMI;
+ regs.trap = INTERRUPT_PERFMON;
+ performance_monitor_exception(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
+
+ if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
+ /*
+ * We are responding to the next interrupt, so interrupt-off
+ * latencies should be reset here.
+ */
+ trace_hardirqs_on();
+ trace_hardirqs_off();
+ goto again;
+ }
+}
+
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
+static inline void replay_soft_interrupts_irqrestore(void)
+{
+ unsigned long kuap_state = get_kuap();
+
+ /*
+ * Check if anything calls local_irq_enable/restore() when KUAP is
+ * disabled (user access enabled). We handle that case here by saving
+ * and re-locking AMR but we shouldn't get here in the first place,
+ * hence the warning.
+ */
+ kuap_assert_locked();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(AMR_KUAP_BLOCKED);
+
+ replay_soft_interrupts();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(kuap_state);
+}
+#else
+#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
+#endif
+
+notrace void arch_local_irq_restore(unsigned long mask)
+{
+ unsigned char irq_happened;
+
+ /* Write the new soft-enabled value if it is a disable */
+ if (mask) {
+ irq_soft_mask_set(mask);
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(in_nmi() || in_hardirq());
+
+ /*
+ * After the stb, interrupts are unmasked and there are no interrupts
+ * pending replay. The restart sequence makes this atomic with
+ * respect to soft-masked interrupts. If this was just a simple code
+ * sequence, a soft-masked interrupt could become pending right after
+ * the comparison and before the stb.
+ *
+ * This allows interrupts to be unmasked without hard disabling, and
+ * also without new hard interrupts coming in ahead of pending ones.
+ */
+ asm_volatile_goto(
+"1: \n"
+" lbz 9,%0(13) \n"
+" cmpwi 9,0 \n"
+" bne %l[happened] \n"
+" stb 9,%1(13) \n"
+"2: \n"
+ RESTART_TABLE(1b, 2b, 1b)
+ : : "i" (offsetof(struct paca_struct, irq_happened)),
+ "i" (offsetof(struct paca_struct, irq_soft_mask))
+ : "cr0", "r9"
+ : happened);
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+
+ return;
+
+happened:
+ irq_happened = READ_ONCE(local_paca->irq_happened);
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!irq_happened);
+
+ if (irq_happened == PACA_IRQ_HARD_DIS) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+ irq_soft_mask_set(IRQS_ENABLED);
+ local_paca->irq_happened = 0;
+ __hard_irq_enable();
+ return;
+ }
+
+ /* Have interrupts to replay, need to hard disable first */
+ if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (!(mfmsr() & MSR_EE)) {
+ /*
+ * An interrupt could have come in and cleared
+ * MSR[EE] and set IRQ_HARD_DIS, so check
+ * IRQ_HARD_DIS again and warn if it is still
+ * clear.
+ */
+ irq_happened = READ_ONCE(local_paca->irq_happened);
+ WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
+ }
+ }
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ } else {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ }
+ }
+
+ /*
+ * Disable preempt here, so that the below preempt_enable will
+ * perform resched if required (a replayed interrupt may set
+ * need_resched).
+ */
+ preempt_disable();
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ trace_hardirqs_off();
+
+ replay_soft_interrupts_irqrestore();
+ local_paca->irq_happened = 0;
+
+ trace_hardirqs_on();
+ irq_soft_mask_set(IRQS_ENABLED);
+ __hard_irq_enable();
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in arch_cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+ /*
+ * First we need to hard disable to ensure no interrupt
+ * occurs before we effectively enter the low power state
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /*
+ * If anything happened while we were soft-disabled,
+ * we return now and do not enter the low power state.
+ */
+ if (lazy_irq_pending())
+ return false;
+
+ /* Tell lockdep we are about to re-enable */
+ trace_hardirqs_on();
+
+ /*
+ * Mark interrupts as soft-enabled and clear the
+ * PACA_IRQ_HARD_DIS from the pending mask since we
+ * are about to hard enable as well as a side effect
+ * of entering the low power state.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ irq_soft_mask_set(IRQS_ENABLED);
+
+ /* Tell the caller to enter the low power state */
+ return true;
+}
+
+#ifdef CONFIG_PPC_BOOK3S
+/*
+ * This is for idle sequences that return with IRQs off, but the
+ * idle state itself wakes on interrupt. Tell the irq tracer that
+ * IRQs are enabled for the duration of idle so it does not get long
+ * off times. Must be paired with fini_irq_for_idle_irqsoff.
+ */
+bool prep_irq_for_idle_irqsoff(void)
+{
+ WARN_ON(!irqs_disabled());
+
+ /*
+ * First we need to hard disable to ensure no interrupt
+ * occurs before we effectively enter the low power state
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /*
+ * If anything happened while we were soft-disabled,
+ * we return now and do not enter the low power state.
+ */
+ if (lazy_irq_pending())
+ return false;
+
+ /* Tell lockdep we are about to re-enable */
+ trace_hardirqs_on();
+
+ return true;
+}
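
The irqsoff variant is meant to bracket an idle entry that returns with IRQs still hard-disabled; a sketch of the expected pairing, where enter_idle_state() is a hypothetical stand-in for the actual stop/nap sequence:

	if (prep_irq_for_idle_irqsoff()) {
		srr1 = enter_idle_state();	/* wakes with IRQs still off */
		fini_irq_for_idle_irqsoff();	/* tell the tracer IRQs are off again */
	}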
+
+/*
+ * Take the SRR1 wakeup reason, index into this table to find the
+ * appropriate irq_happened bit.
+ *
+ * System reset exceptions taken in idle state also come through here,
+ * but they are NMI interrupts so do not need to wait for IRQs to be
+ * restored, and should be taken as early as practical. These are marked
+ * with 0xff in the table. The Power ISA specifies 0100b as the system
+ * reset interrupt reason.
+ */
+#define IRQ_SYSTEM_RESET 0xff
+
+static const u8 srr1_to_lazyirq[0x10] = {
+ 0, 0, 0,
+ PACA_IRQ_DBELL,
+ IRQ_SYSTEM_RESET,
+ PACA_IRQ_DBELL,
+ PACA_IRQ_DEC,
+ 0,
+ PACA_IRQ_EE,
+ PACA_IRQ_EE,
+ PACA_IRQ_HMI,
+ 0, 0, 0, 0, 0 };
+
+void replay_system_reset(void)
+{
+ struct pt_regs regs;
+
+ ppc_save_regs(&regs);
+ regs.trap = 0x100;
+ get_paca()->in_nmi = 1;
+ system_reset_exception(&regs);
+ get_paca()->in_nmi = 0;
+}
+EXPORT_SYMBOL_GPL(replay_system_reset);
+
+void irq_set_pending_from_srr1(unsigned long srr1)
+{
+ unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
+ u8 reason = srr1_to_lazyirq[idx];
+
+ /*
+ * Take the system reset now, which is immediately after registers
+ * are restored from idle. It's an NMI, so interrupts need not be
+ * re-enabled before it is taken.
+ */
+ if (unlikely(reason == IRQ_SYSTEM_RESET)) {
+ replay_system_reset();
+ return;
+ }
+
+ if (reason == PACA_IRQ_DBELL) {
+ /*
+ * When doorbell triggers a system reset wakeup, the message
+ * is not cleared, so if the doorbell interrupt is replayed
+ * and the IPI handled, the doorbell interrupt would still
+ * fire when EE is enabled.
+ *
+ * To avoid taking the superfluous doorbell interrupt,
+ * execute a msgclr here before the interrupt is replayed.
+ */
+ ppc_msgclr(PPC_DBELL_MSGTYPE);
+ }
+
+ /*
+ * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
+ * so this can be called unconditionally with the SRR1 wake
+ * reason as returned by the idle code, which uses 0 to mean no
+ * interrupt.
+ *
+ * If a future CPU was to designate this as an interrupt reason,
+ * then a new index for no interrupt must be assigned.
+ */
+ local_paca->irq_happened |= reason;
+}
+#endif /* CONFIG_PPC_BOOK3S */
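
To make the decode concrete, a worked example as a comment, assuming SRR1_WAKEMASK_P8 is 0x003c0000 as defined in asm/reg.h:

	/*
	 * A decrementer wakeup sets SRR1[42:45] = 0b0110, so:
	 *
	 *	idx    = (srr1 & 0x003c0000) >> 18 = 6
	 *	reason = srr1_to_lazyirq[6]        = PACA_IRQ_DEC
	 *
	 * which is OR'd into local_paca->irq_happened for later replay,
	 * whereas idx 4 (0b0100, system reset) is taken immediately as
	 * an NMI via replay_system_reset().
	 */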
+
+/*
+ * Force a replay of the external interrupt handler on this CPU.
+ */
+void force_external_irq_replay(void)
+{
+ /*
+ * This must only be called with interrupts soft-disabled,
+ * the replay will happen when re-enabling.
+ */
+ WARN_ON(!arch_irqs_disabled());
+
+ /*
+ * Interrupts must always be hard disabled before irq_happened is
+ * modified (to prevent lost update in case of interrupt between
+ * load and store).
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /* Indicate in the PACA that we have an interrupt to replay */
+ local_paca->irq_happened |= PACA_IRQ_EE;
+}
+
+static int __init setup_noirqdistrib(char *str)
+{
+ distribute_irqs = 0;
+ return 1;
+}
+
+__setup("noirqdistrib", setup_noirqdistrib);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 1c97c0f177ae..912d4f8a13be 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -269,7 +269,7 @@ static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
 * So, we should never get here... but, it's still
* good to catch them, just in case...
*/
- printk("Can't step on instruction %s\n", ppc_inst_as_str(insn));
+ printk("Can't step on instruction %08lx\n", ppc_inst_as_ulong(insn));
BUG();
} else {
/*
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 18173199b79d..6c5d30fba766 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -756,7 +756,7 @@ void __init mce_init(void)
mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
__alignof__(*mce_info),
MEMBLOCK_LOW_LIMIT,
- limit, cpu_to_node(i));
+ limit, early_cpu_to_node(i));
if (!mce_info)
goto err;
paca_ptrs[i]->mce_info = mce_info;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 068410cd54a3..bdd3332200c5 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -39,6 +39,7 @@
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
+#include <asm/setup.h>
#include "../../../drivers/pci/pci.h"
@@ -74,16 +75,32 @@ void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
static int get_phb_number(struct device_node *dn)
{
int ret, phb_id = -1;
- u32 prop_32;
u64 prop;
/*
* Try fixed PHB numbering first, by checking archs and reading
- * the respective device-tree properties. Firstly, try powernv by
- * reading "ibm,opal-phbid", only present in OPAL environment.
+	 * the respective device-tree properties. First, try reading the
+	 * standard "linux,pci-domain", then try reading "ibm,opal-phbid"
+	 * (only present in the powernv OPAL environment), then try the
+	 * device-tree alias, and finally fall back to the lower bits of
+	 * the "reg" property.
*/
- ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+ ret = of_get_pci_domain_nr(dn);
+ if (ret >= 0) {
+ prop = ret;
+ ret = 0;
+ }
+ if (ret)
+ ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+
if (ret) {
+ ret = of_alias_get_id(dn, "pci");
+ if (ret >= 0) {
+ prop = ret;
+ ret = 0;
+ }
+ }
+ if (ret) {
+ u32 prop_32;
ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
prop = prop_32;
}
@@ -95,10 +112,7 @@ static int get_phb_number(struct device_node *dn)
if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
return phb_id;
- /*
- * If not pseries nor powernv, or if fixed PHB numbering tried to add
- * the same PHB number twice, then fallback to dynamic PHB numbering.
- */
+	/* If everything fails then fall back to dynamic PHB numbering. */
phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
BUG_ON(phb_id >= MAX_PHBS);
set_bit(phb_id, phb_bitmap);
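
The precedence can be illustrated with a hypothetical device-tree node, sketched here in a comment (names and values invented):

	/*
	 *	pcie@0 {
	 *		linux,pci-domain = <2>;	  of_get_pci_domain_nr() = 2
	 *	};
	 *
	 * gives PHB number 2 directly; only when "linux,pci-domain" is
	 * absent does the code try "ibm,opal-phbid", then a "pci" alias,
	 * then the "reg" property, and finally dynamic numbering.
	 */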
@@ -1087,7 +1101,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
*/
pci_read_bridge_bases(bus);
- /* Now fixup the bus bus */
+ /* Now fixup the bus */
pcibios_setup_bus_self(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 5a174936c9a0..433965bf37b4 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -36,18 +36,13 @@ int pcibios_assign_bus_offset = 1;
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(pci_dram_offset);
-void __init pcibios_make_OF_bus_map(void);
-
static void fixup_cpc710_pci64(struct pci_dev* dev);
-static u8* pci_to_OF_bus_map;
/* By default, we don't re-assign bus numbers. We do this only on
* some pmacs
*/
static int pci_assign_all_buses;
-static int pci_bus_count;
-
/* This will remain NULL for now, until isa-bridge.c is made common
* to both 32-bit and 64-bit.
*/
@@ -67,6 +62,11 @@ fixup_cpc710_pci64(struct pci_dev* dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_CHRP)
+
+static u8* pci_to_OF_bus_map;
+static int pci_bus_count;
+
/*
* Functions below are used on OpenFirmware machines.
*/
@@ -108,7 +108,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
}
}
-void __init
+static void __init
pcibios_make_OF_bus_map(void)
{
int i;
@@ -154,6 +154,7 @@ pcibios_make_OF_bus_map(void)
}
+#ifdef CONFIG_PPC_PMAC
/*
* Returns the PCI device matching a given OF node
*/
@@ -193,7 +194,9 @@ int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
return -ENODEV;
}
EXPORT_SYMBOL(pci_device_from_OF_node);
+#endif
+#ifdef CONFIG_PPC_CHRP
/* We create the "pci-OF-bus-map" property now so it appears in the
* /proc device tree
*/
@@ -218,6 +221,9 @@ pci_create_OF_bus_map(void)
of_node_put(dn);
}
}
+#endif
+
+#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_CHRP) */
void pcibios_setup_phb_io_space(struct pci_controller *hose)
{
@@ -233,7 +239,9 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose)
static int __init pcibios_init(void)
{
struct pci_controller *hose, *tmp;
+#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
int next_busno = 0;
+#endif
printk(KERN_INFO "PCI: Probing PCI hardware\n");
@@ -242,14 +250,20 @@ static int __init pcibios_init(void)
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
if (pci_assign_all_buses)
hose->first_busno = next_busno;
+#endif
hose->last_busno = 0xff;
pcibios_scan_phb(hose);
pci_bus_add_devices(hose->bus);
+#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
+#endif
}
+
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_CHRP)
pci_bus_count = next_busno;
/* OpenFirmware based machines need a map of OF bus
@@ -258,6 +272,7 @@ static int __init pcibios_init(void)
*/
if (pci_assign_all_buses)
pcibios_make_OF_bus_map();
+#endif
/* Call common code to handle resource allocation */
pcibios_resource_survey();
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 19b03ddf5631..0c7cfb9fab04 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -286,6 +286,7 @@ int pcibus_to_node(struct pci_bus *bus)
EXPORT_SYMBOL(pcibus_to_node);
#endif
+#ifdef CONFIG_PPC_PMAC
int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
{
if (!PCI_DN(np))
@@ -294,3 +295,4 @@ int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
*devfn = PCI_DN(np)->devfn;
return 0;
}
+#endif
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 938ab8838ab5..7a35fc25a304 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -259,7 +259,7 @@ void remove_sriov_vf_pdns(struct pci_dev *pdev)
if (edev) {
/*
* We allocate pci_dn's for the totalvfs count,
- * but only only the vfs that were activated
+ * but only the vfs that were activated
* have a configured PE.
*/
if (edev->pe)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index feae8509b59c..a730b951b64b 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -44,7 +44,7 @@
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#include <asm/opal.h>
@@ -54,6 +54,7 @@
#include <asm/dt_cpu_ftrs.h>
#include <asm/drmem.h>
#include <asm/ultravisor.h>
+#include <asm/prom.h>
#include <mm/mmu_decl.h>
@@ -751,6 +752,13 @@ void __init early_init_devtree(void *params)
early_init_dt_scan_root();
early_init_dt_scan_memory_ppc();
+ /*
+ * As generic code authors expect to be able to use static keys
+ * in early_param() handlers, we initialize the static keys just
+ * before parsing early params (it's fine to call jump_label_init()
+ * more than once).
+ */
+ jump_label_init();
parse_early_param();
/* make sure we've parsed cmdline for mem= before this */
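
This ordering is what lets an early_param() handler flip a static key; a minimal hedged sketch of the pattern it enables (the key and parameter names are hypothetical):

#include <linux/init.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_key);	/* hypothetical key */

static int __init parse_example(char *arg)
{
	/* Safe only because jump_label_init() has already run. */
	static_branch_enable(&example_key);
	return 0;
}
early_param("example", parse_example);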
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 13d6cb188835..a6669c40c1db 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -42,7 +42,7 @@
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/asm-prototypes.h>
#include <asm/ultravisor-api.h>
diff --git a/arch/powerpc/kernel/ptrace/ptrace-vsx.c b/arch/powerpc/kernel/ptrace/ptrace-vsx.c
index 1da4303128ef..7df08004c47d 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-vsx.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-vsx.c
@@ -71,7 +71,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
}
/*
- * Currently to set and and get all the vsx state, you need to call
+ * Currently to set and get all the vsx state, you need to call
* the fp and VMX calls as well. This only get/sets the lower 32
* 128bit VSX registers.
*/
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5761f08dae95..2b2d0b0fbb30 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -113,7 +113,6 @@ void __init setup_tlb_core_data(void)
* Should we panic instead?
*/
WARN_ONCE(smt_enabled_at_boot >= 2 &&
- !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
book3e_htw_mode != PPC_HTW_E6500,
"%s: unsupported MMU configuration\n", __func__);
}
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 472596a109e2..86bb5bb4c143 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -377,9 +377,12 @@ static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_
unsafe_get_user(set->sig[0], &sc->oldmask, efault_out);
/*
- * Force reload of FP/VEC.
- * This has to be done before copying stuff into tsk->thread.fpr/vr
- * for the reasons explained in the previous comment.
+ * Force reload of FP/VEC/VSX so userspace sees any changes.
+ * Clear these bits from the user process' MSR before copying into the
+ * thread struct. If we are rescheduled or preempted and another task
+ * uses FP/VEC/VSX, and this process has the MSR bits set, then the
+ * context switch code will save the current CPU state into the
+ * thread_struct - possibly overwriting the data we are updating here.
*/
regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index bcefab484ea6..6b850c157a62 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -35,6 +35,7 @@
#include <linux/stackprotector.h>
#include <linux/pgtable.h>
#include <linux/clockchips.h>
+#include <linux/kexec.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -55,7 +56,6 @@
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
-#include <asm/kexec.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>
@@ -619,20 +619,6 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
}
#endif
-#ifdef CONFIG_NMI_IPI
-static void crash_stop_this_cpu(struct pt_regs *regs)
-#else
-static void crash_stop_this_cpu(void *dummy)
-#endif
-{
- /*
- * Just busy wait here and avoid marking CPU as offline to ensure
- * register data is captured appropriately.
- */
- while (1)
- cpu_relax();
-}
-
void crash_smp_send_stop(void)
{
static bool stopped = false;
@@ -651,11 +637,14 @@ void crash_smp_send_stop(void)
stopped = true;
-#ifdef CONFIG_NMI_IPI
- smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000);
-#else
- smp_call_function(crash_stop_this_cpu, NULL, 0);
-#endif /* CONFIG_NMI_IPI */
+#ifdef CONFIG_KEXEC_CORE
+ if (kexec_crash_image) {
+ crash_kexec_prepare();
+ return;
+ }
+#endif
+
+ smp_send_stop();
}
#ifdef CONFIG_NMI_IPI
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
new file mode 100644
index 000000000000..81ace9e8b72b
--- /dev/null
+++ b/arch/powerpc/kernel/syscall.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/compat.h>
+#include <linux/context_tracking.h>
+#include <linux/randomize_kstack.h>
+
+#include <asm/interrupt.h>
+#include <asm/kup.h>
+#include <asm/syscall.h>
+#include <asm/time.h>
+#include <asm/tm.h>
+#include <asm/unistd.h>
+
+
+typedef long (*syscall_fn)(long, long, long, long, long, long);
+
+/* Has to run notrace because it is entered before the irq state is completely "reconciled" */
+notrace long system_call_exception(long r3, long r4, long r5,
+ long r6, long r7, long r8,
+ unsigned long r0, struct pt_regs *regs)
+{
+ long ret;
+ syscall_fn f;
+
+ kuap_lock();
+
+ add_random_kstack_offset();
+ regs->orig_gpr3 = r3;
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+
+ trace_hardirqs_off(); /* finish reconciling */
+
+ CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
+ user_exit_irqoff();
+
+ BUG_ON(regs_is_unrecoverable(regs));
+ BUG_ON(!(regs->msr & MSR_PR));
+ BUG_ON(arch_irq_disabled_regs(regs));
+
+#ifdef CONFIG_PPC_PKEY
+ if (mmu_has_feature(MMU_FTR_PKEY)) {
+ unsigned long amr, iamr;
+ bool flush_needed = false;
+ /*
+		 * When entering from userspace the AMR/IAMR usually differ
+		 * from the kernel default values, so don't bother comparing.
+ */
+ amr = mfspr(SPRN_AMR);
+ iamr = mfspr(SPRN_IAMR);
+ regs->amr = amr;
+ regs->iamr = iamr;
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
+ mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
+ flush_needed = true;
+ }
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
+ mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
+ flush_needed = true;
+ }
+ if (flush_needed)
+ isync();
+ } else
+#endif
+ kuap_assert_locked();
+
+ booke_restore_dbcr0();
+
+ account_cpu_user_entry();
+
+ account_stolen_time();
+
+ /*
+ * This is not required for the syscall exit path, but makes the
+ * stack frame look nicer. If this was initialised in the first stack
+ * frame, or if the unwinder was taught the first stack frame always
+ * returns to user with IRQS_ENABLED, this store could be avoided!
+ */
+ irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
+
+ /*
+ * If system call is called with TM active, set _TIF_RESTOREALL to
+ * prevent RFSCV being used to return to userspace, because POWER9
+ * TM implementation has problems with this instruction returning to
+ * transactional state. Final register values are not relevant because
+	 * the transaction will be aborted upon return anyway. In the case
+	 * of an unsupported scv SIGILL fault, the return state does not
+	 * matter much because it's an edge case.
+ */
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+ set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);
+
+ /*
+ * If the system call was made with a transaction active, doom it and
+ * return without performing the system call. Unless it was an
+ * unsupported scv vector, in which case it's treated like an illegal
+ * instruction.
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+ !trap_is_unsupported_scv(regs)) {
+ /* Enable TM in the kernel, and disable EE (for scv) */
+ hard_irq_disable();
+ mtmsr(mfmsr() | MSR_TM);
+
+ /* tabort, this dooms the transaction, nothing else */
+ asm volatile(".long 0x7c00071d | ((%0) << 16)"
+ :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+ /*
+ * Userspace will never see the return value. Execution will
+ * resume after the tbegin. of the aborted transaction with the
+ * checkpointed register state. A context switch could occur
+ * or signal delivered to the process before resuming the
+ * doomed transaction context, but that should all be handled
+ * as expected.
+ */
+ return -ENOSYS;
+ }
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
+ local_irq_enable();
+
+ if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
+ if (unlikely(trap_is_unsupported_scv(regs))) {
+ /* Unsupported scv vector */
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ return regs->gpr[3];
+ }
+ /*
+ * We use the return value of do_syscall_trace_enter() as the
+		 * syscall number. If the syscall was rejected for any reason,
+		 * do_syscall_trace_enter() returns an invalid syscall number,
+		 * the test against NR_syscalls fails, and the value to return
+		 * is already in regs->gpr[3].
+ */
+ r0 = do_syscall_trace_enter(regs);
+ if (unlikely(r0 >= NR_syscalls))
+ return regs->gpr[3];
+ r3 = regs->gpr[3];
+ r4 = regs->gpr[4];
+ r5 = regs->gpr[5];
+ r6 = regs->gpr[6];
+ r7 = regs->gpr[7];
+ r8 = regs->gpr[8];
+
+ } else if (unlikely(r0 >= NR_syscalls)) {
+ if (unlikely(trap_is_unsupported_scv(regs))) {
+ /* Unsupported scv vector */
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ return regs->gpr[3];
+ }
+ return -ENOSYS;
+ }
+
+ /* May be faster to do array_index_nospec? */
+ barrier_nospec();
+
+ if (unlikely(is_compat_task())) {
+ f = (void *)compat_sys_call_table[r0];
+
+ r3 &= 0x00000000ffffffffULL;
+ r4 &= 0x00000000ffffffffULL;
+ r5 &= 0x00000000ffffffffULL;
+ r6 &= 0x00000000ffffffffULL;
+ r7 &= 0x00000000ffffffffULL;
+ r8 &= 0x00000000ffffffffULL;
+
+ } else {
+ f = (void *)sys_call_table[r0];
+ }
+
+ ret = f(r3, r4, r5, r6, r7, r8);
+
+ /*
+ * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+ * so the maximum stack offset is 1k bytes (10 bits).
+ *
+ * The actual entropy will be further reduced by the compiler when
+ * applying stack alignment constraints: the powerpc architecture
+	 * may have two kinds of stack alignment (16 bytes or 8 bytes).
+	 *
+	 * So the resulting 6 or 7 bits of entropy are seen in SP[9:4] or SP[9:3].
+ */
+ choose_random_kstack_offset(mftb());
+
+ return ret;
+}
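
A small userspace sketch of that entropy arithmetic, assuming KSTACK_OFFSET_MAX() masks to 0x3FF as in include/linux/randomize_kstack.h:

#include <stdio.h>

int main(void)
{
	unsigned long tb = 0x12345;		/* a sample timebase value */
	unsigned long off = tb & 0x3FF;		/* keep 10 bits: 0x345 = 837 */
	unsigned long sp_adjust = off & ~0xFUL;	/* 16-byte alignment: 0x340 */

	/* 1024 / 16 = 64 distinct offsets -> 6 bits of entropy, SP[9:4] */
	printf("offset %#lx, SP moves by %#lx\n", off, sp_adjust);
	return 0;
}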
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 2a893e06e4f1..cb158c32b50b 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -69,8 +69,8 @@ ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
/* Make sure it is what we expect it to be */
if (!ppc_inst_equal(replaced, old)) {
- pr_err("%p: replaced (%s) != old (%s)",
- (void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old));
+ pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
+ ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old));
return -EINVAL;
}
@@ -125,9 +125,9 @@ __ftrace_make_nop(struct module *mod,
return -EFAULT;
}
- /* Make sure that that this is still a 24bit jump */
+ /* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
+ pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
@@ -159,8 +159,8 @@ __ftrace_make_nop(struct module *mod,
/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
!ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
- pr_err("Unexpected instruction %s around bl _mcount\n",
- ppc_inst_as_str(op));
+ pr_err("Unexpected instruction %08lx around bl _mcount\n",
+ ppc_inst_as_ulong(op));
return -EINVAL;
}
} else if (IS_ENABLED(CONFIG_PPC64)) {
@@ -174,7 +174,8 @@ __ftrace_make_nop(struct module *mod,
}
if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
- pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
+ pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
+ ppc_inst_as_ulong(op));
return -EINVAL;
}
}
@@ -310,9 +311,9 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
return -EFAULT;
}
- /* Make sure that that this is still a 24bit jump */
+ /* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
+ pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
@@ -416,8 +417,8 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return -EFAULT;
if (!expected_nop_sequence(ip, op[0], op[1])) {
- pr_err("Unexpected call sequence at %p: %s %s\n",
- ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1]));
+ pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
+ ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
return -EINVAL;
}
@@ -486,7 +487,8 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
}
if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
- pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
+ pr_err("Unexpected call sequence at %p: %08lx\n",
+ ip, ppc_inst_as_ulong(op));
return -EINVAL;
}
@@ -562,9 +564,9 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return -EFAULT;
}
- /* Make sure that that this is still a 24bit jump */
+ /* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
+ pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3aaa50e5c72f..dadfcef5d6db 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1676,7 +1676,7 @@ DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception)
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
diff --git a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S
index d4e43ab2d5df..0085ae464dac 100644
--- a/arch/powerpc/kernel/vdso/cacheflush.S
+++ b/arch/powerpc/kernel/vdso/cacheflush.S
@@ -91,6 +91,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
3:
crclr cr0*4+so
sync
+ icbi 0,r1
isync
li r3,0
blr
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 7d28b9553654..dbcc4a793f0b 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -91,6 +91,10 @@ static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;
+#ifdef CONFIG_PPC_PSERIES
+static u64 wd_timeout_pct;
+#endif
+
/*
* Try to take the exclusive watchdog action / NMI IPI / printing lock.
* wd_smp_lock must be held. If this fails, we should return and wait
@@ -353,7 +357,7 @@ static void watchdog_timer_interrupt(int cpu)
if (__wd_nmi_output && xchg(&__wd_nmi_output, 0)) {
/*
* Something has called printk from NMI context. It might be
- * stuck, so this this triggers a flush that will get that
+ * stuck, so this triggers a flush that will get that
* printk output to the console.
*
* See wd_lockup_ipi.
@@ -527,7 +531,13 @@ static int stop_watchdog_on_cpu(unsigned int cpu)
static void watchdog_calc_timeouts(void)
{
- wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;
+ u64 threshold = watchdog_thresh;
+
+#ifdef CONFIG_PPC_PSERIES
+ threshold += (READ_ONCE(wd_timeout_pct) * threshold) / 100;
+#endif
+
+ wd_panic_timeout_tb = threshold * ppc_tb_freq;
/* Have the SMP detector trigger a bit later */
wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;
@@ -570,3 +580,12 @@ int __init watchdog_nmi_probe(void)
}
return 0;
}
+
+#ifdef CONFIG_PPC_PSERIES
+void watchdog_nmi_set_timeout_pct(u64 pct)
+{
+ pr_info("Set the NMI watchdog timeout factor to %llu%%\n", pct);
+ WRITE_ONCE(wd_timeout_pct, pct);
+ lockup_detector_reconfigure();
+}
+#endif
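
A worked example of the scaling, with hypothetical values (watchdog_thresh = 30 seconds and wd_timeout_pct = 200, i.e. a trebled timeout during live partition migration):

	/*
	 *	threshold               = 30 + (200 * 30) / 100 = 90 s
	 *	wd_panic_timeout_tb     = 90 * ppc_tb_freq
	 *	wd_smp_panic_timeout_tb = 90 * 3 / 2 = 135 s of timebase
	 */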