From d9b2b2a277219d4812311d995054ce4f95067725 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 13 Feb 2008 16:56:49 -0800 Subject: [LIB]: Make PowerPC LMB code generic so sparc64 can use it too. Signed-off-by: David S. Miller --- arch/powerpc/kernel/btext.c | 3 ++- arch/powerpc/kernel/crash.c | 3 ++- arch/powerpc/kernel/crash_dump.c | 3 ++- arch/powerpc/kernel/machine_kexec.c | 3 ++- arch/powerpc/kernel/prom.c | 2 +- arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/kernel/setup-common.c | 2 +- arch/powerpc/kernel/setup_64.c | 2 +- arch/powerpc/kernel/vdso.c | 3 ++- 9 files changed, 14 insertions(+), 9 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 80e2eef05b2e..9f9377745490 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -15,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 571132ed12c1..eae401de3f76 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -24,12 +24,13 @@ #include #include #include +#include #include #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 29ff77c468ac..9ee3c5278db0 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -13,8 +13,9 @@ #include #include +#include #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index c0c8e8c3ced9..2d202f274e73 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -12,8 +12,9 @@ #include #include #include +#include #include -#include +#include void machine_crash_shutdown(struct pt_regs *regs) { diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 8b5efbce8d90..c17a5851bdb7 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -31,10 +31,10 @@ #include #include #include +#include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 52e95c2158c0..e2e78d967f31 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -34,7 +35,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 6adb5a1e98bb..12cc41c16b0d 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -56,7 +57,6 @@ #include #include #include -#include #include #include diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 3b1529c103ef..2c2d8315193c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -55,7 +56,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 3702df7dc567..d715048370df 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -21,13 +21,14 @@ #include #include #include +#include #include #include #include 
#include #include -#include +#include #include #include #include -- cgit v1.2.1 From 445857e0fc16ba3f74e778dd5a8707459c84f933 Mon Sep 17 00:00:00 2001 From: Dale Farnsworth Date: Fri, 1 Feb 2008 08:11:41 +1100 Subject: [POWERPC] Remove dead code at KernelAltiVec This code isn't referenced anywhere, so remove it. Signed-off-by: Dale Farnsworth Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_32.S | 17 ----------------- 1 file changed, 17 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 0f4fac512020..c16d1354b19d 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -762,23 +762,6 @@ load_up_altivec: /* we haven't used ctr or xer or lr */ b fast_exception_return -/* - * AltiVec unavailable trap from kernel - print a message, but let - * the task use AltiVec in the kernel until it returns to user mode. - */ -KernelAltiVec: - lwz r3,_MSR(r1) - oris r3,r3,MSR_VEC@h - stw r3,_MSR(r1) /* enable use of AltiVec after return */ - lis r3,87f@h - ori r3,r3,87f@l - mr r4,r2 /* current */ - lwz r5,_NIP(r1) - bl printk - b ret_from_except -87: .string "AltiVec used in kernel (task=%p, pc=%x) \n" - .align 4,0 - /* * giveup_altivec(tsk) * Disable AltiVec for the task given as the argument, -- cgit v1.2.1 From 0276c1368f111b1fd75e6fe0bf4cd78a6903798f Mon Sep 17 00:00:00 2001 From: Marian Balakowicz Date: Sat, 10 Nov 2007 04:11:43 +1100 Subject: [POWERPC] Add 'model: ...' line to common show_cpuinfo() Print out 'model' property of '/' node as a machine name in generic show_cpuinfo() routine. Signed-off-by: Marian Balakowicz Acked-by: David Gibson Acked-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/setup-common.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 12cc41c16b0d..db540eab09f4 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -167,6 +167,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) unsigned short min; if (cpu_id == NR_CPUS) { + struct device_node *root; + const char *model = NULL; #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) unsigned long bogosum = 0; int i; @@ -178,6 +180,13 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); if (ppc_md.name) seq_printf(m, "platform\t: %s\n", ppc_md.name); + root = of_find_node_by_path("/"); + if (root) + model = of_get_property(root, "model", NULL); + if (model) + seq_printf(m, "model\t\t: %s\n", model); + of_node_put(root); + if (ppc_md.show_cpuinfo != NULL) ppc_md.show_cpuinfo(m); -- cgit v1.2.1 From bceabd15050d8d0e3742af350f072a110510a2aa Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 13 Mar 2008 16:20:31 +1100 Subject: [POWERPC] Really export empty_zero_page It was being protected by CONFIG_PPC32, but we want to export it on 64-bit also. This moves it out of the ifdef. 
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/ppc_ksyms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 9c98424277a8..a722ede726db 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -58,7 +58,6 @@ extern void program_check_exception(struct pt_regs *regs); extern void single_step_exception(struct pt_regs *regs); extern int sys_sigreturn(struct pt_regs *regs); -EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(clear_pages); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(ISA_DMA_THRESHOLD); @@ -192,3 +191,4 @@ EXPORT_SYMBOL(intercept_table); EXPORT_SYMBOL(__mtdcr); EXPORT_SYMBOL(__mfdcr); #endif +EXPORT_SYMBOL(empty_zero_page); -- cgit v1.2.1 From 71e91a0abb839f8d2372236d8fe0513c295ec717 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Mon, 17 Mar 2008 16:01:20 +1100 Subject: [POWERPC] Don't touch PT_DTRACE in exec The PT_DTRACE flag is meaningless and obsolete. Don't touch it. Signed-off-by: Roland McGrath Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/process.c | 5 ----- arch/powerpc/kernel/sys_ppc32.c | 5 ----- 2 files changed, 10 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 4846bf543a8c..7c8e3da23810 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -862,11 +862,6 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, flush_spe_to_thread(current); error = do_execve(filename, (char __user * __user *) a1, (char __user * __user *) a2, regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } putname(filename); out: return error; diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 4a4f5c6b560b..9c3371e6958e 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -368,11 +368,6 @@ long compat_sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, error = compat_do_execve(filename, compat_ptr(a1), compat_ptr(a2), regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } putname(filename); out: -- cgit v1.2.1 From 163dab39b5432761437ae59164ab351a8680ca4f Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Thu, 20 Mar 2008 08:07:51 +1100 Subject: [POWERPC] powerpc32: Remove asm-offsets ptrace cruft These items in asm-offsets.c are not used anywhere. This removes them. 
Signed-off-by: Roland McGrath Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/asm-offsets.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 4b749c416464..e932b43bd82f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -26,8 +26,6 @@ #ifdef CONFIG_PPC64 #include #include -#else -#include #endif #include @@ -60,7 +58,6 @@ int main(void) DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); #else DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); - DEFINE(PTRACE, offsetof(struct task_struct, ptrace)); #endif /* CONFIG_PPC64 */ DEFINE(KSP, offsetof(struct thread_struct, ksp)); @@ -80,7 +77,6 @@ int main(void) DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); - DEFINE(PT_PTRACED, PT_PTRACED); #endif #ifdef CONFIG_SPE DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); -- cgit v1.2.1 From 6ac26c8a7eb149dbd669cc6cd9b77ffc9cd0d2fb Mon Sep 17 00:00:00 2001 From: Manish Ahuja Date: Sat, 22 Mar 2008 10:37:08 +1100 Subject: [POWERPC] pseries: phyp dump: Reserve and release memory Initial patch for reserving memory in early boot, and freeing it later. If the previous boot had ended with a crash, the reserved memory would contain a copy of the crashed kernel data. Signed-off-by: Manish Ahuja Signed-off-by: Linas Vepstas Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index ff600ef0b4d6..e6c022ef12ee 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #ifdef DEBUG @@ -1040,6 +1041,51 @@ static void __init early_reserve_mem(void) #endif } +#ifdef CONFIG_PHYP_DUMP +/** + * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory + * + * This routine may reserve memory regions in the kernel only + * if the system is supported and a dump was taken in last + * boot instance or if the hardware is supported and the + * scratch area needs to be setup. In other instances it returns + * without reserving anything. The memory in case of dump being + * active is freed when the dump is collected (by userland tools). + */ +static void __init phyp_dump_reserve_mem(void) +{ + unsigned long base, size; + if (!phyp_dump_info->phyp_dump_configured) { + printk(KERN_ERR "Phyp-dump not supported on this hardware\n"); + return; + } + + if (phyp_dump_info->phyp_dump_is_active) { + /* Reserve *everything* above RMR.Area freed by userland tools*/ + base = PHYP_DUMP_RMR_END; + size = lmb_end_of_DRAM() - base; + + /* XXX crashed_ram_end is wrong, since it may be beyond + * the memory_limit, it will need to be adjusted. 
*/ + lmb_reserve(base, size); + + phyp_dump_info->init_reserve_start = base; + phyp_dump_info->init_reserve_size = size; + } else { + size = phyp_dump_info->cpu_state_size + + phyp_dump_info->hpte_region_size + + PHYP_DUMP_RMR_END; + base = lmb_end_of_DRAM() - size; + lmb_reserve(base, size); + phyp_dump_info->init_reserve_start = base; + phyp_dump_info->init_reserve_size = size; + } +} +#else +static inline void __init phyp_dump_reserve_mem(void) {} +#endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ + + void __init early_init_devtree(void *params) { DBG(" -> early_init_devtree(%p)\n", params); @@ -1052,6 +1098,11 @@ void __init early_init_devtree(void *params) of_scan_flat_dt(early_init_dt_scan_rtas, NULL); #endif +#ifdef CONFIG_PHYP_DUMP + /* scan tree to see if dump occured during last boot */ + of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); +#endif + /* Retrieve various informations from the /chosen node of the * device-tree, including the platform type, initrd location and * size, TCE reserve, and more ... @@ -1072,6 +1123,7 @@ void __init early_init_devtree(void *params) reserve_kdump_trampoline(); reserve_crashkernel(); early_reserve_mem(); + phyp_dump_reserve_mem(); lmb_enforce_memory_limit(memory_limit); lmb_analyze(); -- cgit v1.2.1 From 654f596da4a83a8d2734fba26c2a1257533e6d75 Mon Sep 17 00:00:00 2001 From: Manish Ahuja Date: Sat, 22 Mar 2008 11:38:59 +1100 Subject: [POWERPC] pseries: phyp dump: Disable phyp-dump through boot-var This adds a kernel command line option "phyp_dump", which takes a 0/1 value for disabling/ enabling phyp_dump at boot time. Kdump can use this on cmdline (phyp_dump=0) to disable phyp-dump during boot when enabling itself. This will ensure only one dumping mechanism is active at any given time. Signed-off-by: Manish Ahuja Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index e6c022ef12ee..9330920265f3 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -1060,6 +1060,11 @@ static void __init phyp_dump_reserve_mem(void) return; } + if (!phyp_dump_info->phyp_dump_at_boot) { + printk(KERN_INFO "Phyp-dump disabled at boot time\n"); + return; + } + if (phyp_dump_info->phyp_dump_is_active) { /* Reserve *everything* above RMR.Area freed by userland tools*/ base = PHYP_DUMP_RMR_END; -- cgit v1.2.1 From 464076a4b328946528998513c4ef799fd60de588 Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Sun, 24 Feb 2008 08:07:41 +1100 Subject: [POWERPC] 4xx: Add AMCC 460EX/460GT support to cputable.c & cpu_setup_44x.S This patch adds basic support for the AMCC 460EX/460GT PPC's to arch/powerpc. Currently those PPC's are still based on a 440 core and *not* a 460 core. 
Here some basic features of those SoC's: 460EX: - Up to 1.2GHz, 32kB L1 I-cache and D-cache, 256kB L2-cache, FPU - 1 * PCI (max 66MHz), 2 * PCIe (one 4-lane, one 1-lane) - 2 * GBit Ethernet with TCP/IP acceleration - USB 2.0 Host/Device OTG and Host interface - SATA controller - Optional security feature 460GT (only changes to 460EX): - 4 * GBit Ethernet with TCP/IP acceleration - RapidIO - No SATA - No USB Signed-off-by: Stefan Roese Signed-off-by: Josh Boyer --- arch/powerpc/kernel/cpu_setup_44x.S | 5 ++++- arch/powerpc/kernel/cputable.c | 28 +++++++++++++++++++++++++++- 2 files changed, 31 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S index 6250443ab9c9..5465e8de0e61 100644 --- a/arch/powerpc/kernel/cpu_setup_44x.S +++ b/arch/powerpc/kernel/cpu_setup_44x.S @@ -3,7 +3,7 @@ * Valentine Barshak * MontaVista Software, Inc (c) 2007 * - * Based on cpu_setup_6xx code by + * Based on cpu_setup_6xx code by * Benjamin Herrenschmidt * * This program is free software; you can redistribute it and/or @@ -32,6 +32,9 @@ _GLOBAL(__setup_cpu_440grx) bl __fixup_440A_mcheck mtlr r4 blr +_GLOBAL(__setup_cpu_460ex) +_GLOBAL(__setup_cpu_460gt) + b __init_fpu_44x _GLOBAL(__setup_cpu_440gx) _GLOBAL(__setup_cpu_440spe) b __fixup_440A_mcheck diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 2a8f5cc5184f..26ffb44e2701 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -36,6 +36,8 @@ extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec); +extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); +extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); @@ -1397,6 +1399,30 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_440A, .platform = "ppc440", }, + { /* 460EX */ + .pvr_mask = 0xffff0002, + .pvr_value = 0x13020002, + .cpu_name = "460EX", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_460ex, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 460GT */ + .pvr_mask = 0xffff0002, + .pvr_value = 0x13020000, + .cpu_name = "460GT", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_460gt, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, #endif /* CONFIG_44x */ #ifdef CONFIG_FSL_BOOKE #ifdef CONFIG_E200 @@ -1512,7 +1538,7 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) *t = *s; *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec; #if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE) - /* ppc64 and booke expect identify_cpu to also call + /* ppc64 and booke expect identify_cpu to also call * setup_cpu for that processor. I will consolidate * that at a later time, for now, just use #ifdef. 
* we also don't need to PTRRELOC the function pointer -- cgit v1.2.1 From 18f032cb51a6de7d9c875e1112650ecf975fee12 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Sat, 29 Mar 2008 03:07:45 +1100 Subject: [POWERPC] move_device_tree() should be __init WARNING: vmlinux.o(.text+0x1e4c0): Section mismatch in reference from the function .move_device_tree() to the function .init.text:.lmb_alloc_base() The function .move_device_tree() references the function __init .lmb_alloc_base(). This is often because .move_device_tree lacks a __init annotation or the annotation of .lmb_alloc_base is wrong. move_device_tree() is called from early_init_devtree() only, which is __init Signed-off-by: Geert Uytterhoeven Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 9330920265f3..31d5b22c59a2 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -437,7 +437,7 @@ early_param("mem", early_parse_mem); * The device tree may be allocated beyond our memory limit, or inside the * crash kernel region for kdump. If so, move it out of the way. */ -static void move_device_tree(void) +static void __init move_device_tree(void) { unsigned long start, size; void *p; -- cgit v1.2.1 From a78bfbfcfaca64e6198f164c43a60afc8a50e2c6 Mon Sep 17 00:00:00 2001 From: Robert Brose Date: Sat, 29 Mar 2008 07:20:23 +1100 Subject: [POWERPC] Add kernel parameter to set l3cr for MPC745x Old-world powermacs don't set L2CR or L3CR on processor upgrade cards. This simple patch allows the setting of L3CR via a kernel parameter (like the existing kernel parameter to set L2CR). Signed-off-by: Robert Brose Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/setup_32.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index cd870a823d18..eac936eb3190 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -172,6 +172,18 @@ int __init ppc_setup_l2cr(char *str) } __setup("l2cr=", ppc_setup_l2cr); +/* Checks "l3cr=xxxx" command-line option */ +int __init ppc_setup_l3cr(char *str) +{ + if (cpu_has_feature(CPU_FTR_L3CR)) { + unsigned long val = simple_strtoul(str, NULL, 0); + printk(KERN_INFO "l3cr set to %lx\n", val); + _set_L3CR(val); /* and enable it */ + } + return 1; +} +__setup("l3cr=", ppc_setup_l3cr); + #ifdef CONFIG_GENERIC_NVRAM /* Generic nvram hooks used by drivers/char/gen_nvram.c */ -- cgit v1.2.1 From e48b1b452ff630288c930fd8e0c2d808bc15f7ad Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Sat, 29 Mar 2008 08:21:07 +1100 Subject: [POWERPC] Replace remaining __FUNCTION__ occurrences __FUNCTION__ is gcc-specific, use __func__ Signed-off-by: Harvey Harrison Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/ibmebus.c | 12 ++++++------ arch/powerpc/kernel/iommu.c | 4 ++-- arch/powerpc/kernel/isa-bridge.c | 2 +- arch/powerpc/kernel/lparcfg.c | 12 ++++++------ arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/kernel/rtas_flash.c | 2 +- arch/powerpc/kernel/rtas_pci.c | 10 +++++----- arch/powerpc/kernel/vio.c | 10 +++++----- 8 files changed, 27 insertions(+), 27 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 2f50bb5d00f9..9971159c8040 100644 --- 
a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -183,7 +183,7 @@ static int ibmebus_create_devices(const struct of_device_id *matches) ret = ibmebus_create_device(child); if (ret) { printk(KERN_ERR "%s: failed to create device (%i)", - __FUNCTION__, ret); + __func__, ret); of_node_put(child); break; } @@ -269,7 +269,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, if (bus_find_device(&ibmebus_bus_type, NULL, path, ibmebus_match_path)) { printk(KERN_WARNING "%s: %s has already been probed\n", - __FUNCTION__, path); + __func__, path); rc = -EEXIST; goto out; } @@ -279,7 +279,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, of_node_put(dn); } else { printk(KERN_WARNING "%s: no such device node: %s\n", - __FUNCTION__, path); + __func__, path); rc = -ENODEV; } @@ -308,7 +308,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, return count; } else { printk(KERN_WARNING "%s: %s not on the bus\n", - __FUNCTION__, path); + __func__, path); kfree(path); return -ENODEV; @@ -337,14 +337,14 @@ static int __init ibmebus_bus_init(void) err = of_bus_type_init(&ibmebus_bus_type, "ibmebus"); if (err) { printk(KERN_ERR "%s: failed to register IBM eBus.\n", - __FUNCTION__); + __func__); return err; } err = device_register(&ibmebus_bus_device); if (err) { printk(KERN_WARNING "%s: device_register returned %i\n", - __FUNCTION__, err); + __func__, err); bus_unregister(&ibmebus_bus_type); return err; diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 8f1f4e539c4b..0c663669bc32 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -520,7 +520,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name) unsigned int order; if (!tbl || !tbl->it_map) { - printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__, + printk(KERN_ERR "%s: expected TCE map for %s\n", __func__, node_name); return; } @@ -530,7 +530,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name) for (i = 0; i < (tbl->it_size/64); i++) { if (tbl->it_map[i] != 0) { printk(KERN_WARNING "%s: Unexpected TCEs for %s\n", - __FUNCTION__, node_name); + __func__, node_name); break; } } diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index ee172aa42aa7..d73551da2ff1 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -99,7 +99,7 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, */ if ((pci_addr != 0) || (isa_addr != 0)) { printk(KERN_ERR "unexpected isa to pci mapping: %s\n", - __FUNCTION__); + __func__); return; } diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index dcb89a88df46..1ffacc698ffb 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -226,7 +226,7 @@ static void parse_system_parameter_string(struct seq_file *m) unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); if (!local_buffer) { printk(KERN_ERR "%s %s kmalloc failure at line %d \n", - __FILE__, __FUNCTION__, __LINE__); + __FILE__, __func__, __LINE__); return; } @@ -243,14 +243,14 @@ static void parse_system_parameter_string(struct seq_file *m) if (call_status != 0) { printk(KERN_INFO "%s %s Error calling get-system-parameter (0x%x)\n", - __FILE__, __FUNCTION__, call_status); + __FILE__, __func__, call_status); } else { int splpar_strlen; int idx, w_idx; char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); if (!workbuffer) { printk(KERN_ERR "%s %s kmalloc failure at line %d 
\n", - __FILE__, __FUNCTION__, __LINE__); + __FILE__, __func__, __LINE__); kfree(local_buffer); return; } @@ -484,10 +484,10 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf, current_weight = (resource >> 5 * 8) & 0xFF; pr_debug("%s: current_entitled = %lu, current_weight = %u\n", - __FUNCTION__, current_entitled, current_weight); + __func__, current_entitled, current_weight); pr_debug("%s: new_entitled = %lu, new_weight = %u\n", - __FUNCTION__, *new_entitled_ptr, *new_weight_ptr); + __func__, *new_entitled_ptr, *new_weight_ptr); retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr, *new_weight_ptr); @@ -502,7 +502,7 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf, retval = -EINVAL; } else { printk(KERN_WARNING "%s: received unknown hv return code %ld", - __FUNCTION__, retval); + __func__, retval); retval = -EIO; } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index e2e78d967f31..eb3beea4536e 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -507,7 +507,7 @@ int rtas_error_rc(int rtas_rc) break; default: printk(KERN_ERR "%s: unexpected RTAS error %d\n", - __FUNCTION__, rtas_rc); + __func__, rtas_rc); rc = -ERANGE; break; } diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index f2276593f416..e1756e112344 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -807,7 +807,7 @@ int __init rtas_flash_init(void) rtas_block_ctor); if (!flash_block_cache) { printk(KERN_ERR "%s: failed to create block cache\n", - __FUNCTION__); + __func__); rc = -ENOMEM; goto cleanup; } diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 433a0a0949fb..945e8f4db873 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -326,7 +326,7 @@ int pcibios_remove_root_bus(struct pci_controller *phb) res = b->resource[0]; if (!res->flags) { - printk(KERN_ERR "%s: no IO resource for PHB %s\n", __FUNCTION__, + printk(KERN_ERR "%s: no IO resource for PHB %s\n", __func__, b->name); return 1; } @@ -334,13 +334,13 @@ int pcibios_remove_root_bus(struct pci_controller *phb) rc = pcibios_unmap_io_space(b); if (rc) { printk(KERN_ERR "%s: failed to unmap IO on bus %s\n", - __FUNCTION__, b->name); + __func__, b->name); return 1; } if (release_resource(res)) { printk(KERN_ERR "%s: failed to release IO on bus %s\n", - __FUNCTION__, b->name); + __func__, b->name); return 1; } @@ -348,13 +348,13 @@ int pcibios_remove_root_bus(struct pci_controller *phb) res = b->resource[i]; if (!res->flags && i == 0) { printk(KERN_ERR "%s: no MEM resource for PHB %s\n", - __FUNCTION__, b->name); + __func__, b->name); return 1; } if (res->flags && release_resource(res)) { printk(KERN_ERR "%s: failed to release IO %d on bus %s\n", - __FUNCTION__, i, b->name); + __func__, i, b->name); return 1; } } diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index f98867252ee2..b77f8af7ddde 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -139,7 +139,7 @@ static int vio_bus_remove(struct device *dev) */ int vio_register_driver(struct vio_driver *viodrv) { - printk(KERN_DEBUG "%s: driver %s registering\n", __FUNCTION__, + printk(KERN_DEBUG "%s: driver %s registering\n", __func__, viodrv->driver.name); /* fill in 'struct driver' fields */ @@ -184,7 +184,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) /* we need the 'device_type' property, in order to match with drivers */ if 
(of_node->type == NULL) { printk(KERN_WARNING "%s: node %s missing 'device_type'\n", - __FUNCTION__, + __func__, of_node->name ? of_node->name : ""); return NULL; } @@ -192,7 +192,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) unit_address = of_get_property(of_node, "reg", NULL); if (unit_address == NULL) { printk(KERN_WARNING "%s: node %s missing 'reg'\n", - __FUNCTION__, + __func__, of_node->name ? of_node->name : ""); return NULL; } @@ -227,7 +227,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) /* register with generic device framework */ if (device_register(&viodev->dev)) { printk(KERN_ERR "%s: failed to register device %s\n", - __FUNCTION__, viodev->dev.bus_id); + __func__, viodev->dev.bus_id); /* XXX free TCE table */ kfree(viodev); return NULL; @@ -258,7 +258,7 @@ static int __init vio_bus_init(void) err = device_register(&vio_bus_device.dev); if (err) { printk(KERN_WARNING "%s: device_register returned %i\n", - __FUNCTION__, err); + __func__, err); return err; } -- cgit v1.2.1 From 4df4441e418c809f263939b9f371b67aca28a280 Mon Sep 17 00:00:00 2001 From: Roel Kluin <12o3l@tiscali.nl> Date: Mon, 28 Jan 2008 21:06:55 +1100 Subject: [POWERPC] Replace logical-AND by bit-AND in pci_process_ISA_OF_ranges() Replace logical "&&" by bit "&" for ISA_SPACE_MASK. Signed-off-by: Roel Kluin <12o3l@tiscali.nl> Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/isa-bridge.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index d73551da2ff1..289af348978d 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -80,13 +80,13 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, * (size depending on dev->n_addr_cells) * cell 5: the size of the range */ - if ((range->isa_addr.a_hi && ISA_SPACE_MASK) != ISA_SPACE_IO) { + if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO) { range++; rlen -= sizeof(struct isa_range); if (rlen < sizeof(struct isa_range)) goto inval_range; } - if ((range->isa_addr.a_hi && ISA_SPACE_MASK) != ISA_SPACE_IO) + if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO) goto inval_range; isa_addr = range->isa_addr.a_lo; -- cgit v1.2.1 From 0119536cd314ef95553604208c25bc35581f7f0a Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 1 Mar 2008 03:04:57 +1100 Subject: [POWERPC] Add hand-coded assembly strcmp We have an assembly version of strncmp for the bootwrapper, but not for the kernel, so we end up using the C version in the kernel. This takes the strncmp code from the bootup and copies it to the kernel proper, adding two instructions so it copes correctly with len==0. 
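For reference, the semantics that the len==0 check has to preserve can be written as a small portable C routine; this is an illustrative sketch with made-up names, not the kernel's hand-coded assembly or its C fallback:

#include <assert.h>
#include <stddef.h>

/* Reference strncmp: with len == 0 the strings must compare equal,
 * which is the corner case the extra two instructions guard against. */
static int ref_strncmp(const char *a, const char *b, size_t len)
{
	if (len == 0)		/* nothing to compare: always "equal" */
		return 0;
	while (--len && *a && *a == *b) {
		a++;
		b++;
	}
	return (unsigned char)*a - (unsigned char)*b;
}

int main(void)
{
	assert(ref_strncmp("abc", "xyz", 0) == 0);	/* the len == 0 case */
	assert(ref_strncmp("abc", "abd", 3) < 0);
	assert(ref_strncmp("abc", "abc", 3) == 0);
	return 0;
}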
Signed-off-by: Steven Rostedt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/ppc_ksyms.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index a722ede726db..5a4c76eada48 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -78,6 +78,7 @@ EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); -- cgit v1.2.1 From c6d4d5a8a83e4a564bcf233fdd565183c33df5d1 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 14 Mar 2008 06:52:10 +1100 Subject: [POWERPC] Convert pci and eeh code to of_device_is_available A couple of places are duplicating the function of of_device_is_available; convert them to use it. Signed-off-by: Nathan Lynch Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/rtas_pci.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 945e8f4db873..3ab88a9dc70d 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -56,21 +56,6 @@ static inline int config_access_valid(struct pci_dn *dn, int where) return 0; } -static int of_device_available(struct device_node * dn) -{ - const char *status; - - status = of_get_property(dn, "status", NULL); - - if (!status) - return 1; - - if (!strcmp(status, "okay")) - return 1; - - return 0; -} - int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val) { int returnval = -1; @@ -117,7 +102,7 @@ static int rtas_pci_read_config(struct pci_bus *bus, for (dn = busdn->child; dn; dn = dn->sibling) { struct pci_dn *pdn = PCI_DN(dn); if (pdn && pdn->devfn == devfn - && of_device_available(dn)) + && of_device_is_available(dn)) return rtas_read_config(pdn, where, size, val); } @@ -164,7 +149,7 @@ static int rtas_pci_write_config(struct pci_bus *bus, for (dn = busdn->child; dn; dn = dn->sibling) { struct pci_dn *pdn = PCI_DN(dn); if (pdn && pdn->devfn == devfn - && of_device_available(dn)) + && of_device_is_available(dn)) return rtas_write_config(pdn, where, size, val); } return PCIBIOS_DEVICE_NOT_FOUND; -- cgit v1.2.1 From 320787c75ccac3189a1b7aae81f0efc1055f6d3a Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 14 Apr 2008 13:59:02 +1000 Subject: [POWERPC] Fix handling of unrecoverable SLB miss interrupts If an SLB miss interrupt happens while the RI bit of MSR is zero, we can't just return, because RI being zero indicates that SRR0/SRR1 potentially had live values in them, and the process of taking an interrupt overwrites them. This should never happen, but if it does, we try to print a nice oops message. That doesn't work, however, because the code at unrecov_slb assumes that the MMU has been turned on, but we call it with the MMU off (and have done so since the SLB miss handler was rewritten to run without turning the MMU on) -- except on iSeries, where everything runs with the MMU on. This fixes it by adding the necessary code to turn the MMU on if necessary. 
Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index d3aee08e6814..43a38d89eafc 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -621,7 +621,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) mtlr r10 andi. r10,r12,MSR_RI /* check for unrecoverable exception */ - beq- unrecov_slb + beq- 2f .machine push .machine "power4" @@ -643,6 +643,22 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) rfid b . /* prevent speculative execution */ +2: +#ifdef CONFIG_PPC_ISERIES +BEGIN_FW_FTR_SECTION + b unrecov_slb +END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) +#endif /* CONFIG_PPC_ISERIES */ + mfspr r11,SPRN_SRR0 + clrrdi r10,r13,32 + LOAD_HANDLER(r10,unrecov_slb) + mtspr SPRN_SRR0,r10 + mfmsr r10 + ori r10,r10,MSR_IR|MSR_DR|MSR_RI + mtspr SPRN_SRR1,r10 + rfid + b . + unrecov_slb: EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) DISABLE_INTS -- cgit v1.2.1 From 3eb9cf076180ed2003db77bd2c33ac4ed0211089 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 10 Apr 2008 16:39:18 +1000 Subject: [POWERPC] iSeries: Use alternate paca structure for booting The iSeries HV only needs the first two fields of the paca statically initialised, so create an alternate paca that contains only those and switch to our real paca immediately after boot. This is in order to make the 1024 cpu patches easier since they will no longer have to statically initialise the pacas for iSeries. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/asm-offsets.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index e932b43bd82f..292c6d8db0e1 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -44,6 +44,9 @@ #include #include #endif +#ifdef CONFIG_PPC_ISERIES +#include +#endif #define DEFINE(sym, val) \ asm volatile("\n->" #sym " %0 " #val : : "i" (val)) @@ -321,6 +324,9 @@ int main(void) DEFINE(PAGE_OFFSET_VSID, KERNEL_VSID(PAGE_OFFSET)); DEFINE(VMALLOC_START_ESID, GET_ESID(VMALLOC_START)); DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START)); + + /* alpaca */ + DEFINE(ALPACA_SIZE, sizeof(struct alpaca)); #endif DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE); -- cgit v1.2.1 From 30ff2e87ed55e83b4eb436f5f14a7e49ff81ad99 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 10 Apr 2008 16:43:47 +1000 Subject: [POWERPC] iSeries: Make iseries_reg_save private to iSeries Now that we have the alpaca, the reg_save_ptr is no longer needed in the paca. Eradicate all global uses of it and make it static in the iSeries lpardata.c Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/paca.c | 33 ++++++++------------------------- 1 file changed, 8 insertions(+), 25 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 55f1a25085cd..867b22d17385 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -25,13 +24,13 @@ extern unsigned long __toc_start; /* - * iSeries structure which the hypervisor knows about - this structure + * The structure which the hypervisor knows about - this structure * should not cross a page boundary. 
The vpa_init/register_vpa call * is now known to fail if the lppaca structure crosses a page - * boundary. The lppaca is also used on POWER5 pSeries boxes. The - * lppaca is 640 bytes long, and cannot readily change since the - * hypervisor knows its layout, so a 1kB alignment will suffice to - * ensure that it doesn't cross a page boundary. + * boundary. The lppaca is also used on legacy iSeries and POWER5 + * pSeries boxes. The lppaca is 640 bytes long, and cannot readily + * change since the hypervisor knows its layout, so a 1kB alignment + * will suffice to ensure that it doesn't cross a page boundary. */ struct lppaca lppaca[] = { [0 ... (NR_CPUS-1)] = { @@ -66,32 +65,16 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = { * processors. The processor VPD array needs one entry per physical * processor (not thread). */ -#define PACA_INIT_COMMON(number) \ +#define PACA_INIT(number) \ +{ \ .lppaca_ptr = &lppaca[number], \ .lock_token = 0x8000, \ .paca_index = (number), /* Paca Index */ \ .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ .hw_cpu_id = 0xffff, \ - .slb_shadow_ptr = &slb_shadow[number], - -#ifdef CONFIG_PPC_ISERIES -#define PACA_INIT_ISERIES(number) \ - .reg_save_ptr = &iseries_reg_save[number], - -#define PACA_INIT(number) \ -{ \ - PACA_INIT_COMMON(number) \ - PACA_INIT_ISERIES(number) \ + .slb_shadow_ptr = &slb_shadow[number], \ } -#else -#define PACA_INIT(number) \ -{ \ - PACA_INIT_COMMON(number) \ -} - -#endif - struct paca_struct paca[] = { PACA_INIT(0), #if NR_CPUS > 1 -- cgit v1.2.1 From 7c6352a4699e9a3a2d91b2cddbf3f1048207e904 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 9 Apr 2008 17:21:26 +1000 Subject: [POWERPC] Initialize paca->current earlier Currently, we initialize the "current" pointer in the PACA (which is used by the "current" macro in the kernel) before calling setup_system(). That means that early_setup() is called with current still "NULL" which is -not- a good idea. It happens to work so far but breaks with lockdep when early code calls printk. This changes it so that all PACAs are statically initialized with __current pointing to the init task. For non-0 CPUs, this is fixed up before use. 
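The approach described above (statically point every entry at the init task, then repoint each secondary CPU's entry before that CPU runs) can be sketched in ordinary C along these lines; the struct and names below are illustrative stand-ins, not the real paca layout:

#include <stdio.h>

#define NR_CPUS 4

struct task { const char *comm; };

static struct task init_task = { "swapper" };
static struct task idle_task[NR_CPUS];

/* Illustrative stand-in for the paca: current_task is never NULL, because
 * every entry is statically initialized to point at init_task. */
static struct { int cpu; struct task *current_task; } fake_paca[NR_CPUS] = {
	{ 0, &init_task }, { 1, &init_task }, { 2, &init_task }, { 3, &init_task },
};

int main(void)
{
	/* Secondary CPUs get repointed at their own idle task before use. */
	for (int cpu = 1; cpu < NR_CPUS; cpu++) {
		idle_task[cpu].comm = "idle";
		fake_paca[cpu].current_task = &idle_task[cpu];
	}

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: current = %s\n", cpu,
		       fake_paca[cpu].current_task->comm);
	return 0;
}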
Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 4 ---- arch/powerpc/kernel/paca.c | 1 + 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 43a38d89eafc..44229c3749ac 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -1521,10 +1521,6 @@ _INIT_GLOBAL(start_here_common) li r0,0 stdu r0,-STACK_FRAME_OVERHEAD(r1) - /* ptr to current */ - LOAD_REG_IMMEDIATE(r4, init_task) - std r4,PACACURRENT(r13) - /* Load the TOC */ ld r2,PACATOC(r13) std r1,PACAKSAVE(r13) diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 867b22d17385..ac163bd46cfd 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -73,6 +73,7 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = { .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ .hw_cpu_id = 0xffff, \ .slb_shadow_ptr = &slb_shadow[number], \ + .__current = &init_task, \ } struct paca_struct paca[] = { -- cgit v1.2.1 From e6768a4f392d05bc11ed508d35938932c73aac5a Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 9 Apr 2008 17:21:28 +1000 Subject: [POWERPC] Fixup softirq preempt count This fixes the handling of the preempt count when switching interrupt stacks so that HW interrupt properly get the softirq mask copied over from the previous stack. It also initializes the softirq stack preempt_count to 0 instead of SOFTIRQ_OFFSET, like x86, as __do_softirq() does the increment, and we hit some lockdep checks if we have it twice. That means we do run for a little while off the softirq stack with the preempt-count set to 0, which could be deadly if we try to take a softirq at that point, however we do so with interrupts disabled, so I think we are ok. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/irq.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 292163f5b39a..4617b65d464d 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -310,8 +310,21 @@ void do_IRQ(struct pt_regs *regs) handler = &__do_IRQ; irqtp->task = curtp->task; irqtp->flags = 0; + + /* Copy the softirq bits in preempt_count so that the + * softirq checks work in the hardirq context. + */ + irqtp->preempt_count = + (irqtp->preempt_count & ~SOFTIRQ_MASK) | + (curtp->preempt_count & SOFTIRQ_MASK); + call_handle_irq(irq, desc, irqtp, handler); irqtp->task = NULL; + + + /* Set any flag that may have been set on the + * alternate stack + */ if (irqtp->flags) set_bits(irqtp->flags, &curtp->flags); } else @@ -357,7 +370,7 @@ void irq_ctx_init(void) memset((void *)softirq_ctx[i], 0, THREAD_SIZE); tp = softirq_ctx[i]; tp->cpu = i; - tp->preempt_count = SOFTIRQ_OFFSET; + tp->preempt_count = 0; memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); tp = hardirq_ctx[i]; -- cgit v1.2.1 From 7f4392cdcc63fea72fc77d14497059267d77d5d0 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 14 Apr 2008 02:52:38 +1000 Subject: [POWERPC] Efika: Really, don't pretend to be CHRP Fedora 9 works on Efika without the separate 'device-tree supplement', thanks to the kernel's own fixups. 
With one exception -- because 'CHRP' still appears on the 'machine:' line in /proc/cpuinfo, the installer misdetects the platform and misconfigures yaboot, putting it into a PReP boot partition instead of in the /boot filesystem where the Efika's firmware could find it. The kernel's fixups for Efika already correct one instance of 'chrp', in the 'device_type' property. This fixes it in the 'CODEGEN,description' property too, since that's what's exposed to userspace in /proc/cpuinfo. Signed-off-by: David Woodhouse Acked-by: Grant Likely Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom_init.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 5ab4c8466cc9..6d6df1e60325 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2240,6 +2240,14 @@ static void __init fixup_device_tree_efika(void) if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0)) prom_setprop(node, "/", "device_type", "efika", sizeof("efika")); + /* CODEGEN,description is exposed in /proc/cpuinfo so + fix that too */ + rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop)); + if (rv != PROM_ERROR && (strstr(prop, "CHRP"))) + prom_setprop(node, "/", "CODEGEN,description", + "Efika 5200B PowerPC System", + sizeof("Efika 5200B PowerPC System")); + /* Fixup bestcomm interrupts property */ node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm")); if (PHANDLE_VALID(node)) { -- cgit v1.2.1 From 0aef996b37d08757562ecf0bb0c1f6998e634c8b Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 16 Apr 2008 05:52:23 +1000 Subject: [POWERPC] 85xx: Cleanup TLB initialization * Determine the RPN we are running the kernel at runtime rather than using compile time constant for initial TLB * Cleanup adjust_total_lowmem() to respect memstart_addr and be a bit more clear on variables that are sizes vs addresses. Signed-off-by: Kumar Gala Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_fsl_booke.S | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index d9cc2c288d9e..9f40b3e77100 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -68,7 +68,9 @@ _ENTRY(_start); mr r29,r5 mr r28,r6 mr r27,r7 + li r25,0 /* phys kernel start (low) */ li r24,0 /* CPU number */ + li r23,0 /* phys kernel start (high) */ /* We try to not make any assumptions about how the boot loader * setup or used the TLBs. 
We invalidate all mappings from the @@ -167,7 +169,28 @@ skpinv: addi r6,r6,1 /* Increment */ mtspr SPRN_MAS0,r7 tlbre - /* Just modify the entry ID, EPN and RPN for the temp mapping */ + /* grab and fixup the RPN */ + mfspr r6,SPRN_MAS1 /* extract MAS1[SIZE] */ + rlwinm r6,r6,25,27,30 + li r8,-1 + addi r6,r6,10 + slw r6,r8,r6 /* convert to mask */ + + bl 1f /* Find our address */ +1: mflr r7 + + mfspr r8,SPRN_MAS3 +#ifdef CONFIG_PHYS_64BIT + mfspr r23,SPRN_MAS7 +#endif + and r8,r6,r8 + subfic r9,r6,-4096 + and r9,r9,r7 + + or r25,r8,r9 + ori r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR) + + /* Just modify the entry ID and EPN for the temp mapping */ lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ mtspr SPRN_MAS0,r7 @@ -177,12 +200,10 @@ skpinv: addi r6,r6,1 /* Increment */ ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l mtspr SPRN_MAS1,r6 mfspr r6,SPRN_MAS2 - lis r7,PHYSICAL_START@h + li r7,0 /* temp EPN = 0 */ rlwimi r7,r6,0,20,31 mtspr SPRN_MAS2,r7 - mfspr r6,SPRN_MAS3 - rlwimi r7,r6,0,20,31 - mtspr SPRN_MAS3,r7 + mtspr SPRN_MAS3,r8 tlbwe xori r6,r4,1 @@ -232,8 +253,7 @@ skpinv: addi r6,r6,1 /* Increment */ ori r6,r6,PAGE_OFFSET@l rlwimi r6,r7,0,20,31 mtspr SPRN_MAS2,r6 - li r7,(MAS3_SX|MAS3_SW|MAS3_SR) - mtspr SPRN_MAS3,r7 + mtspr SPRN_MAS3,r8 tlbwe /* 7. Jump to KERNELBASE mapping */ -- cgit v1.2.1 From 4846c5deb9776a7306d0f656ade7505278ac39ba Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 16 Apr 2008 05:52:26 +1000 Subject: [POWERPC] Clean up some linker and symbol usage * PAGE_OFFSET is not always the start of code, use _stext instead. * grab PAGE_SIZE and KERNELBASE from asm/page.h like ppc64 does. Makes the code a bit more common and provide a single place to manipulate the defines for things like kdump. 
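The same idiom has a long-standing userspace analogue: on typical Linux/ELF toolchains the default linker script provides etext, edata and end, so code can take these boundaries from the link itself rather than from a hard-coded constant. A minimal sketch, assuming such a toolchain:

#include <stdio.h>

/* Provided by the default linker script on typical Linux/ELF systems;
 * only the addresses of these symbols are meaningful. */
extern char etext, edata, end;

int main(void)
{
	/* The addresses come from the link, not from a compile-time constant. */
	printf("end of text:        %p\n", (void *)&etext);
	printf("end of init data:   %p\n", (void *)&edata);
	printf("end of bss (break): %p\n", (void *)&end);
	return 0;
}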
Signed-off-by: Kumar Gala Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/setup_32.c | 2 +- arch/powerpc/kernel/setup_64.c | 2 +- arch/powerpc/kernel/vmlinux.lds.S | 4 +--- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index eac936eb3190..d813c394f0ff 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -289,7 +289,7 @@ void __init setup_arch(char **cmdline_p) if (ppc_md.panic) setup_panic(); - init_mm.start_code = PAGE_OFFSET; + init_mm.start_code = (unsigned long)_stext; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; init_mm.brk = klimit; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 2c2d8315193c..0205d408d2ed 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -510,7 +510,7 @@ void __init setup_arch(char **cmdline_p) if (ppc_md.panic) setup_panic(); - init_mm.start_code = PAGE_OFFSET; + init_mm.start_code = (unsigned long)_stext; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; init_mm.brk = klimit; diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 0afb9e31d2a0..b5a76bcd95a6 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -1,11 +1,9 @@ #ifdef CONFIG_PPC64 -#include #define PROVIDE32(x) PROVIDE(__unused__##x) #else -#define PAGE_SIZE 4096 -#define KERNELBASE CONFIG_KERNEL_START #define PROVIDE32(x) PROVIDE(x) #endif +#include #include #include -- cgit v1.2.1 From 366234f657879aeb7a1e2ca582f2f24f3fae9269 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 16 Apr 2008 05:52:28 +1000 Subject: [POWERPC] Update linker script to properly set physical addresses We can set LOAD_OFFSET and use the AT attribute on sections and the linker will properly set the physical address of the LOAD program header for us. This allows us to know how the PHYSICAL_START the user configured a kernel with by just looking at the resulting vmlinux ELF. This is pretty much stolen from how x86 does things in their linker scripts. Signed-off-by: Kumar Gala Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vmlinux.lds.S | 47 +++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 24 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index b5a76bcd95a6..0c3000bf8d75 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -31,7 +31,7 @@ SECTIONS */ /* Text and gots */ - .text : { + .text : AT(ADDR(.text) - LOAD_OFFSET) { ALIGN_FUNCTION(); *(.text.head) _text = .; @@ -56,7 +56,7 @@ SECTIONS RODATA /* Exception & bug tables */ - __ex_table : { + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { __start___ex_table = .; *(__ex_table) __stop___ex_table = .; @@ -72,7 +72,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_begin = .; - .init.text : { + .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; INIT_TEXT _einittext = .; @@ -81,11 +81,11 @@ SECTIONS /* .exit.text is discarded at runtime, not link time, * to deal with references from __bug_table */ - .exit.text : { + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT } - .init.data : { + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { INIT_DATA __vtop_table_begin = .; *(.vtop_fixup); @@ -101,19 +101,19 @@ SECTIONS } . 
= ALIGN(16); - .init.setup : { + .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { __setup_start = .; *(.init.setup) __setup_end = .; } - .initcall.init : { + .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { __initcall_start = .; INITCALLS __initcall_end = .; } - .con_initcall.init : { + .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { __con_initcall_start = .; *(.con_initcall.init) __con_initcall_end = .; @@ -122,14 +122,14 @@ SECTIONS SECURITY_INIT . = ALIGN(8); - __ftr_fixup : { + __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) { __start___ftr_fixup = .; *(__ftr_fixup) __stop___ftr_fixup = .; } #ifdef CONFIG_PPC64 . = ALIGN(8); - __fw_ftr_fixup : { + __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) { __start___fw_ftr_fixup = .; *(__fw_ftr_fixup) __stop___fw_ftr_fixup = .; @@ -137,14 +137,14 @@ SECTIONS #endif #ifdef CONFIG_BLK_DEV_INITRD . = ALIGN(PAGE_SIZE); - .init.ramfs : { + .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { __initramfs_start = .; *(.init.ramfs) __initramfs_end = .; } #endif . = ALIGN(PAGE_SIZE); - .data.percpu : { + .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { __per_cpu_start = .; *(.data.percpu) *(.data.percpu.shared_aligned) @@ -152,7 +152,7 @@ SECTIONS } . = ALIGN(8); - .machine.desc : { + .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { __machine_desc_start = . ; *(.machine.desc) __machine_desc_end = . ; @@ -170,25 +170,24 @@ SECTIONS _sdata = .; #ifdef CONFIG_PPC32 - .data : - { + .data : AT(ADDR(.data) - LOAD_OFFSET) { DATA_DATA *(.sdata) *(.got.plt) *(.got) } #else - .data : { + .data : AT(ADDR(.data) - LOAD_OFFSET) { DATA_DATA *(.data.rel*) *(.toc1) *(.branch_lt) } - .opd : { + .opd : AT(ADDR(.opd) - LOAD_OFFSET) { *(.opd) } - .got : { + .got : AT(ADDR(.got) - LOAD_OFFSET) { __toc_start = .; *(.got) *(.toc) @@ -205,26 +204,26 @@ SECTIONS #else . = ALIGN(16384); #endif - .data.init_task : { + .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { *(.data.init_task) } . = ALIGN(PAGE_SIZE); - .data.page_aligned : { + .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { *(.data.page_aligned) } - .data.cacheline_aligned : { + .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { *(.data.cacheline_aligned) } . = ALIGN(L1_CACHE_BYTES); - .data.read_mostly : { + .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) } . = ALIGN(PAGE_SIZE); - __data_nosave : { + .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { __nosave_begin = .; *(.data.nosave) . = ALIGN(PAGE_SIZE); @@ -235,7 +234,7 @@ SECTIONS * And finally the bss */ - .bss : { + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.sbss) *(.scommon) *(.dynbss) -- cgit v1.2.1 From 37ddd5d053c57fee798d72fa9c18660f59a9299b Mon Sep 17 00:00:00 2001 From: Manish Ahuja Date: Sat, 12 Apr 2008 09:31:52 +1000 Subject: [POWERPC] pseries/phyp dump: Reserve a variable amount of space at boot This changes the way we calculate how much space to reserve for the pHyp dump. Currently we reserve 256MB only. With this change, the code first checks to see if an amount has been specified on the boot command line with the "phyp_dump_reserve_size" option, and if so, uses that much. Otherwise it computes 5% of total ram and rounds it down to a multiple of 256MB, and uses the larger of that or 256MB. This is for large systems with a lot of memory (10GB or more). The aim is to have more space available for the kernel on reboot on machines with more resources. 
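In plain C, the rule described above works out to max(256MB, 5% of RAM rounded down to a 256MB multiple). A standalone sketch of just that arithmetic (illustrative only, not the kernel routine added below):

#include <stdio.h>

#define SZ_256M (256ULL << 20)

/* max(256MB, (total_ram / 20) rounded down to a multiple of 256MB) */
static unsigned long long reserve_size(unsigned long long total_ram)
{
	unsigned long long tmp = total_ram / 20;	/* 5% of RAM */

	tmp &= ~(SZ_256M - 1);				/* round down to 256MB */
	return tmp > SZ_256M ? tmp : SZ_256M;
}

int main(void)
{
	unsigned long long gib = 1ULL << 30;

	/* 4GB stays at the 256MB floor; 10GB gives 512MB; 64GB gives 3GB
	 * (5% is 3.2GB, rounded down to a 256MB multiple). */
	printf("%llu MB\n", reserve_size(4 * gib) >> 20);
	printf("%llu MB\n", reserve_size(10 * gib) >> 20);
	printf("%llu MB\n", reserve_size(64 * gib) >> 20);
	return 0;
}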
Although the dump will be collected pretty fast and the memory released really early on allowing the machine to have the full memory available, this alleviates any issues that can be caused by having way too little memory on very very large systems during those few minutes. Signed-off-by: Manish Ahuja Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 31d5b22c59a2..8a9359ae4718 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -1042,6 +1042,33 @@ static void __init early_reserve_mem(void) } #ifdef CONFIG_PHYP_DUMP +/** + * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg + * + * Function to find the largest size we need to reserve + * during early boot process. + * + * It either looks for boot param and returns that OR + * returns larger of 256 or 5% rounded down to multiples of 256MB. + * + */ +static inline unsigned long phyp_dump_calculate_reserve_size(void) +{ + unsigned long tmp; + + if (phyp_dump_info->reserve_bootvar) + return phyp_dump_info->reserve_bootvar; + + /* divide by 20 to get 5% of value */ + tmp = lmb_end_of_DRAM(); + do_div(tmp, 20); + + /* round it down in multiples of 256 */ + tmp = tmp & ~0x0FFFFFFFUL; + + return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END); +} + /** * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory * @@ -1055,6 +1082,8 @@ static void __init early_reserve_mem(void) static void __init phyp_dump_reserve_mem(void) { unsigned long base, size; + unsigned long variable_reserve_size; + if (!phyp_dump_info->phyp_dump_configured) { printk(KERN_ERR "Phyp-dump not supported on this hardware\n"); return; @@ -1065,9 +1094,11 @@ static void __init phyp_dump_reserve_mem(void) return; } + variable_reserve_size = phyp_dump_calculate_reserve_size(); + if (phyp_dump_info->phyp_dump_is_active) { /* Reserve *everything* above RMR.Area freed by userland tools*/ - base = PHYP_DUMP_RMR_END; + base = variable_reserve_size; size = lmb_end_of_DRAM() - base; /* XXX crashed_ram_end is wrong, since it may be beyond @@ -1079,7 +1110,7 @@ static void __init phyp_dump_reserve_mem(void) } else { size = phyp_dump_info->cpu_state_size + phyp_dump_info->hpte_region_size + - PHYP_DUMP_RMR_END; + variable_reserve_size; base = lmb_end_of_DRAM() - size; lmb_reserve(base, size); phyp_dump_info->init_reserve_start = base; -- cgit v1.2.1 From eb0cd5fd295f469b4782d8088f3e39019da44707 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 9 Apr 2008 06:06:11 -0500 Subject: [POWERPC] Rework Book-E debug exception handling The architecture allows for "Book-E" style debug interrupts to either go to critial interrupts of their own debug interrupt level. To allow for a dynamic kernel to support machines of either type we want to be able to compile in the interrupt handling code for both exception levels. Towards this goal we renamed the debug handling macros to specify the interrupt level in their name (DEBUG_CRIT_EXCEPTION/DebugCrit and DEBUG_DEBUG_EXCEPTION/DebugDebug). Additionally, on the Freescale Book-e parts we expanded the exception stacks to cover the maximum case of needing three exception stacks (normal, machine check and debug). 
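In round numbers this is 4KB of stack per exception level, so the carve-out grows from 8KB (two levels on 44x) to 12KB (three levels on the Freescale parts), and the debug stack top moves from 4KB to 8KB below the shared top, as the head_booke.h hunk further down spells out. A trivial C sketch of that arithmetic, with illustrative values only:

#include <stdio.h>

#define EXC_STACK_SLAB 4096	/* one slab per exception level */

int main(void)
{
	unsigned long top = 0xc0010000UL;	/* pretend exception_stack_top */

	for (int levels = 2; levels <= 3; levels++)
		printf("%d levels: reserve %d bytes, debug stack top at 0x%lx\n",
		       levels, EXC_STACK_SLAB * levels,
		       top - (unsigned long)(levels - 1) * EXC_STACK_SLAB);
	return 0;
}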
There is some kernel text space optimization to be gained if a kernel is configured for a specific Freescale implementation but we aren't handling that now to allow for the single kernel image support. Signed-off-by: Kumar Gala --- arch/powerpc/kernel/head_44x.S | 4 ++-- arch/powerpc/kernel/head_booke.h | 33 ++++++++++++++++++++------------- arch/powerpc/kernel/head_fsl_booke.S | 10 ++++++++-- 3 files changed, 30 insertions(+), 17 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index ad071a146a8d..b84ec6a2fc94 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -211,7 +211,7 @@ skpinv: addi r4,r4,1 /* Increment */ SET_IVOR(12, WatchdogTimer); SET_IVOR(13, DataTLBError); SET_IVOR(14, InstructionTLBError); - SET_IVOR(15, Debug); + SET_IVOR(15, DebugCrit); /* Establish the interrupt vector base */ lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ @@ -578,7 +578,7 @@ interrupt_base: b InstructionStorage /* Debug Interrupt */ - DEBUG_EXCEPTION + DEBUG_CRIT_EXCEPTION /* * Local functions diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index ba9393f8e77a..aefafc6330c9 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -56,8 +56,17 @@ * is necessary since the MMU is always on, for Book-E parts, and the stacks * are offset from KERNELBASE. * + * There is some space optimization to be had here if desired. However + * to allow for a common kernel with support for debug exceptions either + * going to critical or their own debug level we aren't currently + * providing configurations that micro-optimize space usage. */ -#define BOOKE_EXCEPTION_STACK_SIZE (8192) +#ifdef CONFIG_44x +#define NUM_EXCEPTION_LVLS 2 +#else +#define NUM_EXCEPTION_LVLS 3 +#endif +#define BOOKE_EXCEPTION_STACK_SIZE (4096 * NUM_EXCEPTION_LVLS) /* CRIT_SPRG only used in critical exception handling */ #define CRIT_SPRG SPRN_SPRG2 @@ -68,7 +77,7 @@ #define CRIT_STACK_TOP (exception_stack_top) /* only on e200 for now */ -#define DEBUG_STACK_TOP (exception_stack_top - 4096) +#define DEBUG_STACK_TOP (exception_stack_top - 8192) #define DEBUG_SPRG SPRN_SPRG6W #ifdef CONFIG_SMP @@ -212,9 +221,8 @@ label: * save (and later restore) the MSR via SPRN_CSRR1, which will still have * the MSR_DE bit set. 
*/ -#ifdef CONFIG_E200 -#define DEBUG_EXCEPTION \ - START_EXCEPTION(Debug); \ +#define DEBUG_DEBUG_EXCEPTION \ + START_EXCEPTION(DebugDebug); \ DEBUG_EXCEPTION_PROLOG; \ \ /* \ @@ -234,8 +242,8 @@ label: cmplw r12,r10; \ blt+ 2f; /* addr below exception vectors */ \ \ - lis r10,Debug@h; \ - ori r10,r10,Debug@l; \ + lis r10,DebugDebug@h; \ + ori r10,r10,DebugDebug@l; \ cmplw r12,r10; \ bgt+ 2f; /* addr above exception vectors */ \ \ @@ -265,9 +273,9 @@ label: 2: mfspr r4,SPRN_DBSR; \ addi r3,r1,STACK_FRAME_OVERHEAD; \ EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, debug_transfer_to_handler, ret_from_debug_exc) -#else -#define DEBUG_EXCEPTION \ - START_EXCEPTION(Debug); \ + +#define DEBUG_CRIT_EXCEPTION \ + START_EXCEPTION(DebugCrit); \ CRITICAL_EXCEPTION_PROLOG; \ \ /* \ @@ -287,8 +295,8 @@ label: cmplw r12,r10; \ blt+ 2f; /* addr below exception vectors */ \ \ - lis r10,Debug@h; \ - ori r10,r10,Debug@l; \ + lis r10,DebugCrit@h; \ + ori r10,r10,DebugCrit@l; \ cmplw r12,r10; \ bgt+ 2f; /* addr above exception vectors */ \ \ @@ -318,7 +326,6 @@ label: 2: mfspr r4,SPRN_DBSR; \ addi r3,r1,STACK_FRAME_OVERHEAD; \ EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) -#endif #define INSTRUCTION_STORAGE_EXCEPTION \ START_EXCEPTION(InstructionStorage) \ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 9f40b3e77100..4ff744143566 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -303,7 +303,10 @@ skpinv: addi r6,r6,1 /* Increment */ SET_IVOR(12, WatchdogTimer); SET_IVOR(13, DataTLBError); SET_IVOR(14, InstructionTLBError); - SET_IVOR(15, Debug); + SET_IVOR(15, DebugDebug); +#if defined(CONFIG_E500) + SET_IVOR(15, DebugCrit); +#endif SET_IVOR(32, SPEUnavailable); SET_IVOR(33, SPEFloatingPointData); SET_IVOR(34, SPEFloatingPointRound); @@ -738,7 +741,10 @@ interrupt_base: /* Debug Interrupt */ - DEBUG_EXCEPTION + DEBUG_DEBUG_EXCEPTION +#if defined(CONFIG_E500) + DEBUG_CRIT_EXCEPTION +#endif /* * Local functions -- cgit v1.2.1 From 4eaddb4d7ec380abe95523ba0bdbbe8558f7fef4 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 9 Apr 2008 16:15:40 -0500 Subject: [POWERPC] Make Book-E debug handling SMP safe global_dbcr0 needs to be a per cpu set of save areas instead of a single global on all processors. Also, we switch to using DBCR0_IDM to determine if the user space app is being debugged as its a more consistent way. In the future we should support features like hardware breakpoint and watchpoints which will have DBCR0_IDM set but not necessarily DBCR0_IC (single step). Signed-off-by: Kumar Gala --- arch/powerpc/kernel/entry_32.S | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 69a91bd46115..84c868633068 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -110,9 +110,9 @@ transfer_to_handler: stw r11,PT_REGS(r12) #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) /* Check to see if the dbcr0 register is set up to debug. Use the - single-step bit to do this. */ + internal debug mode bit to do this. */ lwz r12,THREAD_DBCR0(r12) - andis. r12,r12,DBCR0_IC@h + andis. 
r12,r12,DBCR0_IDM@h beq+ 3f /* From user and task is ptraced - load up global dbcr0 */ li r12,-1 /* clear all pending debug events */ @@ -120,6 +120,12 @@ transfer_to_handler: lis r11,global_dbcr0@ha tophys(r11,r11) addi r11,r11,global_dbcr0@l +#ifdef CONFIG_SMP + rlwinm r9,r1,0,0,(31-THREAD_SHIFT) + lwz r9,TI_CPU(r9) + slwi r9,r9,3 + add r11,r11,r9 +#endif lwz r12,0(r11) mtspr SPRN_DBCR0,r12 lwz r12,4(r11) @@ -238,10 +244,10 @@ ret_from_syscall: stw r11,_CCR(r1) syscall_exit_cont: #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - /* If the process has its own DBCR0 value, load it up. The single - step bit tells us that dbcr0 should be loaded. */ + /* If the process has its own DBCR0 value, load it up. The internal + debug mode bit tells us that dbcr0 should be loaded. */ lwz r0,THREAD+THREAD_DBCR0(r2) - andis. r10,r0,DBCR0_IC@h + andis. r10,r0,DBCR0_IDM@h bnel- load_dbcr0 #endif #ifdef CONFIG_44x @@ -666,10 +672,10 @@ user_exc_return: /* r10 contains MSR_KERNEL here */ restore_user: #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - /* Check whether this process has its own DBCR0 value. The single - step bit tells us that dbcr0 should be loaded. */ + /* Check whether this process has its own DBCR0 value. The internal + debug mode bit tells us that dbcr0 should be loaded. */ lwz r0,THREAD+THREAD_DBCR0(r2) - andis. r10,r0,DBCR0_IC@h + andis. r10,r0,DBCR0_IDM@h bnel- load_dbcr0 #endif @@ -879,6 +885,12 @@ load_dbcr0: mfspr r10,SPRN_DBCR0 lis r11,global_dbcr0@ha addi r11,r11,global_dbcr0@l +#ifdef CONFIG_SMP + rlwinm r9,r1,0,0,(31-THREAD_SHIFT) + lwz r9,TI_CPU(r9) + slwi r9,r9,3 + add r11,r11,r9 +#endif stw r10,0(r11) mtspr SPRN_DBCR0,r0 lwz r10,4(r11) @@ -891,7 +903,7 @@ load_dbcr0: .section .bss .align 4 global_dbcr0: - .space 8 + .space 8*NR_CPUS .previous #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ -- cgit v1.2.1 From f4ac7b5eb79ef15819c966b1f6b84bf443949123 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 9 Apr 2008 17:21:36 +1000 Subject: [POWERPC] Fix device-tree locking vs. interrupts Lockdep found out that we can occasionally take the device-tree lock for reading from softirq time (from rtas_token called by the rtas real time clock code called by the NTP code), while we take it occasionally for writing without masking interrupts. The combination of those two can thus deadlock. While some of those cases of interrupt read lock could be fixed (such as caching the RTAS tokens) I figured that taking the lock for writing is so rare (device-tree modification) that we may as well penalize that case and allow reading from interrupts. Thus, this turns all the writers to take the lock with irqs masked to avoid the situation. 
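For illustration, a minimal sketch of the resulting locking pattern, using a stand-in lock name rather than the real devtree_lock (the actual conversions are in the prom.c diff below):

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(example_lock);         /* stands in for devtree_lock */

    /* Writers are rare (device-tree modification), so they pay the cost of
     * masking interrupts for the duration of the update. */
    static void example_writer(void)
    {
        unsigned long flags;

        write_lock_irqsave(&example_lock, flags);
        /* ... modify the tree ... */
        write_unlock_irqrestore(&example_lock, flags);
    }

    /* Readers may now run from softirq/interrupt context without risking a
     * deadlock against a writer that was interrupted on the same CPU. */
    static void example_reader(void)
    {
        read_lock(&example_lock);
        /* ... walk the tree ... */
        read_unlock(&example_lock);
    }
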
Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 8a9359ae4718..3bfe7837e820 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -1332,12 +1332,14 @@ EXPORT_SYMBOL(of_node_put); */ void of_attach_node(struct device_node *np) { - write_lock(&devtree_lock); + unsigned long flags; + + write_lock_irqsave(&devtree_lock, flags); np->sibling = np->parent->child; np->allnext = allnodes; np->parent->child = np; allnodes = np; - write_unlock(&devtree_lock); + write_unlock_irqrestore(&devtree_lock, flags); } /* @@ -1348,8 +1350,9 @@ void of_attach_node(struct device_node *np) void of_detach_node(struct device_node *np) { struct device_node *parent; + unsigned long flags; - write_lock(&devtree_lock); + write_lock_irqsave(&devtree_lock, flags); parent = np->parent; if (!parent) @@ -1380,7 +1383,7 @@ void of_detach_node(struct device_node *np) of_node_set_flag(np, OF_DETACHED); out_unlock: - write_unlock(&devtree_lock); + write_unlock_irqrestore(&devtree_lock, flags); } #ifdef CONFIG_PPC_PSERIES @@ -1461,20 +1464,21 @@ __initcall(prom_reconfig_setup); int prom_add_property(struct device_node* np, struct property* prop) { struct property **next; + unsigned long flags; prop->next = NULL; - write_lock(&devtree_lock); + write_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (strcmp(prop->name, (*next)->name) == 0) { /* duplicate ! don't insert it */ - write_unlock(&devtree_lock); + write_unlock_irqrestore(&devtree_lock, flags); return -1; } next = &(*next)->next; } *next = prop; - write_unlock(&devtree_lock); + write_unlock_irqrestore(&devtree_lock, flags); #ifdef CONFIG_PROC_DEVICETREE /* try to add to proc as well if it was initialized */ @@ -1494,9 +1498,10 @@ int prom_add_property(struct device_node* np, struct property* prop) int prom_remove_property(struct device_node *np, struct property *prop) { struct property **next; + unsigned long flags; int found = 0; - write_lock(&devtree_lock); + write_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (*next == prop) { @@ -1509,7 +1514,7 @@ int prom_remove_property(struct device_node *np, struct property *prop) } next = &(*next)->next; } - write_unlock(&devtree_lock); + write_unlock_irqrestore(&devtree_lock, flags); if (!found) return -ENODEV; @@ -1535,9 +1540,10 @@ int prom_update_property(struct device_node *np, struct property *oldprop) { struct property **next; + unsigned long flags; int found = 0; - write_lock(&devtree_lock); + write_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (*next == oldprop) { @@ -1551,7 +1557,7 @@ int prom_update_property(struct device_node *np, } next = &(*next)->next; } - write_unlock(&devtree_lock); + write_unlock_irqrestore(&devtree_lock, flags); if (!found) return -ENODEV; -- cgit v1.2.1 From ec2b36b9f23cfbbe94d89724b796b44fd57d5221 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 17 Apr 2008 14:34:59 +1000 Subject: [POWERPC] Move stackframe definitions to common header This moves various definitions used all over the place to parse stack frames to ptrace.h so only one definition is needed. 
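One of the constants being centralized is the exception-frame marker; the magic numbers it replaces in the diff below (0x7265677368657265 on 64-bit, 0x72656773 on 32-bit) are simply the ASCII bytes of "regshere" and "regs". A small standalone check, purely for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long marker = 0x7265677368657265ULL;
        char buf[9] = { 0 };
        int i;

        for (i = 0; i < 8; i++)                 /* take bytes from the most significant end */
            buf[i] = (char)((marker >> (56 - 8 * i)) & 0xff);

        printf("%s\n", buf);                    /* prints "regshere" */
        return 0;
    }
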
Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/entry_64.S | 3 ++- arch/powerpc/kernel/head_32.S | 5 +++-- arch/powerpc/kernel/process.c | 28 +++++++--------------------- 3 files changed, 12 insertions(+), 24 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 148a3547c9aa..13019845536b 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -29,6 +29,7 @@ #include #include #include +#include /* * System calls. @@ -39,7 +40,7 @@ /* This value is used to mark exception frames on the stack. */ exception_marker: - .tc ID_72656773_68657265[TC],0x7265677368657265 + .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER .section ".text" .align 7 diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index c16d1354b19d..785af9b56591 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -30,6 +30,7 @@ #include #include #include +#include /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ #define LOAD_BAT(n, reg, RA, RB) \ @@ -268,8 +269,8 @@ __secondary_hold_acknowledge: li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ MTMSRD(r10); /* (except for mach check in rtas) */ \ stw r0,GPR0(r11); \ - lis r10,0x7265; /* put exception frame marker */ \ - addi r10,r10,0x6773; \ + lis r10,STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \ + addi r10,r10,STACK_FRAME_REGS_MARKER@l; \ stw r10,8(r11); \ SAVE_4GPRS(3, r11); \ SAVE_2GPRS(7, r11) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index a27910207c7e..703100d5e458 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -914,20 +914,6 @@ int validate_sp(unsigned long sp, struct task_struct *p, return valid_irq_stack(sp, p, nbytes); } -#ifdef CONFIG_PPC64 -#define MIN_STACK_FRAME 112 /* same as STACK_FRAME_OVERHEAD, in fact */ -#define FRAME_LR_SAVE 2 -#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD + 288) -#define REGS_MARKER 0x7265677368657265ul -#define FRAME_MARKER 12 -#else -#define MIN_STACK_FRAME 16 -#define FRAME_LR_SAVE 1 -#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD) -#define REGS_MARKER 0x72656773ul -#define FRAME_MARKER 2 -#endif - EXPORT_SYMBOL(validate_sp); unsigned long get_wchan(struct task_struct *p) @@ -939,15 +925,15 @@ unsigned long get_wchan(struct task_struct *p) return 0; sp = p->thread.ksp; - if (!validate_sp(sp, p, MIN_STACK_FRAME)) + if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) return 0; do { sp = *(unsigned long *)sp; - if (!validate_sp(sp, p, MIN_STACK_FRAME)) + if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) return 0; if (count > 0) { - ip = ((unsigned long *)sp)[FRAME_LR_SAVE]; + ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; if (!in_sched_functions(ip)) return ip; } @@ -976,12 +962,12 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) lr = 0; printk("Call Trace:\n"); do { - if (!validate_sp(sp, tsk, MIN_STACK_FRAME)) + if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) return; stack = (unsigned long *) sp; newsp = stack[0]; - ip = stack[FRAME_LR_SAVE]; + ip = stack[STACK_FRAME_LR_SAVE]; if (!firstframe || ip != lr) { printk("["REG"] ["REG"] ", sp, ip); print_symbol("%s", ip); @@ -995,8 +981,8 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) * See if this is an exception frame. * We look for the "regshere" marker in the current frame. 
*/ - if (validate_sp(sp, tsk, INT_FRAME_SIZE) - && stack[FRAME_MARKER] == REGS_MARKER) { + if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) + && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { struct pt_regs *regs = (struct pt_regs *) (sp + STACK_FRAME_OVERHEAD); printk("--- Exception: %lx", regs->trap); -- cgit v1.2.1 From fd3e0bbc6052ca9747a5332b382584ece83aab6d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 17 Apr 2008 14:35:00 +1000 Subject: [POWERPC] Stacktrace support for lockdep This adds stacktrace support for powerpc, which will be needed for lockdep. Signed-off-by: Christoph Hellwig Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 1 + arch/powerpc/kernel/stacktrace.c | 47 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 arch/powerpc/kernel/stacktrace.c (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index c1baf9d5903f..5183a9012a08 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_BOOTX_TEXT) += btext.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \ diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c new file mode 100644 index 000000000000..e3638eeaaae7 --- /dev/null +++ b/arch/powerpc/kernel/stacktrace.c @@ -0,0 +1,47 @@ +/* + * Stack trace utility + * + * Copyright 2008 Christoph Hellwig, IBM Corp. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include + +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +void save_stack_trace(struct stack_trace *trace) +{ + unsigned long sp; + + asm("mr %0,1" : "=r" (sp)); + + for (;;) { + unsigned long *stack = (unsigned long *) sp; + unsigned long newsp, ip; + + if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) + return; + + newsp = stack[0]; + ip = stack[STACK_FRAME_LR_SAVE]; + + if (!trace->skip) + trace->entries[trace->nr_entries++] = ip; + else + trace->skip--; + + if (trace->nr_entries >= trace->max_entries) + return; + + sp = newsp; + } +} -- cgit v1.2.1 From 945feb174b14e7098cc7ecf0cf4768d35bc52f9c Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 17 Apr 2008 14:35:01 +1000 Subject: [POWERPC] irqtrace support for 64-bit powerpc This adds the low level irq tracing hooks to the powerpc architecture needed to enable full lockdep functionality. This is partly based on Johannes Berg's initial version. I removed the asm trampoline that isn't needed (thus improving performance) and modified all sorts of bits and pieces, reworking most of the assembly, etc... 
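The hooks follow the usual irq-tracing convention: lockdep is told just before interrupts get re-enabled and just after they are turned off. A conceptual C-level sketch of that pattern (the helper name is hypothetical and this is not the kernel's code; raw_local_irq_restore() is the routine this patch renames local_irq_restore() to, and `en' is the saved soft-enable state, non-zero meaning enabled):

    #include <linux/irqflags.h>

    static inline void traced_irq_restore(unsigned long en)
    {
        if (en) {
            trace_hardirqs_on();            /* inform lockdep before re-enabling */
            raw_local_irq_restore(en);
        } else {
            raw_local_irq_restore(en);      /* interrupts stay disabled */
            trace_hardirqs_off();
        }
    }

The assembly hunks below implement the same idea directly in the exception entry/exit paths, reloading any volatile registers the tracing calls clobber.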
Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/entry_64.S | 27 +++++++++++++++++++++-- arch/powerpc/kernel/head_64.S | 47 ++++++++++++++++++++++++++++------------- arch/powerpc/kernel/irq.c | 3 ++- arch/powerpc/kernel/ppc_ksyms.c | 4 ---- arch/powerpc/kernel/setup_64.c | 4 ++++ 5 files changed, 63 insertions(+), 22 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 13019845536b..c0db5b769e55 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -30,6 +30,7 @@ #include #include #include +#include /* * System calls. @@ -89,6 +90,14 @@ system_call_common: addi r9,r1,STACK_FRAME_OVERHEAD ld r11,exception_marker@toc(r2) std r11,-16(r9) /* "regshere" marker */ +#ifdef CONFIG_TRACE_IRQFLAGS + bl .trace_hardirqs_on + REST_GPR(0,r1) + REST_4GPRS(3,r1) + REST_2GPRS(7,r1) + addi r9,r1,STACK_FRAME_OVERHEAD + ld r12,_MSR(r1) +#endif /* CONFIG_TRACE_IRQFLAGS */ li r10,1 stb r10,PACASOFTIRQEN(r13) stb r10,PACAHARDIRQEN(r13) @@ -103,7 +112,7 @@ BEGIN_FW_FTR_SECTION b hardware_interrupt_entry 2: END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) -#endif +#endif /* CONFIG_PPC_ISERIES */ mfmsr r11 ori r11,r11,MSR_EE mtmsrd r11,1 @@ -505,6 +514,10 @@ BEGIN_FW_FTR_SECTION li r3,0 stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */ +#ifdef CONFIG_TRACE_IRQFLAGS + bl .trace_hardirqs_off + mfmsr r10 +#endif ori r10,r10,MSR_EE mtmsrd r10 /* hard-enable again */ addi r3,r1,STACK_FRAME_OVERHEAD @@ -513,7 +526,7 @@ BEGIN_FW_FTR_SECTION 4: END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) #endif - stb r5,PACASOFTIRQEN(r13) + TRACE_AND_RESTORE_IRQ(r5); /* extract EE bit and use it to restore paca->hard_enabled */ ld r3,_MSR(r1) @@ -581,6 +594,16 @@ do_work: bne restore /* here we are preempting the current task */ 1: +#ifdef CONFIG_TRACE_IRQFLAGS + bl .trace_hardirqs_on + /* Note: we just clobbered r10 which used to contain the previous + * MSR before the hard-disabling done by the caller of do_work. + * We don't have that value anymore, but it doesn't matter as + * we will hard-enable unconditionally, we can just reload the + * current MSR into r10 + */ + mfmsr r10 +#endif /* CONFIG_TRACE_IRQFLAGS */ li r0,1 stb r0,PACASOFTIRQEN(r13) stb r0,PACAHARDIRQEN(r13) diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 44229c3749ac..215973a2c8d5 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -36,8 +36,7 @@ #include #include #include - -#define DO_SOFT_DISABLE +#include /* * We layout physical memory as follows: @@ -450,8 +449,8 @@ bad_stack: */ fast_exc_return_irq: /* restores irq state too */ ld r3,SOFTE(r1) + TRACE_AND_RESTORE_IRQ(r3); ld r12,_MSR(r1) - stb r3,PACASOFTIRQEN(r13) /* restore paca->soft_enabled */ rldicl r4,r12,49,63 /* get MSR_EE to LSB */ stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */ b 1f @@ -824,7 +823,7 @@ _STATIC(load_up_altivec) * Hash table stuff */ .align 7 -_GLOBAL(do_hash_page) +_STATIC(do_hash_page) std r3,_DAR(r1) std r4,_DSISR(r1) @@ -835,6 +834,27 @@ BEGIN_FTR_SECTION bne- do_ste_alloc /* If so handle it */ END_FTR_SECTION_IFCLR(CPU_FTR_SLB) + /* + * On iSeries, we soft-disable interrupts here, then + * hard-enable interrupts so that the hash_page code can spin on + * the hash_table_lock without problems on a shared processor. 
+ */ + DISABLE_INTS + + /* + * Currently, trace_hardirqs_off() will be called by DISABLE_INTS + * and will clobber volatile registers when irq tracing is enabled + * so we need to reload them. It may be possible to be smarter here + * and move the irq tracing elsewhere but let's keep it simple for + * now + */ +#ifdef CONFIG_TRACE_IRQFLAGS + ld r3,_DAR(r1) + ld r4,_DSISR(r1) + ld r5,_TRAP(r1) + ld r12,_MSR(r1) + clrrdi r5,r5,4 +#endif /* CONFIG_TRACE_IRQFLAGS */ /* * We need to set the _PAGE_USER bit if MSR_PR is set or if we are * accessing a userspace segment (even from the kernel). We assume @@ -847,13 +867,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB) ori r4,r4,1 /* add _PAGE_PRESENT */ rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ - /* - * On iSeries, we soft-disable interrupts here, then - * hard-enable interrupts so that the hash_page code can spin on - * the hash_table_lock without problems on a shared processor. - */ - DISABLE_INTS - /* * r3 contains the faulting address * r4 contains the required access permissions @@ -864,7 +877,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB) bl .hash_page /* build HPTE if possible */ cmpdi r3,0 /* see if hash_page succeeded */ -#ifdef DO_SOFT_DISABLE BEGIN_FW_FTR_SECTION /* * If we had interrupts soft-enabled at the point where the @@ -876,7 +888,7 @@ BEGIN_FW_FTR_SECTION */ beq 13f END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) -#endif + BEGIN_FW_FTR_SECTION /* * Here we have interrupts hard-disabled, so it is sufficient @@ -890,11 +902,12 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) /* * hash_page couldn't handle it, set soft interrupt enable back - * to what it was before the trap. Note that .local_irq_restore + * to what it was before the trap. Note that .raw_local_irq_restore * handles any interrupts pending at this point. */ ld r3,SOFTE(r1) - bl .local_irq_restore + TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) + bl .raw_local_irq_restore b 11f /* Here we have a page fault that hash_page can't handle. */ @@ -1493,6 +1506,10 @@ _INIT_STATIC(start_here_multiplatform) addi r2,r2,0x4000 add r2,r2,r26 + /* Set initial ptr to current */ + LOAD_REG_IMMEDIATE(r4, init_task) + std r4,PACACURRENT(r13) + /* Do very early kernel initializations, including initial hash table, * stab and slb setup before we turn on relocation. 
*/ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 4617b65d464d..425616f92d18 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -114,7 +114,7 @@ static inline void set_soft_enabled(unsigned long enable) : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); } -void local_irq_restore(unsigned long en) +void raw_local_irq_restore(unsigned long en) { /* * get_paca()->soft_enabled = en; @@ -174,6 +174,7 @@ void local_irq_restore(unsigned long en) __hard_irq_enable(); } +EXPORT_SYMBOL(raw_local_irq_restore); #endif /* CONFIG_PPC64 */ int show_interrupts(struct seq_file *p, void *v) diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 5a4c76eada48..b9b765c7d1a7 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -45,10 +45,6 @@ #include #include -#ifdef CONFIG_PPC64 -EXPORT_SYMBOL(local_irq_restore); -#endif - #ifdef CONFIG_PPC32 extern void transfer_to_handler(void); extern void do_IRQ(struct pt_regs *regs); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 0205d408d2ed..31ada9fdfc5c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -178,6 +179,9 @@ void __init early_setup(unsigned long dt_ptr) /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); + /* Initialize lockdep early or else spinlocks will blow */ + lockdep_init(); + DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr); /* -- cgit v1.2.1 From e4cc58944c1e2ce41e3079d4eb60c95e7ce04b2b Mon Sep 17 00:00:00 2001 From: Andreas Schwab Date: Sun, 20 Apr 2008 02:25:13 +1000 Subject: [POWERPC] Add compat handler for PTRACE_GETSIGINFO Current versions of gdb require a working implementation of PTRACE_GETSIGINFO for proper watchpoint support. Since struct siginfo contains pointers it must be converted when passed to a 32-bit debugger. Signed-off-by: Andreas Schwab Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/ppc32.h | 2 ++ arch/powerpc/kernel/ptrace32.c | 27 +++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/kernel/ppc32.h index 90e562771791..fda05e2211d6 100644 --- a/arch/powerpc/kernel/ppc32.h +++ b/arch/powerpc/kernel/ppc32.h @@ -135,4 +135,6 @@ struct ucontext32 { struct mcontext32 uc_mcontext; }; +extern int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s); + #endif /* _PPC64_PPC32_H */ diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index 4c1de6af4c09..9d30e10970ac 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c @@ -29,12 +29,15 @@ #include #include #include +#include #include #include #include #include +#include "ppc32.h" + /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. 
@@ -64,6 +67,27 @@ static long compat_ptrace_old(struct task_struct *child, long request, return -EPERM; } +static int compat_ptrace_getsiginfo(struct task_struct *child, compat_siginfo_t __user *data) +{ + siginfo_t lastinfo; + int error = -ESRCH; + + read_lock(&tasklist_lock); + if (likely(child->sighand != NULL)) { + error = -EINVAL; + spin_lock_irq(&child->sighand->siglock); + if (likely(child->last_siginfo != NULL)) { + lastinfo = *child->last_siginfo; + error = 0; + } + spin_unlock_irq(&child->sighand->siglock); + } + read_unlock(&tasklist_lock); + if (!error) + return copy_siginfo_to_user32(data, &lastinfo); + return error; +} + long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { @@ -282,6 +306,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 0, PT_REGS_COUNT * sizeof(compat_long_t), compat_ptr(data)); + case PTRACE_GETSIGINFO: + return compat_ptrace_getsiginfo(child, compat_ptr(data)); + case PTRACE_GETFPREGS: case PTRACE_SETFPREGS: case PTRACE_GETVRREGS: -- cgit v1.2.1 From 9c0c44dbd9bc380bee53e2f768c4ad5410b8aae2 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Sun, 20 Apr 2008 08:19:24 +1000 Subject: [POWERPC] Define copy_siginfo_from_user32 Define the copy_siginfo_from_user32 entry point for powerpc, so that generic CONFIG_COMPAT code can call it. We already had the code rolled into compat_sys_rt_sigqueueinfo, this just moves it out into the canonical function that other arch's define. Signed-off-by: Roland McGrath Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/signal_32.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index d840bc772fd3..ad6943468ee9 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -621,6 +621,18 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s) #define copy_siginfo_to_user copy_siginfo_to_user32 +int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) +{ + memset(to, 0, sizeof *to); + + if (copy_from_user(to, from, 3*sizeof(int)) || + copy_from_user(to->_sifields._pad, + from->_sifields._pad, SI_PAD_SIZE32)) + return -EFAULT; + + return 0; +} + /* * Note: it is necessary to treat pid and sig as unsigned ints, with the * corresponding cast to a signed int to insure that the proper conversion @@ -634,9 +646,10 @@ long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo int ret; mm_segment_t old_fs = get_fs(); - if (copy_from_user (&info, uinfo, 3*sizeof(int)) || - copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32)) - return -EFAULT; + ret = copy_siginfo_from_user32(&info, uinfo); + if (unlikely(ret)) + return ret; + set_fs (KERNEL_DS); /* The __user pointer cast is valid becasuse of the set_fs() */ ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info); -- cgit v1.2.1
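To round out the two compat siginfo changes above, here is a small userspace-side sketch (not part of either patch) of what a 32-bit debugger does to exercise the new path: it requests the stopped tracee's siginfo with PTRACE_GETSIGINFO, and the kernel converts the native siginfo through copy_siginfo_to_user32() before copying it out.

    #include <stdio.h>
    #include <stdlib.h>
    #include <signal.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    /* Assumes `pid' is already attached and ptrace-stopped; error handling is minimal. */
    static int dump_tracee_siginfo(pid_t pid)
    {
        siginfo_t si;

        if (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) == -1) {
            perror("PTRACE_GETSIGINFO");
            return -1;
        }
        printf("signo=%d errno=%d code=%d\n", si.si_signo, si.si_errno, si.si_code);
        return 0;
    }

    int main(int argc, char **argv)
    {
        if (argc < 2)
            return 1;
        return dump_tracee_siginfo((pid_t)atoi(argv[1])) ? 1 : 0;
    }
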