Diffstat (limited to 'arch')
30 files changed, 237 insertions, 214 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf006d40342c..36586dba6fa6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1186,6 +1186,15 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
 
+config ARM_ERRATA_326103
+	bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
+	depends on CPU_V6
+	help
+	  Executing a SWP instruction to read-only memory does not set bit 11
+	  of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
+	  treat the access as a read, preventing a COW from occurring and
+	  causing the faulting task to livelock.
+
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
 	depends on CPU_V6 || CPU_V6K
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 0b32925f2147..e2fe3195c0d1 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -173,7 +173,7 @@
 		mmc@5000 {
 			compatible = "arm,primecell";
 			reg = < 0x5000 0x1000>;
-			interrupts = <22>;
+			interrupts = <22 34>;
 		};
 		kmi@6000 {
 			compatible = "arm,pl050", "arm,primecell";
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 166461073b78..7e8175269064 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -41,7 +41,7 @@
 			mmc@b000 {
 				compatible = "arm,primecell";
 				reg = <0xb000 0x1000>;
-				interrupts = <23>;
+				interrupts = <23 34>;
 			};
 		};
 	};
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d4c24d412a8d..0f04d84582e1 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *);
 extern void vfp_sync_hwstate(struct thread_info *);
 extern void vfp_flush_hwstate(struct thread_info *);
 
+struct user_vfp;
+struct user_vfp_exc;
+
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+					   struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+				    struct user_vfp_exc __user *);
 #endif
 
 /*
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 60843eb0f61c..73409e6c0251 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -7,6 +7,8 @@
 
 	.macro set_tls_v6k, tp, tmp1, tmp2
 	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
+	mov	\tmp1, #0
+	mcr	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	.endm
 
 	.macro set_tls_v6, tp, tmp1, tmp2
@@ -15,6 +17,8 @@
 	mov	\tmp2, #0xffff0fff
 	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
 	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
+	movne	\tmp1, #0
+	mcrne	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
 	.endm
 
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 71ccdbfed662..8349d4e97e2b 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	}
 
 	c = irq_data_get_irq_chip(d);
-	if (c->irq_set_affinity)
-		c->irq_set_affinity(d, affinity, true);
-	else
+	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
 
 	return ret;
 }
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7cb532fc8aa4..d68d1b694680 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	const unsigned long magic = VFP_MAGIC;
 	const unsigned long size = VFP_STORAGE_SIZE;
 	int err = 0;
 
-	vfp_sync_hwstate(thread);
 	__put_user_error(magic, &frame->magic, err);
 	__put_user_error(size, &frame->size, err);
 
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
-			      sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Copy the exception registers.
-	 */
-	__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
-	__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+	if (err)
+		return -EFAULT;
 
-	return err ? -EFAULT : 0;
+	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 static int restore_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	unsigned long magic;
 	unsigned long size;
-	unsigned long fpexc;
 	int err = 0;
 
 	__get_user_error(magic, &frame->magic, err);
@@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
 		return -EINVAL;
 
-	vfp_flush_hwstate(thread);
-
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
-				sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Sanitise and restore the exception registers.
-	 */
-	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
-	/* Ensure the VFP is enabled. */
-	fpexc |= FPEXC_EN;
-	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
-	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
-	h->fpexc = fpexc;
-
-	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
-
-	return err ? -EFAULT : 0;
+	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 #endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index addbbe8028c2..f6a4d32b0421 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu)
 	local_fiq_disable();
 	local_irq_disable();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	platform_cpu_kill(cpu);
-#endif
-
 	while (1)
 		cpu_relax();
 }
@@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+	unsigned int cpu;
+	for_each_cpu(cpu, mask)
+		platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
+
 void smp_send_stop(void)
 {
 	unsigned long timeout;
+	struct cpumask mask;
 
-	if (num_online_cpus() > 1) {
-		struct cpumask mask;
-		cpumask_copy(&mask, cpu_online_mask);
-		cpumask_clear_cpu(smp_processor_id(), &mask);
-
-		smp_cross_call(&mask, IPI_CPU_STOP);
-	}
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
@@ -595,6 +599,8 @@ void smp_send_stop(void)
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
+
+	smp_kill_cpus(&mask);
 }
 
 /*
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index ff1f7cc11f87..80741992a9fc 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 /*
- * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103).
- * The test below covers all the write situations, including Java bytecodes
+ * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
 */
-	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
+#ifdef CONFIG_ARM_ERRATA_326103
+	ldr	ip, =0x4107b36
+	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
+	teq	ip, r3, lsr #4			@ r0 ARM1136?
+	bne	do_DataAbort
 	tst	r5, #PSR_J_BIT			@ Java?
+	tsteq	r5, #PSR_T_BIT			@ Thumb?
 	bne	do_DataAbort
-	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
-	ldreq	r3, [r4]			@ read aborted ARM instruction
+	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
+	ldr	r3, [r4]			@ read aborted ARM instruction
 #ifdef CONFIG_CPU_ENDIAN_BE8
-	reveq	r3, r3
+	rev	r3, r3
 #endif
 	do_ldrd_abort tmp=ip, insn=r3
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
+#endif
 	b	do_DataAbort
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index a53fd2aaa2f4..2a8e380501e8 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static u32 l2x0_way_mask;	/* Bitmask of active ways */
 static u32 l2x0_size;
+static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 
 struct l2x0_regs l2x0_saved_regs;
 
@@ -61,12 +62,7 @@ static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
 
-#ifdef CONFIG_PL310_ERRATA_753970
-	/* write to an unmmapped register */
-	writel_relaxed(0, base + L2X0_DUMMY_REG);
-#else
-	writel_relaxed(0, base + L2X0_CACHE_SYNC);
-#endif
+	writel_relaxed(0, base + sync_reg_offset);
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
 }
 
 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
+static inline void debug_writel(unsigned long val)
+{
+	if (outer_cache.set_debug)
+		outer_cache.set_debug(val);
+}
 
-#define debug_writel(val)	outer_cache.set_debug(val)
-
-static void l2x0_set_debug(unsigned long val)
+static void pl310_set_debug(unsigned long val)
 {
 	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
 }
@@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
 {
 }
 
-#define l2x0_set_debug	NULL
+#define pl310_set_debug	NULL
 #endif
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		else
 			ways = 8;
 		type = "L310";
+#ifdef CONFIG_PL310_ERRATA_753970
+		/* Unmapped register. */
+		sync_reg_offset = L2X0_DUMMY_REG;
+#endif
+		outer_cache.set_debug = pl310_set_debug;
 		break;
 	case L2X0_CACHE_ID_PART_L210:
 		ways = (aux >> 13) & 0xf;
@@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	outer_cache.flush_all = l2x0_flush_all;
 	outer_cache.inv_all = l2x0_inv_all;
 	outer_cache.disable = l2x0_disable;
-	outer_cache.set_debug = l2x0_set_debug;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 595079fa9d1d..8f5813bbffb5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid);
 #endif
 
 #ifndef CONFIG_SPARSEMEM
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 	struct memblock_region *reg;
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b86f8933ff91..2c7cf2f9c837 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -618,8 +618,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 	}
 }
 
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
-	unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+	unsigned long end, unsigned long phys, const struct mem_type *type)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index ecdb3da0dea9..c58d896cd5c3 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -916,6 +916,13 @@ void omap_start_dma(int lch)
 			l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
 	l |= OMAP_DMA_CCR_EN;
 
+	/*
+	 * As dma_write() uses IO accessors which are weakly ordered, there
+	 * is no guarantee that data in coherent DMA memory will be visible
+	 * to the DMA device.  Add a memory barrier here to ensure that any
+	 * such data is visible prior to enabling DMA.
+	 */
+	mb();
 	p->dma_write(l, CCR, lch);
 
 	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
@@ -965,6 +972,13 @@ void omap_stop_dma(int lch)
 		p->dma_write(l, CCR, lch);
 	}
 
+	/*
+	 * Ensure that data transferred by DMA is visible to any access
+	 * after DMA has been disabled.  This is important for coherent
+	 * DMA regions.
+	 */
+	mb();
+
 	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 		int next_lch, cur_lch = lch;
 		char dma_chan_link_map[dma_lch_count];
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 858748eaa144..bc683b8219b5 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -17,6 +17,8 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -529,6 +531,103 @@ void vfp_flush_hwstate(struct thread_info *thread)
 }
 
 /*
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+				    struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	int err = 0;
+
+	/* Ensure that the saved hwstate is up-to-date. */
+	vfp_sync_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+			      sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Copy the exception registers.
+	 */
+	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	if (err)
+		return -EFAULT;
+
+	/* Ensure that VFP is disabled. */
+	vfp_flush_hwstate(thread);
+
+	/*
+	 * As per the PCS, clear the length and stride bits for function
+	 * entry.
+	 */
+	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
+
+	/*
+	 * Disable VFP in the hwstate so that we can detect if it gets
+	 * used.
+	 */
+	hwstate->fpexc &= ~FPEXC_EN;
+	return 0;
+}
+
+/* Sanitise and restore the current VFP state from the provided structures. */
+int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+			     struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	unsigned long fpexc;
+	int err = 0;
+
+	/*
+	 * If VFP has been used, then disable it to avoid corrupting
+	 * the new thread state.
+	 */
+	if (hwstate->fpexc & FPEXC_EN)
+		vfp_flush_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+				sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Sanitise and restore the exception registers.
+	 */
+	__get_user_error(fpexc, &ufp_exc->fpexc, err);
+
+	/* Ensure the VFP is enabled. */
+	fpexc |= FPEXC_EN;
+
+	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+	hwstate->fpexc = fpexc;
+
+	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	return err ? -EFAULT : 0;
+}
+
+/*
  * VFP hardware can lose all context when a CPU goes offline.
  * As we will be running in SMP mode with CPU hotplug, we will save the
  * hardware state at every thread switch.  We clear our held state when
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
index e21507052066..9c717bf98ffe 100644
--- a/arch/mips/ath79/dev-wmac.c
+++ b/arch/mips/ath79/dev-wmac.c
@@ -58,8 +58,8 @@ static void __init ar913x_wmac_setup(void)
 
 static int ar933x_wmac_reset(void)
 {
-	ath79_device_reset_clear(AR933X_RESET_WMAC);
 	ath79_device_reset_set(AR933X_RESET_WMAC);
+	ath79_device_reset_clear(AR933X_RESET_WMAC);
 
 	return 0;
 }
diff --git a/arch/mips/include/asm/mach-jz4740/irq.h b/arch/mips/include/asm/mach-jz4740/irq.h
index a865c983c70a..5ad1a9c113c6 100644
--- a/arch/mips/include/asm/mach-jz4740/irq.h
+++ b/arch/mips/include/asm/mach-jz4740/irq.h
@@ -45,7 +45,7 @@
 #define JZ4740_IRQ_LCD		JZ4740_IRQ(30)
 
 /* 2nd-level interrupts */
-#define JZ4740_IRQ_DMA(x)	(JZ4740_IRQ(32) + (X))
+#define JZ4740_IRQ_DMA(x)	(JZ4740_IRQ(32) + (x))
 
 #define JZ4740_IRQ_INTC_GPIO(x)	(JZ4740_IRQ_GPIO0 - (x))
 #define JZ4740_IRQ_GPIO(x)	(JZ4740_IRQ(48) + (x))
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 73c0d45798de..9b02cfba7449 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -37,12 +37,6 @@ extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
 		write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
 	} while (0)
 
-
-static inline unsigned long get_current_pgd(void)
-{
-	return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL);
-}
-
 #else /* CONFIG_MIPS_PGD_C0_CONTEXT: using  pgd_current*/
 
 /*
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 185ca00c4c84..d5a338a1739c 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -257,11 +257,8 @@ asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -286,11 +283,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -362,10 +356,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
 		goto badframe;
 
 	sigdelsetmask(&blocked, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = blocked;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&blocked);
 
 	sig = restore_sigcontext(&regs, &frame->sf_sc);
 	if (sig < 0)
@@ -401,10 +392,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
 	if (sig < 0)
@@ -580,12 +568,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
 	if (ret)
 		return ret;
 
-	spin_lock_irq(&current->sighand->siglock);
-	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
-	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&current->blocked, sig);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	block_sigmask(ka, sig);
 
 	return ret;
 }
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 06b5da392e24..ac3b8d89aae5 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -290,11 +290,8 @@ asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -318,11 +315,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -488,10 +482,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
 		goto badframe;
 
 	sigdelsetmask(&blocked, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = blocked;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&blocked);
 
 	sig = restore_sigcontext32(&regs, &frame->sf_sc);
 	if (sig < 0)
@@ -529,10 +520,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
 	if (sig < 0)
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index ae29e894ab8d..86eb4b04631c 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -93,11 +93,8 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 	sigset_from_compat(&newset, &uset);
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -121,10 +118,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
 	if (sig < 0)
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index e648af92ced1..0e40843a1c6e 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -18,10 +18,6 @@
 #include <linux/atomic.h>
 
 
-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
-	for ((i) = 0; (i) < NR_IRQS; ++(i))
-
 extern atomic_t ppc_n_lost_interrupts;
 
 /* This number is used when no interrupt has been assigned */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5ec1b2354ca6..43eb74fcedde 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -330,14 +330,10 @@ void migrate_irqs(void)
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
-	for_each_irq(irq) {
+	for_each_irq_desc(irq, desc) {
 		struct irq_data *data;
 		struct irq_chip *chip;
 
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-
 		data = irq_desc_get_irq_data(desc);
 		if (irqd_is_per_cpu(data))
 			continue;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index c957b1202bdc..5df777794403 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -23,14 +23,11 @@
 void machine_kexec_mask_interrupts(void) {
 	unsigned int i;
+	struct irq_desc *desc;
 
-	for_each_irq(i) {
-		struct irq_desc *desc = irq_to_desc(i);
+	for_each_irq_desc(i, desc) {
 		struct irq_chip *chip;
 
-		if (!desc)
-			continue;
-
 		chip = irq_desc_get_chip(desc);
 		if (!chip)
 			continue;
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index d09f3e8e6867..85825b5401e5 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		pr_devel("axon_msi: woff %x roff %x msi %x\n",
 			  write_offset, msic->read_offset, msi);
 
-		if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
+		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
 			generic_handle_irq(msi);
 			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
 		} else {
@@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	if (rc)
 		return rc;
 
-	/* We rely on being able to stash a virq in a u16 */
-	BUILD_BUG_ON(NR_IRQS > 65536);
-
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		virq = irq_create_direct_mapping(msic->irq_domain);
 		if (virq == NO_IRQ) {
@@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device *device)
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
+	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
+	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index f9a48af335cb..8c6dc42ecf65 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void)
 {
 	int i;
 
-	for (i = 1; i < NR_IRQS; i++)
+	for (i = 1; i < nr_irqs; i++)
 		beat_destruct_irq_plug(i);
 }
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 66ad93de1d55..c4e630576ff2 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -57,9 +57,9 @@ static int max_real_irqs;
 
 static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
 
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+/* The max irq number this driver deals with is 128; see max_irqs */
+static DECLARE_BITMAP(ppc_lost_interrupts, 128);
+static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
 static int pmac_irq_cascade = -1;
 static struct irq_domain *pmac_pic_host;
 
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index aadbe4f6d537..178a5f300bc9 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -30,9 +30,9 @@ config PPC_SPLPAR
 	  two or more partitions.
 
 config EEH
-	bool "PCI Extended Error Handling (EEH)" if EXPERT
+	bool
 	depends on PPC_PSERIES && PCI
-	default y if !EXPERT
+	default y
 
 config PSERIES_MSI
 	bool
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index d3be961e2ae7..10386b676d87 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -51,8 +51,7 @@ static intctl_cpm2_t __iomem *cpm2_intctl;
 
 static struct irq_domain *cpm2_pic_host;
 
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
 
 static const u_char irq_to_siureg[] = {
 	1, 1, 1, 1, 1, 1, 1, 1,
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index d5f5416be310..b724622c3a0b 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -18,69 +18,45 @@ extern int cpm_get_irq(struct pt_regs *regs);
 
 static struct irq_domain *mpc8xx_pic_host;
 
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long mpc8xx_cached_irq_mask;
 static sysconf8xx_t __iomem *siu_reg;
 
-int cpm_get_irq(struct pt_regs *regs);
+static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d)
+{
+	return 0x80000000 >> irqd_to_hwirq(d);
+}
 
 static void mpc8xx_unmask_irq(struct irq_data *d)
 {
-	int bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static void mpc8xx_mask_irq(struct irq_data *d)
 {
-	int bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static void mpc8xx_ack(struct irq_data *d)
 {
-	int bit;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
+	out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d));
 }
 
 static void mpc8xx_end_irq(struct irq_data *d)
 {
-	int bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	if (flow_type & IRQ_TYPE_EDGE_FALLING) {
-		irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
+	/* only external IRQ senses are programmable */
+	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) {
 		unsigned int siel = in_be32(&siu_reg->sc_siel);
-
-		/* only external IRQ senses are programmable */
-		if ((hw & 1) == 0) {
-			siel |= (0x80000000 >> hw);
-			out_be32(&siu_reg->sc_siel, siel);
-			__irq_set_handler_locked(d->irq, handle_edge_irq);
-		}
+		siel |= mpc8xx_irqd_to_bit(d);
+		out_be32(&siu_reg->sc_siel, siel);
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
 	}
 	return 0;
 }
@@ -132,6 +108,9 @@ static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
 		IRQ_TYPE_EDGE_FALLING,
 	};
 
+	if (intspec[0] > 0x1f)
+		return 0;
+
 	*out_hwirq = intspec[0];
 	if (intsize > 1 && intspec[1] < 4)
 		*out_flags = map_pic_senses[intspec[1]];
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index ea5e204e3450..cd1d18db92c6 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void)
 {
 	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
 	unsigned int irq, virq;
+	struct irq_desc *desc;
 
 	/* If we used to be the default server, move to the new "boot_cpuid" */
 	if (hw_cpu == xics_default_server)
@@ -202,8 +203,7 @@ void xics_migrate_irqs_away(void)
 	/* Allow IPIs again... */
 	icp_ops->set_priority(DEFAULT_PRIORITY);
 
-	for_each_irq(virq) {
-		struct irq_desc *desc;
+	for_each_irq_desc(virq, desc) {
 		struct irq_chip *chip;
 		long server;
 		unsigned long flags;
@@ -212,9 +212,8 @@ void xics_migrate_irqs_away(void)
 		/* We can't set affinity on ISA interrupts */
 		if (virq < NUM_ISA_INTERRUPTS)
 			continue;
-		desc = irq_to_desc(virq);
 		/* We only need to migrate enabled IRQS */
-		if (!desc || !desc->action)
+		if (!desc->action)
 			continue;
 		if (desc->irq_data.domain != xics_host)
 			continue;