path: root/arch/arm/mach-omap2/omap-wakeupgen.c
author    Thomas Gleixner <tglx@linutronix.de>    2012-12-07 16:49:47 +0200
committer Tony Lindgren <tony@atomide.com>    2013-02-01 14:33:56 -0800
commit    aecb9e1422e904d1950620d90c589a141cb32196 (patch)
tree      3b300a0cde3c8002fdedebfe0bc4f9bbec422068 /arch/arm/mach-omap2/omap-wakeupgen.c
parent    88b62b915b0b7e25870eb0604ed9a92ba4bfc9f7 (diff)
download  linux-next-aecb9e1422e904d1950620d90c589a141cb32196.tar.gz
ARM: OMAP: make wakeupgen_lock raw
When the RT patch is applied on top of Linux, spinlocks are implemented as RT-mutexes, which means they are preemptible. The current GIC implementation on OMAP uses a spinlock to protect against preemption. As it turns out, we need to convert that lock into a raw_spinlock so that OMAP's interrupt controller works as expected after the RT patch is applied.

This patch simply decreases the amount of changes the RT team needs to carry out of tree. It doesn't cause any change in behavior.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Felipe Balbi <balbi@ti.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
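For reference, the pattern the conversion follows, as a minimal C sketch (not part of the patch; my_lock, my_hw_clear() and my_chip_mask() are hypothetical names): irq_chip callbacks such as .irq_mask run with interrupts disabled and must never sleep, and raw_spinlock_t remains a true spinning lock even when PREEMPT_RT turns spinlock_t into a sleeping rt_mutex.

#include <linux/irq.h>
#include <linux/spinlock.h>

/* A raw spinlock stays a spinning lock even on PREEMPT_RT,
 * where plain spinlock_t becomes a sleeping rt_mutex. */
static DEFINE_RAW_SPINLOCK(my_lock);

/* Hypothetical register write, standing in for _wakeupgen_clear(). */
static void my_hw_clear(unsigned int irq)
{
	/* clear the per-IRQ wakeup-enable bit here */
}

/* irq_chip .irq_mask callback: runs in hard atomic context with
 * interrupts off, so taking a sleeping lock here would be a bug. */
static void my_chip_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&my_lock, flags);
	my_hw_clear(d->irq);
	raw_spin_unlock_irqrestore(&my_lock, flags);
}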
Diffstat (limited to 'arch/arm/mach-omap2/omap-wakeupgen.c')
-rw-r--r--  arch/arm/mach-omap2/omap-wakeupgen.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 5d3b4f4f81ae..8633a43acae2 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -46,7 +46,7 @@
 
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
-static DEFINE_SPINLOCK(wakeupgen_lock);
+static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
 static unsigned int irq_target_cpu[MAX_IRQS];
 static unsigned int irq_banks = MAX_NR_REG_BANKS;
 static unsigned int max_irqs = MAX_IRQS;
@@ -134,9 +134,9 @@ static void wakeupgen_mask(struct irq_data *d)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&wakeupgen_lock, flags);
+	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
 	_wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
-	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
 }
 
 /*
@@ -146,9 +146,9 @@ static void wakeupgen_unmask(struct irq_data *d)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&wakeupgen_lock, flags);
+	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
 	_wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
-	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -189,7 +189,7 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&wakeupgen_lock, flags);
+	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
 	if (set) {
 		_wakeupgen_save_masks(cpu);
 		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
@@ -197,7 +197,7 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
 		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
 		_wakeupgen_restore_masks(cpu);
 	}
-	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
 }
 
 #endif
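Note on the design choice: the conversion is effectively a no-op on mainline, where spinlock_t is itself built on raw spinlocks; under PREEMPT_RT it keeps these register accesses non-preemptible, which is the behavior an interrupt-chip driver needs. The usual trade-off of a raw spinlock, added latency for anyone contending on it, is acceptable here because each critical section is only a handful of register writes.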