author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2017-08-31 18:19:06 +0200
committer  Luis Claudio R. Goncalves <lgoncalv@redhat.com>    2021-10-11 07:51:27 -0300
commit     8af264204a92b1bc764a2f5476073b4cdd1a616d (patch)
tree       ebd15931c2fba8f057af35555bfdd6eb3190e0ae /kernel/time
parent     8dba09568c1b48ddd950d8f065215a47b355f99f (diff)
download   linux-rt-8af264204a92b1bc764a2f5476073b4cdd1a616d.tar.gz
kernel/hrtimer: don't wakeup a process while holding the hrtimer base lock
We must not wake any process (and thus acquire the pi->lock) while holding the
hrtimer's base lock. This usually does not happen because the hrtimer callback
is invoked in IRQ context, so raise_softirq_irqoff() does not wake a process.
However, during CPU hotplug it may be called from hrtimers_dead_cpu(), which
would wake the thread immediately.

Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
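The fix is a lock-ordering pattern rather than anything hrtimer-specific: while
the base lock is held, the need for a wakeup is only recorded in a flag, and the
call that may wake another task is issued after the lock has been dropped. The
following is a minimal userspace sketch of that pattern using pthreads; the
names (queue_lock, process_queue, wake_worker) are illustrative only and are not
taken from the kernel sources.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wake_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake_cond  = PTHREAD_COND_INITIALIZER;
static bool work_pending;

/*
 * Walk the queue under queue_lock and only report whether a wakeup is
 * needed, mirroring how __hrtimer_run_queues() now returns 'raise'
 * instead of raising the softirq itself.
 */
static bool process_queue(void)
{
	bool need_wakeup = false;

	pthread_mutex_lock(&queue_lock);
	/* ... expire timers; set need_wakeup when work was deferred ... */
	need_wakeup = true;
	pthread_mutex_unlock(&queue_lock);

	return need_wakeup;
}

/*
 * Wake the worker only after queue_lock has been released, so the wakeup
 * path never takes its own locks while nested inside queue_lock.
 */
static void wake_worker(void)
{
	pthread_mutex_lock(&wake_lock);
	work_pending = true;
	pthread_cond_signal(&wake_cond);
	pthread_mutex_unlock(&wake_lock);
}

int main(void)
{
	if (process_queue())	/* analogous to raise = __hrtimer_run_queues() */
		wake_worker();	/* analogous to raise_softirq_irqoff(HRTIMER_SOFTIRQ) */
	return 0;
}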
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/hrtimer.c | 15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index a641e549d12c..b53cb2405417 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1450,7 +1450,7 @@ static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
-static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+static int __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
struct hrtimer_clock_base *base = cpu_base->clock_base;
unsigned int active = cpu_base->active_bases;
@@ -1500,8 +1500,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
raise = 1;
}
}
- if (raise)
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ return raise;
}
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1515,6 +1514,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
int retries = 0;
+ int raise;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
@@ -1533,7 +1533,7 @@ retry:
*/
cpu_base->expires_next.tv64 = KTIME_MAX;
- __hrtimer_run_queues(cpu_base, now);
+ raise = __hrtimer_run_queues(cpu_base, now);
/* Reevaluate the clock bases for the next expiry */
expires_next = __hrtimer_get_next_event(cpu_base);
@@ -1544,6 +1544,8 @@ retry:
cpu_base->expires_next = expires_next;
cpu_base->in_hrtirq = 0;
raw_spin_unlock(&cpu_base->lock);
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
/* Reprogramming necessary ? */
if (!tick_program_event(expires_next, 0)) {
@@ -1623,6 +1625,7 @@ void hrtimer_run_queues(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t now;
+ int raise;
if (__hrtimer_hres_active(cpu_base))
return;
@@ -1641,8 +1644,10 @@ void hrtimer_run_queues(void)
raw_spin_lock(&cpu_base->lock);
now = hrtimer_update_base(cpu_base);
- __hrtimer_run_queues(cpu_base, now);
+ raise = __hrtimer_run_queues(cpu_base, now);
raw_spin_unlock(&cpu_base->lock);
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
/*