author     Thomas Gleixner <tglx@linutronix.de>      2011-06-25 09:21:04 +0200
committer  Clark Williams <williams@redhat.com>      2020-11-06 15:43:50 -0600
commit     0c599154d9120942e15b6905f70ea0ce00058f1a (patch)
tree       9efa6e531f87c11aac6e6ad48965df73221df7b5
parent     a1a1c5a562702d10a6e5cc17aabc3deadd035422 (diff)
download   linux-rt-0c599154d9120942e15b6905f70ea0ce00058f1a.tar.gz
sched: Add saved_state for tasks blocked on sleeping locks
Spinlocks are state preserving in !RT. RT changes the task state when a
task gets blocked on a lock, so we need to remember the state before
the lock contention. If a regular wakeup (not an rtmutex-related
wakeup) happens, saved_state is updated to TASK_RUNNING. When the lock
sleep is done, the saved state is restored.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
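
[Editor's note: for orientation, a minimal sketch of how the blocking side of an
RT "sleeping spinlock" is expected to use the new saved_state field. The function
name rt_spin_lock_slowlock() and its body are assumptions reconstructed from the
commit message; they are not part of this patch.]

/* Hypothetical lock-sleep side (assumed, not in this diff). */
static void rt_spin_lock_slowlock(struct rt_mutex *lock)
{
        struct task_struct *self = current;

        raw_spin_lock(&self->pi_lock);
        /* Remember the pre-contention state before blocking. */
        self->saved_state = self->state;
        __set_current_state(TASK_UNINTERRUPTIBLE);
        raw_spin_unlock(&self->pi_lock);

        /* ... block until the lock is acquired ... */

        raw_spin_lock(&self->pi_lock);
        /*
         * Restore the saved state. If a regular wakeup arrived while we
         * slept on the lock, try_to_wake_up() has already set saved_state
         * to TASK_RUNNING, so that wakeup is not lost.
         */
        __set_current_state(self->saved_state);
        self->saved_state = TASK_RUNNING;
        raw_spin_unlock(&self->pi_lock);
}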
-rw-r--r--  include/linux/sched.h |  2
-rw-r--r--  kernel/sched/core.c   | 31
-rw-r--r--  kernel/sched/sched.h  |  1
3 files changed, 33 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2e96f7e709f0..8d85515ae401 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1493,6 +1493,7 @@ struct task_struct {
         struct thread_info thread_info;
 #endif
         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
+        volatile long saved_state; /* saved state for "spinlock sleepers" */
         void *stack;
         atomic_t usage;
         unsigned int flags;     /* per process flags, defined below */
@@ -2738,6 +2739,7 @@ extern void xtime_update(unsigned long ticks);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct * tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 87e0238b6026..c11a582aeabd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2034,8 +2034,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
          */
         smp_mb__before_spinlock();
         raw_spin_lock_irqsave(&p->pi_lock, flags);
-        if (!(p->state & state))
+        if (!(p->state & state)) {
+                /*
+                 * The task might be running due to a spinlock sleeper
+                 * wakeup. Check the saved state and set it to running
+                 * if the wakeup condition is true.
+                 */
+                if (!(wake_flags & WF_LOCK_SLEEPER)) {
+                        if (p->saved_state & state)
+                                p->saved_state = TASK_RUNNING;
+                }
                 goto out;
+        }
+
+        /*
+         * If this is a regular wakeup, then we can unconditionally
+         * clear the saved state of a "lock sleeper".
+         */
+        if (!(wake_flags & WF_LOCK_SLEEPER))
+                p->saved_state = TASK_RUNNING;
 
         trace_sched_waking(p);
@@ -2182,6 +2199,18 @@ int wake_up_process(struct task_struct *p)
 }
 EXPORT_SYMBOL(wake_up_process);
 
+/**
+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
+ * @p: The process to be woken up.
+ *
+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
+ * the nature of the wakeup.
+ */
+int wake_up_lock_sleeper(struct task_struct *p)
+{
+        return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
+}
+
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
         return try_to_wake_up(p, state, 0);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 819bd5fb0264..72e161f6b9b8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1174,6 +1174,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #define WF_SYNC         0x01            /* waker goes to sleep after wakeup */
 #define WF_FORK         0x02            /* child wakeup after fork */
 #define WF_MIGRATED     0x4             /* internal use, task got migrated */
+#define WF_LOCK_SLEEPER 0x08            /* wakeup spinlock "sleeper" */
 
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
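
[Editor's note: to round out the picture, a hedged sketch of the unlock side. The
lock-release path would call the new wake_up_lock_sleeper() instead of
wake_up_process(), so the wakeup ends only the lock sleep and does not clobber a
regular wakeup latched in ->saved_state. The caller name rt_mutex_wake_waiter()
is illustrative and not taken from this diff; struct rt_mutex_waiter's task
pointer is assumed per the mainline rtmutex code.]

/* Hypothetical caller (assumed usage, not part of this patch). */
static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
{
        /*
         * WF_LOCK_SLEEPER tells try_to_wake_up() to leave ->saved_state
         * alone: only a regular wakeup may set it to TASK_RUNNING.
         */
        wake_up_lock_sleeper(waiter->task);
}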