Diffstat (limited to 'patches/workqueue-distangle-from-rq-lock.patch')
-rw-r--r--  patches/workqueue-distangle-from-rq-lock.patch  49
1 file changed, 27 insertions(+), 22 deletions(-)
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index ad9e7ee2ba5e..ec7be2fdb22b 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -24,25 +24,25 @@ Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/sched/core.c | 81 ++++++++------------------------------------
- kernel/workqueue.c | 52 ++++++++++++----------------
+ kernel/sched/core.c | 86 +++++++-------------------------------------
+ kernel/workqueue.c | 52 +++++++++++---------------
kernel/workqueue_internal.h | 5 +-
- 3 files changed, 41 insertions(+), 97 deletions(-)
+ 3 files changed, 41 insertions(+), 102 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1711,10 +1711,6 @@ static inline void ttwu_activate(struct
+@@ -1690,10 +1690,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
-
-- /* if a worker is waking up, notify workqueue */
+- /* If a worker is waking up, notify the workqueue: */
- if (p->flags & PF_WQ_WORKER)
- wq_worker_waking_up(p, cpu_of(rq));
}
/*
-@@ -2152,53 +2148,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2146,58 +2142,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
--static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
+-static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
-{
- struct rq *rq = task_rq(p);
-
@@ -71,11 +71,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- * disabled avoiding further scheduler activity on it and we've
- * not yet picked a replacement task.
- */
-- lockdep_unpin_lock(&rq->lock, cookie);
+- rq_unpin_lock(rq, rf);
- raw_spin_unlock(&rq->lock);
- raw_spin_lock(&p->pi_lock);
- raw_spin_lock(&rq->lock);
-- lockdep_repin_lock(&rq->lock, cookie);
+- rq_repin_lock(rq, rf);
- }
-
- if (!(p->state & TASK_NORMAL))
@@ -83,10 +83,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
- trace_sched_waking(p);
-
-- if (!task_on_rq_queued(p))
+- if (!task_on_rq_queued(p)) {
+- if (p->in_iowait) {
+- delayacct_blkio_end();
+- atomic_dec(&rq->nr_iowait);
+- }
- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+- }
-
-- ttwu_do_wakeup(rq, p, 0, cookie);
+- ttwu_do_wakeup(rq, p, 0, rf);
- ttwu_stat(p, smp_processor_id(), 0);
-out:
- raw_spin_unlock(&p->pi_lock);
@@ -96,10 +101,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3494,21 +3443,6 @@ static void __sched notrace __schedule(b
- } else {
- deactivate_task(rq, prev, DEQUEUE_SLEEP);
- prev->on_rq = 0;
+@@ -3485,21 +3429,6 @@ static void __sched notrace __schedule(b
+ atomic_inc(&rq->nr_iowait);
+ delayacct_blkio_start();
+ }
-
- /*
- * If a worker went to sleep, notify and ask workqueue
@@ -113,12 +118,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
- to_wakeup = wq_worker_sleeping(prev);
- if (to_wakeup)
-- try_to_wake_up_local(to_wakeup, cookie);
+- try_to_wake_up_local(to_wakeup, &rf);
- }
}
switch_count = &prev->nvcsw;
}
-@@ -3567,6 +3501,14 @@ static inline void sched_submit_work(str
+@@ -3564,6 +3493,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -133,7 +138,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3575,6 +3517,12 @@ static inline void sched_submit_work(str
+@@ -3572,6 +3509,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -146,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3585,6 +3533,7 @@ asmlinkage __visible void __sched schedu
+@@ -3582,6 +3525,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
@@ -156,7 +161,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -841,43 +841,32 @@ static void wake_up_worker(struct worker
+@@ -843,43 +843,32 @@ static void wake_up_worker(struct worker
}
/**
@@ -211,7 +216,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct worker_pool *pool;
/*
-@@ -886,13 +875,15 @@ struct task_struct *wq_worker_sleeping(s
+@@ -888,13 +877,15 @@ struct task_struct *wq_worker_sleeping(s
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
@@ -231,7 +236,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The counterpart of the following dec_and_test, implied mb,
-@@ -906,9 +897,12 @@ struct task_struct *wq_worker_sleeping(s
+@@ -908,9 +899,12 @@ struct task_struct *wq_worker_sleeping(s
* lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&