author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2019-12-20 10:12:41 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2019-12-20 10:12:41 +0100
commit     1d18e577de0eb36bea3e10793c1b880420e9d026 (patch)
tree       efd7e75fe0e6ccd3da4e44eaabbcead843e563cd
parent     da77ceac3d20f27310a07a7c346a4ee6b40d6c28 (diff)
download   linux-rt-1d18e577de0eb36bea3e10793c1b880420e9d026.tar.gz

[ANNOUNCE] v5.4.5-rt2 (v5.4.5-rt2-patches)
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/0004-workqueue-Convert-the-locks-to-raw-type.patch | 77
-rw-r--r--  patches/localversion.patch                                 |  2
2 files changed, 48 insertions, 31 deletions
diff --git a/patches/0004-workqueue-Convert-the-locks-to-raw-type.patch b/patches/0004-workqueue-Convert-the-locks-to-raw-type.patch
index d731cf7d6eb9..0571352dd2d3 100644
--- a/patches/0004-workqueue-Convert-the-locks-to-raw-type.patch
+++ b/patches/0004-workqueue-Convert-the-locks-to-raw-type.patch
@@ -8,8 +8,8 @@ The lock is not held over an unbounded period of time/iterations.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/workqueue.c | 164 ++++++++++++++++++++++++++---------------------------
- 1 file changed, 82 insertions(+), 82 deletions(-)
+ kernel/workqueue.c | 168 ++++++++++++++++++++++++++---------------------------
+ 1 file changed, 84 insertions(+), 84 deletions(-)
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -397,20 +397,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Slurp in all works issued via this workqueue and
-@@ -2528,10 +2528,10 @@ static int rescuer_thread(void *__rescue
+@@ -2528,7 +2528,7 @@ static int rescuer_thread(void *__rescue
* incur MAYDAY_INTERVAL delay inbetween.
*/
if (need_to_create_worker(pool)) {
- spin_lock(&wq_mayday_lock);
+ raw_spin_lock(&wq_mayday_lock);
- get_pwq(pwq);
- list_move_tail(&pwq->mayday_node, &wq->maydays);
+ /*
+ * Queue iff we aren't racing destruction
+ * and somebody else hasn't queued it already.
+@@ -2537,7 +2537,7 @@ static int rescuer_thread(void *__rescue
+ get_pwq(pwq);
+ list_add_tail(&pwq->mayday_node, &wq->maydays);
+ }
- spin_unlock(&wq_mayday_lock);
+ raw_spin_unlock(&wq_mayday_lock);
}
}
-@@ -2549,14 +2549,14 @@ static int rescuer_thread(void *__rescue
+@@ -2555,14 +2555,14 @@ static int rescuer_thread(void *__rescue
if (need_more_worker(pool))
wake_up_worker(pool);
@@ -428,7 +433,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (should_stop) {
__set_current_state(TASK_RUNNING);
-@@ -2636,7 +2636,7 @@ static void wq_barrier_func(struct work_
+@@ -2642,7 +2642,7 @@ static void wq_barrier_func(struct work_
* underneath us, so we can't reliably determine pwq from @target.
*
* CONTEXT:
@@ -437,7 +442,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static void insert_wq_barrier(struct pool_workqueue *pwq,
struct wq_barrier *barr,
-@@ -2723,7 +2723,7 @@ static bool flush_workqueue_prep_pwqs(st
+@@ -2729,7 +2729,7 @@ static bool flush_workqueue_prep_pwqs(st
for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;
@@ -446,7 +451,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (flush_color >= 0) {
WARN_ON_ONCE(pwq->flush_color != -1);
-@@ -2740,7 +2740,7 @@ static bool flush_workqueue_prep_pwqs(st
+@@ -2746,7 +2746,7 @@ static bool flush_workqueue_prep_pwqs(st
pwq->work_color = work_color;
}
@@ -455,7 +460,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
-@@ -2940,9 +2940,9 @@ void drain_workqueue(struct workqueue_st
+@@ -2946,9 +2946,9 @@ void drain_workqueue(struct workqueue_st
for_each_pwq(pwq, wq) {
bool drained;
@@ -467,7 +472,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (drained)
continue;
-@@ -2978,7 +2978,7 @@ static bool start_flush_work(struct work
+@@ -2984,7 +2984,7 @@ static bool start_flush_work(struct work
return false;
}
@@ -476,7 +481,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2994,7 +2994,7 @@ static bool start_flush_work(struct work
+@@ -3000,7 +3000,7 @@ static bool start_flush_work(struct work
check_flush_dependency(pwq->wq, work);
insert_wq_barrier(pwq, barr, work, worker);
@@ -485,7 +490,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Force a lock recursion deadlock when using flush_work() inside a
-@@ -3013,7 +3013,7 @@ static bool start_flush_work(struct work
+@@ -3019,7 +3019,7 @@ static bool start_flush_work(struct work
rcu_read_unlock();
return true;
already_gone:
@@ -494,7 +499,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_read_unlock();
return false;
}
-@@ -3406,7 +3406,7 @@ static bool wqattrs_equal(const struct w
+@@ -3412,7 +3412,7 @@ static bool wqattrs_equal(const struct w
*/
static int init_worker_pool(struct worker_pool *pool)
{
@@ -503,7 +508,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pool->id = -1;
pool->cpu = -1;
pool->node = NUMA_NO_NODE;
-@@ -3532,7 +3532,7 @@ static void put_unbound_pool(struct work
+@@ -3538,7 +3538,7 @@ static void put_unbound_pool(struct work
* @pool's workers from blocking on attach_mutex. We're the last
* manager and @pool gets freed with the flag set.
*/
@@ -512,7 +517,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
swait_event_lock_irq(wq_manager_wait,
!(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
pool->flags |= POOL_MANAGER_ACTIVE;
-@@ -3540,7 +3540,7 @@ static void put_unbound_pool(struct work
+@@ -3546,7 +3546,7 @@ static void put_unbound_pool(struct work
while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -521,7 +526,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_lock(&wq_pool_attach_mutex);
if (!list_empty(&pool->workers))
-@@ -3696,7 +3696,7 @@ static void pwq_adjust_max_active(struct
+@@ -3702,7 +3702,7 @@ static void pwq_adjust_max_active(struct
return;
/* this function can be called during early boot w/ irq disabled */
@@ -530,7 +535,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* During [un]freezing, the caller is responsible for ensuring that
-@@ -3719,7 +3719,7 @@ static void pwq_adjust_max_active(struct
+@@ -3725,7 +3725,7 @@ static void pwq_adjust_max_active(struct
pwq->max_active = 0;
}
@@ -539,7 +544,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* initialize newly alloced @pwq which is associated with @wq and @pool */
-@@ -4121,9 +4121,9 @@ static void wq_update_unbound_numa(struc
+@@ -4127,9 +4127,9 @@ static void wq_update_unbound_numa(struc
use_dfl_pwq:
mutex_lock(&wq->mutex);
@@ -551,7 +556,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
out_unlock:
mutex_unlock(&wq->mutex);
-@@ -4519,10 +4519,10 @@ unsigned int work_busy(struct work_struc
+@@ -4342,9 +4342,9 @@ void destroy_workqueue(struct workqueue_
+ struct worker *rescuer = wq->rescuer;
+
+ /* this prevents new queueing */
+- spin_lock_irq(&wq_mayday_lock);
++ raw_spin_lock_irq(&wq_mayday_lock);
+ wq->rescuer = NULL;
+- spin_unlock_irq(&wq_mayday_lock);
++ raw_spin_unlock_irq(&wq_mayday_lock);
+
+ /* rescuer will empty maydays list before exiting */
+ kthread_stop(rescuer->task);
+@@ -4540,10 +4540,10 @@ unsigned int work_busy(struct work_struc
rcu_read_lock();
pool = get_work_pool(work);
if (pool) {
@@ -564,7 +581,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_unlock();
-@@ -4728,10 +4728,10 @@ void show_workqueue_state(void)
+@@ -4750,10 +4750,10 @@ void show_workqueue_state(void)
pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
for_each_pwq(pwq, wq) {
@@ -577,7 +594,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
-@@ -4745,7 +4745,7 @@ void show_workqueue_state(void)
+@@ -4767,7 +4767,7 @@ void show_workqueue_state(void)
struct worker *worker;
bool first = true;
@@ -586,7 +603,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (pool->nr_workers == pool->nr_idle)
goto next_pool;
-@@ -4764,7 +4764,7 @@ void show_workqueue_state(void)
+@@ -4786,7 +4786,7 @@ void show_workqueue_state(void)
}
pr_cont("\n");
next_pool:
@@ -595,7 +612,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
-@@ -4794,7 +4794,7 @@ void wq_worker_comm(char *buf, size_t si
+@@ -4816,7 +4816,7 @@ void wq_worker_comm(char *buf, size_t si
struct worker_pool *pool = worker->pool;
if (pool) {
@@ -604,7 +621,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* ->desc tracks information (wq name or
* set_worker_desc()) for the latest execution. If
-@@ -4808,7 +4808,7 @@ void wq_worker_comm(char *buf, size_t si
+@@ -4830,7 +4830,7 @@ void wq_worker_comm(char *buf, size_t si
scnprintf(buf + off, size - off, "-%s",
worker->desc);
}
@@ -613,7 +630,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -4839,7 +4839,7 @@ static void unbind_workers(int cpu)
+@@ -4861,7 +4861,7 @@ static void unbind_workers(int cpu)
for_each_cpu_worker_pool(pool, cpu) {
mutex_lock(&wq_pool_attach_mutex);
@@ -622,7 +639,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We've blocked all attach/detach operations. Make all workers
-@@ -4853,7 +4853,7 @@ static void unbind_workers(int cpu)
+@@ -4875,7 +4875,7 @@ static void unbind_workers(int cpu)
pool->flags |= POOL_DISASSOCIATED;
@@ -631,7 +648,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_unlock(&wq_pool_attach_mutex);
/*
-@@ -4879,9 +4879,9 @@ static void unbind_workers(int cpu)
+@@ -4901,9 +4901,9 @@ static void unbind_workers(int cpu)
* worker blocking could lead to lengthy stalls. Kick off
* unbound chain execution of currently pending work items.
*/
@@ -643,7 +660,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -4908,7 +4908,7 @@ static void rebind_workers(struct worker
+@@ -4930,7 +4930,7 @@ static void rebind_workers(struct worker
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
pool->attrs->cpumask) < 0);
@@ -652,7 +669,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pool->flags &= ~POOL_DISASSOCIATED;
-@@ -4947,7 +4947,7 @@ static void rebind_workers(struct worker
+@@ -4969,7 +4969,7 @@ static void rebind_workers(struct worker
WRITE_ONCE(worker->flags, worker_flags);
}
diff --git a/patches/localversion.patch b/patches/localversion.patch
index a02382e6df70..279489a1d145 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt1
++-rt2
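
For readers who want the locking change outside of patch-of-patch context, below is a minimal C sketch of the destroy_workqueue() hunk added in this release (the one touching wq_mayday_lock above). The helper name detach_rescuer() and the cut-down struct definitions are illustrative stand-ins only; the real definitions live in kernel/workqueue.c and kernel/workqueue_internal.h.

/* Minimal sketch, not the actual kernel source: the point is the
 * spin_lock_irq() -> raw_spin_lock_irq() substitution on wq_mayday_lock. */
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/slab.h>

static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* was DEFINE_SPINLOCK() */

/* cut-down stand-ins for the internal workqueue structures */
struct worker { struct task_struct *task; };
struct workqueue_struct { struct worker *rescuer; };

/* hypothetical helper mirroring the new destroy_workqueue() hunk */
static void detach_rescuer(struct workqueue_struct *wq)
{
	struct worker *rescuer = wq->rescuer;

	/* this prevents new queueing */
	raw_spin_lock_irq(&wq_mayday_lock);	/* was spin_lock_irq() */
	wq->rescuer = NULL;
	raw_spin_unlock_irq(&wq_mayday_lock);	/* was spin_unlock_irq() */

	/* rescuer will empty the maydays list before exiting */
	kthread_stop(rescuer->task);
	kfree(rescuer);
}

The rationale, per the patch description quoted in the hunk context, is that these locks are only held for short, bounded periods, so they can stay non-sleeping raw spinlocks under PREEMPT_RT instead of being converted to sleeping locks.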