 31 files changed, 118 insertions(+), 114 deletions(-)
diff --git a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index bb0fa3e37319..1c1875c14e46 100644
--- a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/fs/nfs/nfs4proc.c
 +++ b/fs/nfs/nfs4proc.c
-@@ -2818,7 +2818,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2822,7 +2822,7 @@ static int _nfs4_open_and_get_state(stru
 	unsigned int seq;
 	int ret;
 
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 	ret = _nfs4_proc_open(opendata, ctx);
 	if (ret != 0)
-@@ -2856,7 +2856,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2860,7 +2860,7 @@ static int _nfs4_open_and_get_state(stru
 		if (d_inode(dentry) == state->inode) {
 			nfs_inode_attach_open_context(ctx);
 
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	mutex_init(&sp->so_delegreturn_mutex);
 	return sp;
 }
-@@ -1560,8 +1560,12 @@ static int nfs4_reclaim_open_state(struc
+@@ -1562,8 +1562,12 @@ static int nfs4_reclaim_open_state(struc
 	 * recovering after a network partition or a reboot from a
 	 * server that doesn't support a grace period.
 	 */
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 restart:
 	list_for_each_entry(state, &sp->so_states, open_states) {
 		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
-@@ -1630,14 +1634,20 @@ static int nfs4_reclaim_open_state(struc
+@@ -1632,14 +1636,20 @@ static int nfs4_reclaim_open_state(struc
 		spin_lock(&sp->so_lock);
 		goto restart;
 	}
diff --git a/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
index abf141ba06c1..7263c0a3deb7 100644
--- a/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
+++ b/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -965,12 +965,21 @@ void blk_queue_exit(struct request_queue
+@@ -969,12 +969,21 @@ void blk_queue_exit(struct request_queue
 	percpu_ref_put(&q->q_usage_counter);
 }
 
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 
 static void blk_rq_timed_out_timer(struct timer_list *t)
-@@ -1064,6 +1073,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -1068,6 +1077,7 @@ struct request_queue *blk_alloc_queue_no
 	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
-@@ -3948,6 +3958,8 @@ int __init blk_dev_init(void)
+@@ -3957,6 +3967,8 @@ int __init blk_dev_init(void)
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
diff --git a/patches/block-mq-don-t-complete-requests-via-IPI.patch b/patches/block-mq-don-t-complete-requests-via-IPI.patch
index 15818fba9c18..c35cadc0a6c8 100644
--- a/patches/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/patches/block-mq-don-t-complete-requests-via-IPI.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
 
-@@ -545,12 +548,24 @@ void blk_mq_end_request(struct request *
+@@ -547,12 +550,24 @@ void blk_mq_end_request(struct request *
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 static void __blk_mq_complete_request(struct request *rq)
 {
-@@ -573,10 +588,18 @@ static void __blk_mq_complete_request(st
+@@ -575,10 +590,18 @@ static void __blk_mq_complete_request(st
 	shared = cpus_share_cache(cpu, ctx->cpu);
 
 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch
index 39b3e1252d1d..08d15df28abe 100644
--- a/patches/block-mq-drop-preempt-disable.patch
+++ b/patches/block-mq-drop-preempt-disable.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -568,7 +568,7 @@ static void __blk_mq_complete_request(st
+@@ -570,7 +570,7 @@ static void __blk_mq_complete_request(st
 		return;
 	}
 
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
 		shared = cpus_share_cache(cpu, ctx->cpu);
 
-@@ -580,7 +580,7 @@ static void __blk_mq_complete_request(st
+@@ -582,7 +582,7 @@ static void __blk_mq_complete_request(st
 	} else {
 		rq->q->softirq_done_fn(rq);
 	}
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 
 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
-@@ -1324,14 +1324,14 @@ static void __blk_mq_delay_run_hw_queue(
+@@ -1328,14 +1328,14 @@ static void __blk_mq_delay_run_hw_queue(
 		return;
 
 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
diff --git a/patches/debugobjects-rt.patch b/patches/debugobjects-rt.patch
index 319fe4179911..9daf6a4e69c2 100644
--- a/patches/debugobjects-rt.patch
+++ b/patches/debugobjects-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/lib/debugobjects.c
 +++ b/lib/debugobjects.c
-@@ -374,7 +374,10 @@ static void
+@@ -377,7 +377,10 @@ static void
 	struct debug_obj *obj;
 	unsigned long flags;
diff --git a/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch b/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch
index 5c05159b617c..7ab1869003e4 100644
--- a/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch
+++ b/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/fs/dcache.c
 +++ b/fs/dcache.c
-@@ -3115,6 +3115,8 @@ static int __init set_dhash_entries(char
+@@ -3116,6 +3116,8 @@ static int __init set_dhash_entries(char
 static void __init dcache_init_early(void)
 {
 
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	/* If hashes are distributed across NUMA nodes, defer
 	 * hash allocation until vmalloc space is available.
 	 */
-@@ -3131,11 +3133,16 @@ static void __init dcache_init_early(voi
+@@ -3132,11 +3134,16 @@ static void __init dcache_init_early(voi
 					NULL,
 					0,
 					0);
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	/*
 	 * A constructor could be added for stable state like the lists,
 	 * but it is probably not worth it because of the cache nature
-@@ -3159,6 +3166,10 @@ static void __init dcache_init(void)
+@@ -3160,6 +3167,10 @@ static void __init dcache_init(void)
 					NULL,
 					0,
 					0);
diff --git a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
index 0fa92bdad053..c8a768865a3e 100644
--- a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
+++ b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/fs/dcache.c
 +++ b/fs/dcache.c
-@@ -2430,9 +2430,10 @@ EXPORT_SYMBOL(d_rehash);
+@@ -2431,9 +2431,10 @@ EXPORT_SYMBOL(d_rehash);
 
 static inline unsigned start_dir_add(struct inode *dir)
 {
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			return n;
 		cpu_relax();
 	}
-@@ -2440,7 +2441,8 @@ static inline unsigned start_dir_add(str
+@@ -2441,7 +2442,8 @@ static inline unsigned start_dir_add(str
 
 static inline void end_dir_add(struct inode *dir, unsigned n)
 {
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 
 static void d_wait_lookup(struct dentry *dentry)
-@@ -2473,7 +2475,7 @@ struct dentry *d_alloc_parallel(struct d
+@@ -2474,7 +2476,7 @@ struct dentry *d_alloc_parallel(struct d
 retry:
 	rcu_read_lock();
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	r_seq = read_seqbegin(&rename_lock);
 	dentry = __d_lookup_rcu(parent, name, &d_seq);
 	if (unlikely(dentry)) {
-@@ -2501,7 +2503,7 @@ struct dentry *d_alloc_parallel(struct d
+@@ -2502,7 +2504,7 @@ struct dentry *d_alloc_parallel(struct d
 	}
 
 	hlist_bl_lock(b);
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 5ae1d532bedb..7587954c41fc 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/fs/dcache.c
 +++ b/fs/dcache.c
-@@ -2447,21 +2447,24 @@ static inline void end_dir_add(struct in
+@@ -2448,21 +2448,24 @@ static inline void end_dir_add(struct in
 
 static void d_wait_lookup(struct dentry *dentry)
 {
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 {
 	unsigned int hash = name->hash;
 	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
-@@ -2576,7 +2579,7 @@ void __d_lookup_done(struct dentry *dent
+@@ -2577,7 +2580,7 @@ void __d_lookup_done(struct dentry *dent
 	hlist_bl_lock(b);
 	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
 	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index 292b2464ce78..bc59d9ab8e10 100644
--- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	timer->function = perf_mux_hrtimer_handler;
 }
 
-@@ -9166,7 +9166,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -9170,7 +9170,7 @@ static void perf_swevent_init_hrtimer(st
 	if (!is_sampling_event(event))
 		return;
 
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -5182,9 +5182,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -5184,9 +5184,9 @@ void init_cfs_bandwidth(struct cfs_bandw
 	cfs_b->period = ns_to_ktime(default_cfs_period());
 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
diff --git a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
index 24f7b9ef207b..1d311bfc5725 100644
--- a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
+++ b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
@@ -26,7 +26,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -2982,10 +2982,9 @@ static bool blk_mq_poll_hybrid_sleep(str
+@@ -2986,10 +2986,9 @@ static bool blk_mq_poll_hybrid_sleep(str
 	kt = nsecs;
 
 	mode = HRTIMER_MODE_REL;
diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch
index 581b7523f63d..40e012ee235c 100644
--- a/patches/hrtimers-prepare-full-preemption.patch
+++ b/patches/hrtimers-prepare-full-preemption.patch
@@ -173,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 		expires = timeval_to_ktime(value->it_value);
 --- a/kernel/time/posix-timers.c
 +++ b/kernel/time/posix-timers.c
-@@ -469,7 +469,7 @@ static struct k_itimer * alloc_posix_tim
+@@ -478,7 +478,7 @@ static struct k_itimer * alloc_posix_tim
 
 static void k_itimer_rcu_free(struct rcu_head *head)
 {
@@ -182,7 +182,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 
 	kmem_cache_free(posix_timers_cache, tmr);
 }
-@@ -486,7 +486,7 @@ static void release_posix_timer(struct k
+@@ -495,7 +495,7 @@ static void release_posix_timer(struct k
 	}
 	put_pid(tmr->it_pid);
 	sigqueue_free(tmr->sigq);
@@ -191,7 +191,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 static int common_timer_create(struct k_itimer *new_timer)
-@@ -825,6 +825,22 @@ static void common_hrtimer_arm(struct k_
+@@ -834,6 +834,22 @@ static void common_hrtimer_arm(struct k_
 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
@@ -214,7 +214,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
 {
 	return hrtimer_try_to_cancel(&timr->it.real.timer);
-@@ -889,6 +905,7 @@ static int do_timer_settime(timer_t time
+@@ -898,6 +914,7 @@ static int do_timer_settime(timer_t time
 	if (!timr)
 		return -EINVAL;
 
@@ -222,7 +222,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	kc = timr->kclock;
 	if (WARN_ON_ONCE(!kc || !kc->timer_set))
 		error = -EINVAL;
-@@ -897,9 +914,12 @@ static int do_timer_settime(timer_t time
+@@ -906,9 +923,12 @@ static int do_timer_settime(timer_t time
 	unlock_timer(timr, flag);
 	if (error == TIMER_RETRY) {
@@ -235,7 +235,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 
 	return error;
 }
-@@ -981,10 +1001,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
+@@ -990,10 +1010,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
 	if (!timer)
 		return -EINVAL;
 
@@ -251,7 +251,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	spin_lock(&current->sighand->siglock);
 	list_del(&timer->list);
-@@ -1010,8 +1035,18 @@ static void itimer_delete(struct k_itime
+@@ -1019,8 +1044,18 @@ static void itimer_delete(struct k_itime
 retry_delete:
 	spin_lock_irqsave(&timer->it_lock, flags);
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index a007866c472b..9be6b7c2b973 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -26,8 +26,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  kernel/rcu/tree.c        |    1 
  kernel/sched/topology.c  |    1 
  kernel/time/tick-sched.c |    1 
- kernel/time/timer.c      |    1 
- 6 files changed, 59 insertions(+), 13 deletions(-)
+ kernel/time/timer.c      |    2 +
+ 6 files changed, 60 insertions(+), 13 deletions(-)
 
 --- a/include/linux/irq_work.h
 +++ b/include/linux/irq_work.h
@@ -211,11 +211,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 /*
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -1717,6 +1717,7 @@ static __latent_entropy void run_timer_s
+@@ -1733,6 +1733,8 @@ static __latent_entropy void run_timer_s
 {
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
 +	irq_work_tick_soft();
-	/*
-	 * must_forward_clk must be cleared before running timers so that any
-	 * timer functions that call mod_timer will not try to forward the
++
+ 	__run_timers(base);
+ 	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
+ 		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index f5524963d9c4..1715f02d904f 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -153,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	ctx->last_ran = raw_smp_processor_id();
 --- a/drivers/infiniband/hw/hfi1/affinity.c
 +++ b/drivers/infiniband/hw/hfi1/affinity.c
-@@ -1019,7 +1019,7 @@ int hfi1_get_proc_affinity(int node)
+@@ -1037,7 +1037,7 @@ int hfi1_get_proc_affinity(int node)
 	struct hfi1_affinity_node *entry;
 	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
 	const struct cpumask *node_mask,
@@ -162,7 +162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	struct hfi1_affinity_node_list *affinity = &node_affinity;
 	struct cpu_mask_set *set = &affinity->proc;
 
-@@ -1027,7 +1027,7 @@ int hfi1_get_proc_affinity(int node)
+@@ -1045,7 +1045,7 @@ int hfi1_get_proc_affinity(int node)
 	 * check whether process/context affinity has already
 	 * been set
 	 */
@@ -171,7 +171,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
 			  current->pid, current->comm,
 			  cpumask_pr_args(proc_mask));
-@@ -1038,7 +1038,7 @@ int hfi1_get_proc_affinity(int node)
+@@ -1056,7 +1056,7 @@ int hfi1_get_proc_affinity(int node)
 		cpu = cpumask_first(proc_mask);
 		cpumask_set_cpu(cpu, &set->used);
 		goto done;
@@ -281,7 +281,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -845,6 +845,8 @@ static struct task_struct *dup_task_stru
+@@ -844,6 +844,8 @@ static struct task_struct *dup_task_stru
 #ifdef CONFIG_STACKPROTECTOR
 	tsk->stack_canary = get_random_canary();
 #endif
@@ -523,7 +523,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		     !task_on_rq_queued(task))) {
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -1614,7 +1614,7 @@ static void task_numa_compare(struct tas
+@@ -1616,7 +1616,7 @@ static void task_numa_compare(struct tas
 	 */
 	if (cur) {
 		/* Skip this swap candidate if cannot move to the source CPU: */
@@ -532,7 +532,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			goto unlock;
 
 		/*
-@@ -1724,7 +1724,7 @@ static void task_numa_find_cpu(struct ta
+@@ -1726,7 +1726,7 @@ static void task_numa_find_cpu(struct ta
 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
 		/* Skip this CPU if the source task cannot migrate */
@@ -541,7 +541,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			continue;
 
 		env->dst_cpu = cpu;
-@@ -6016,7 +6016,7 @@ find_idlest_group(struct sched_domain *s
+@@ -6018,7 +6018,7 @@ find_idlest_group(struct sched_domain *s
 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpumask_intersects(sched_group_span(group),
@@ -550,7 +550,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			continue;
 
 		local_group = cpumask_test_cpu(this_cpu,
-@@ -6148,7 +6148,7 @@ find_idlest_group_cpu(struct sched_group
+@@ -6150,7 +6150,7 @@ find_idlest_group_cpu(struct sched_group
 		return cpumask_first(sched_group_span(group));
 
 	/* Traverse only the allowed CPUs */
@@ -559,7 +559,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		if (available_idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
-@@ -6188,7 +6188,7 @@ static inline int find_idlest_cpu(struct
+@@ -6190,7 +6190,7 @@ static inline int find_idlest_cpu(struct
 {
 	int new_cpu = cpu;
 
@@ -568,7 +568,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		return prev_cpu;
 
 	/*
-@@ -6304,7 +6304,7 @@ static int select_idle_core(struct task_
+@@ -6306,7 +6306,7 @@ static int select_idle_core(struct task_
 	if (!test_idle_cores(target, false))
 		return -1;
 
@@ -577,7 +577,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
 
-@@ -6338,7 +6338,7 @@ static int select_idle_smt(struct task_s
+@@ -6340,7 +6340,7 @@ static int select_idle_smt(struct task_s
 		return -1;
 
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -586,7 +586,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			continue;
 		if (available_idle_cpu(cpu))
 			return cpu;
-@@ -6401,7 +6401,7 @@ static int select_idle_cpu(struct task_s
+@@ -6403,7 +6403,7 @@ static int select_idle_cpu(struct task_s
 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
 		if (!--nr)
 			return -1;
@@ -595,7 +595,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			continue;
 		if (available_idle_cpu(cpu))
 			break;
-@@ -6438,7 +6438,7 @@ static int select_idle_sibling(struct ta
+@@ -6440,7 +6440,7 @@ static int select_idle_sibling(struct ta
 	    recent_used_cpu != target &&
 	    cpus_share_cache(recent_used_cpu, target) &&
 	    available_idle_cpu(recent_used_cpu) &&
@@ -604,7 +604,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		/*
 		 * Replace recent_used_cpu with prev as it is a potential
 		 * candidate for the next wake:
-@@ -6622,7 +6622,7 @@ select_task_rq_fair(struct task_struct *
+@@ -6624,7 +6624,7 @@ select_task_rq_fair(struct task_struct *
 	if (sd_flag & SD_BALANCE_WAKE) {
 		record_wakee(p);
 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
@@ -613,7 +613,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	}
 
 	rcu_read_lock();
-@@ -7358,14 +7358,14 @@ int can_migrate_task(struct task_struct
+@@ -7360,14 +7360,14 @@ int can_migrate_task(struct task_struct
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) throttled_lb_pair, or
@@ -630,7 +630,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		int cpu;
 
 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -7385,7 +7385,7 @@ int can_migrate_task(struct task_struct
+@@ -7387,7 +7387,7 @@ int can_migrate_task(struct task_struct
 		/* Prevent to re-select dst_cpu via env's CPUs: */
 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -639,7 +639,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 				env->flags |= LBF_DST_PINNED;
 				env->new_dst_cpu = cpu;
 				break;
-@@ -7982,7 +7982,7 @@ check_cpu_capacity(struct rq *rq, struct
+@@ -7984,7 +7984,7 @@ check_cpu_capacity(struct rq *rq, struct
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
@@ -648,7 +648,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  *
  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
-@@ -8597,7 +8597,7 @@ static struct sched_group *find_busiest_
+@@ -8599,7 +8599,7 @@ static struct sched_group *find_busiest_
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assume all things are equal, which typically
@@ -657,7 +657,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	 */
 	if (busiest->group_type == group_imbalanced)
 		goto force_balance;
-@@ -8993,7 +8993,7 @@ static int load_balance(int this_cpu, st
+@@ -8995,7 +8995,7 @@ static int load_balance(int this_cpu, st
 			 * if the curr task on busiest CPU can't be
 			 * moved to this_cpu:
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index 9674b6aa2e5b..bef20c12b1aa 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 #include <linux/vmacache.h>
 #include <linux/nsproxy.h>
 #include <linux/capability.h>
-@@ -693,6 +694,15 @@ void __put_task_struct(struct task_struc
+@@ -692,6 +693,15 @@ void __put_task_struct(struct task_struc
 	WARN_ON(atomic_read(&tsk->usage));
 	WARN_ON(tsk == current);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 72cdd2b3c760..4c1841b6475d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt5
++-rt6
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index cd5be07e264e..365dbb0e5d9f 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -41,7 +41,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
 }
 
 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
-@@ -6793,6 +6795,7 @@ static int raid456_cpu_up_prepare(unsign
+@@ -6799,6 +6801,7 @@ static int raid456_cpu_up_prepare(unsign
 			 __func__, cpu);
 		return -ENOMEM;
 	}
@@ -49,7 +49,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
 	return 0;
 }
 
-@@ -6803,7 +6806,6 @@ static int raid5_alloc_percpu(struct r5c
+@@ -6809,7 +6812,6 @@ static int raid5_alloc_percpu(struct r5c
 	conf->percpu = alloc_percpu(struct raid5_percpu);
 	if (!conf->percpu)
 		return -ENOMEM;
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 27bbab993108..92dc5fe251a6 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	return NULL;
 }
 
-@@ -8011,7 +8034,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8015,7 +8038,7 @@ void zone_pcp_reset(struct zone *zone)
 	struct per_cpu_pageset *pset;
 
 	/* avoid races with drain_pages() */
@@ -221,7 +221,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	if (zone->pageset != &boot_pageset) {
 		for_each_online_cpu(cpu) {
 			pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8020,7 +8043,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8024,7 +8047,7 @@ void zone_pcp_reset(struct zone *zone)
 		free_percpu(zone->pageset);
 		zone->pageset = &boot_pageset;
 	}
diff --git a/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
index 2a477a101630..910cceb4f4fa 100644
--- a/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
+++ b/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
@@ -17,14 +17,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/drivers/of/base.c
 +++ b/drivers/of/base.c
-@@ -108,43 +108,49 @@ void of_populate_phandle_cache(void)
+@@ -108,46 +108,52 @@ void of_populate_phandle_cache(void)
 	u32 cache_entries;
 	struct device_node *np;
 	u32 phandles = 0;
 +	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 -
 -	kfree(phandle_cache);
 +	shadow = phandle_cache;
 	phandle_cache = NULL;
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
 			phandles++;
 
 +	raw_spin_unlock_irqrestore(&devtree_lock, flags);
++	kfree(shadow);
+
+ 	if (!phandles)
+-		goto out;
++		return;
+
 	cache_entries = roundup_pow_of_two(phandles);
 	phandle_cache_mask = cache_entries - 1;
@@ -42,11 +46,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 -	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
 -				GFP_ATOMIC);
 -	if (!phandle_cache)
 -		goto out;
-+	kfree(shadow);
 +	shadow = kcalloc(cache_entries, sizeof(*phandle_cache), GFP_KERNEL);
 +	if (!shadow)
 +		return;
++
 +	raw_spin_lock_irqsave(&devtree_lock, flags);
 +	phandle_cache = shadow;
diff --git a/patches/pci-switchtec-Don-t-use-completion-s-wait-queue.patch b/patches/pci-switchtec-Don-t-use-completion-s-wait-queue.patch
index 3054d40f5225..15db55c5590f 100644
--- a/patches/pci-switchtec-Don-t-use-completion-s-wait-queue.patch
+++ b/patches/pci-switchtec-Don-t-use-completion-s-wait-queue.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/drivers/pci/switch/switchtec.c
 +++ b/drivers/pci/switch/switchtec.c
-@@ -41,10 +41,11 @@ struct switchtec_user {
+@@ -43,10 +43,11 @@ struct switchtec_user {
 
 	enum mrpc_state state;
 
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	u32 cmd;
 	u32 status;
 	u32 return_code;
-@@ -66,7 +67,7 @@ static struct switchtec_user *stuser_cre
+@@ -68,7 +69,7 @@ static struct switchtec_user *stuser_cre
 	stuser->stdev = stdev;
 	kref_init(&stuser->kref);
 	INIT_LIST_HEAD(&stuser->list);
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	stuser->event_cnt = atomic_read(&stdev->event_cnt);
 
 	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
-@@ -149,7 +150,7 @@ static int mrpc_queue_cmd(struct switcht
+@@ -151,7 +152,7 @@ static int mrpc_queue_cmd(struct switcht
 	kref_get(&stuser->kref);
 	stuser->read_len = sizeof(stuser->data);
 	stuser_set_state(stuser, MRPC_QUEUED);
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	list_add_tail(&stuser->list, &stdev->mrpc_queue);
 
 	mrpc_cmd_submit(stdev);
-@@ -186,7 +187,8 @@ static void mrpc_complete_cmd(struct swi
+@@ -188,7 +189,8 @@ static void mrpc_complete_cmd(struct swi
 			    stuser->read_len);
 
 out:
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	list_del_init(&stuser->list);
 	stuser_put(stuser);
 	stdev->mrpc_busy = 0;
-@@ -456,10 +458,11 @@ static ssize_t switchtec_dev_read(struct
+@@ -458,10 +460,11 @@ static ssize_t switchtec_dev_read(struct
 	mutex_unlock(&stdev->mrpc_mutex);
 
 	if (filp->f_flags & O_NONBLOCK) {
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		if (rc < 0)
 			return rc;
 	}
-@@ -507,7 +510,7 @@ static __poll_t switchtec_dev_poll(struc
+@@ -509,7 +512,7 @@ static __poll_t switchtec_dev_poll(struc
 	struct switchtec_dev *stdev = stuser->stdev;
 	__poll_t ret = 0;
 
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	poll_wait(filp, &stdev->event_wq, wait);
 
 	if (lock_mutex_and_test_alive(stdev))
-@@ -515,7 +518,7 @@ static __poll_t switchtec_dev_poll(struc
+@@ -517,7 +520,7 @@ static __poll_t switchtec_dev_poll(struc
 
 	mutex_unlock(&stdev->mrpc_mutex);
 
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		ret |= EPOLLIN | EPOLLRDNORM;
 
 	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
-@@ -1034,7 +1037,8 @@ static void stdev_kill(struct switchtec_
+@@ -1038,7 +1041,8 @@ static void stdev_kill(struct switchtec_
 
 	/* Wake up and kill any users waiting on an MRPC request */
 	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
diff --git a/patches/peterz-percpu-rwsem-rt.patch b/patches/peterz-percpu-rwsem-rt.patch
index 211864b4a9d4..8963a463607d 100644
--- a/patches/peterz-percpu-rwsem-rt.patch
+++ b/patches/peterz-percpu-rwsem-rt.patch
@@ -142,7 +142,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
 	locks_dispose_list(&dispose);
 	return error;
 }
-@@ -2533,13 +2533,13 @@ locks_remove_lease(struct file *filp, st
+@@ -2540,13 +2540,13 @@ locks_remove_lease(struct file *filp, st
 	if (list_empty(&ctx->flc_lease))
 		return;
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index ac8cb868e7ed..41a08e644a24 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -1563,6 +1563,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1564,6 +1564,9 @@ static void rt_mutex_init_task(struct ta
  */
 static void posix_cpu_timers_init(struct task_struct *tsk)
 {
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 985db27d6ebd..aaa7764da869 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -388,7 +388,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 EXPORT_SYMBOL(migrate_enable);
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -4336,7 +4336,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4338,7 +4338,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
 	if (delta_exec > ideal_runtime) {
@@ -397,7 +397,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 		/*
 		 * The current task ran long enough, ensure it doesn't get
 		 * re-elected due to buddy favours.
-@@ -4360,7 +4360,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4362,7 +4362,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
 		return;
 
 	if (delta > ideal_runtime)
@@ -406,7 +406,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 static void
-@@ -4502,7 +4502,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -4504,7 +4504,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
 	 * validating it and just reschedule.
 	 */
 	if (queued) {
@@ -415,7 +415,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 		return;
 	}
 	/*
-@@ -4686,7 +4686,7 @@ static void __account_cfs_rq_runtime(str
+@@ -4688,7 +4688,7 @@ static void __account_cfs_rq_runtime(str
 	 * hierarchy can be throttled
 	 */
 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -424,7 +424,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 static __always_inline
-@@ -5340,7 +5340,7 @@ static void hrtick_start_fair(struct rq
+@@ -5342,7 +5342,7 @@ static void hrtick_start_fair(struct rq
 
 		if (delta < 0) {
 			if (rq->curr == p)
@@ -433,7 +433,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 			return;
 		}
 		hrtick_start(rq, delta);
-@@ -6881,7 +6881,7 @@ static void check_preempt_wakeup(struct
+@@ -6883,7 +6883,7 @@ static void check_preempt_wakeup(struct
 	return;
 
 preempt:
@@ -442,7 +442,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	/*
 	 * Only set the backward buddy when the current task is still
 	 * on the rq. This can happen when a wakeup gets interleaved
-@@ -9967,7 +9967,7 @@ static void task_fork_fair(struct task_s
+@@ -9969,7 +9969,7 @@ static void task_fork_fair(struct task_s
 		 * 'current' within the tree based on its new key value.
 		 */
 		swap(curr->vruntime, se->vruntime);
@@ -451,7 +451,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	}
 
 	se->vruntime -= cfs_rq->min_vruntime;
-@@ -9991,7 +9991,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -9993,7 +9993,7 @@ prio_changed_fair(struct rq *rq, struct
 	 */
 	if (rq->curr == p) {
 		if (p->prio > oldprio)
diff --git a/patches/rtmutex-add-sleeping-lock-implementation.patch b/patches/rtmutex-add-sleeping-lock-implementation.patch
index d7ff68cdf0f4..64fbdf67259a 100644
--- a/patches/rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/rtmutex-add-sleeping-lock-implementation.patch
@@ -381,7 +381,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 +#endif
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -895,6 +895,7 @@ static struct task_struct *dup_task_stru
+@@ -894,6 +894,7 @@ static struct task_struct *dup_task_stru
 	tsk->splice_pipe = NULL;
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index d475abf66f68..73100683a4cd 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 
 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -671,7 +671,9 @@ static inline void put_signal_struct(str
+@@ -670,7 +670,9 @@ static inline void put_signal_struct(str
 	if (atomic_dec_and_test(&sig->sigcnt))
 		free_signal_struct(sig);
 }
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 void __put_task_struct(struct task_struct *tsk)
 {
 	WARN_ON(!tsk->exit_state);
-@@ -688,7 +690,18 @@ void __put_task_struct(struct task_struc
+@@ -687,7 +689,18 @@ void __put_task_struct(struct task_struc
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
 }
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index eae2614e2aaa..7d618320898a 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  * @mm: The address space to pin.
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -637,6 +637,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -636,6 +636,19 @@ void __mmdrop(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 54fbf6d3d463..4dbdfd666b20 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -1759,6 +1759,7 @@ static __latent_entropy struct task_stru
+@@ -1760,6 +1760,7 @@ static __latent_entropy struct task_stru
 	spin_lock_init(&p->alloc_lock);
 
 	init_sigpending(&p->pending);
diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch
index 83ad5e40f596..3a5cb4f945b2 100644
--- a/patches/timers-prepare-for-full-preemption.patch
+++ b/patches/timers-prepare-for-full-preemption.patch
@@ -144,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 			raw_spin_lock_irq(&base->lock);
 		}
 	}
-@@ -1665,8 +1699,8 @@ static inline void __run_timers(struct t
+@@ -1681,8 +1715,8 @@ static inline void __run_timers(struct t
 		while (levels--)
 			expire_timers(base, heads + levels);
 	}
@@ -154,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 /*
-@@ -1924,6 +1958,9 @@ static void __init init_timer_cpu(int cp
+@@ -1927,6 +1961,9 @@ static void __init init_timer_cpu(int cp
 		base->cpu = cpu;
 		raw_spin_lock_init(&base->lock);
 		base->clk = jiffies;
diff --git a/patches/workqueue-use-locallock.patch b/patches/workqueue-use-locallock.patch
index 9b26ccbcc83b..9b21c6d4c61e 100644
--- a/patches/workqueue-use-locallock.patch
+++ b/patches/workqueue-use-locallock.patch
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 /**
-@@ -2984,7 +2999,7 @@ static bool __cancel_work_timer(struct w
+@@ -2999,7 +3014,7 @@ static bool __cancel_work_timer(struct w
 	/* tell other tasks trying to grab @work to back off */
 	mark_work_canceling(work);
@@ -154,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 
 	/*
 	 * This allows canceling during early boot.  We know that @work
-@@ -3045,10 +3060,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -3060,10 +3075,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  */
 bool flush_delayed_work(struct delayed_work *dwork)
 {
@@ -167,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	return flush_work(&dwork->work);
 }
 EXPORT_SYMBOL(flush_delayed_work);
-@@ -3086,7 +3101,7 @@ static bool __cancel_work(struct work_st
+@@ -3101,7 +3116,7 @@ static bool __cancel_work(struct work_st
 		return false;
 
 	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
diff --git a/patches/workqueue-use-rcu.patch b/patches/workqueue-use-rcu.patch
index cd981f95498b..bfd7d95a8e6e 100644
--- a/patches/workqueue-use-rcu.patch
+++ b/patches/workqueue-use-rcu.patch
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 /**
-@@ -2851,14 +2855,14 @@ static bool start_flush_work(struct work
+@@ -2855,14 +2859,14 @@ static bool start_flush_work(struct work
 
 	might_sleep();
 
@@ -194,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	/* see the comment in try_to_grab_pending() with the same code */
 	pwq = get_work_pwq(work);
 	if (pwq) {
-@@ -2889,10 +2893,11 @@ static bool start_flush_work(struct work
+@@ -2894,10 +2898,11 @@ static bool start_flush_work(struct work
 		lock_map_acquire(&pwq->wq->lockdep_map);
 		lock_map_release(&pwq->wq->lockdep_map);
 	}
@@ -207,7 +207,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	return false;
 }
 
-@@ -3326,7 +3331,7 @@ static void rcu_free_pool(struct rcu_hea
+@@ -3341,7 +3346,7 @@ static void rcu_free_pool(struct rcu_hea
  * put_unbound_pool - put a worker_pool
  * @pool: worker_pool to put
  *
@@ -216,7 +216,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 * safe manner.  get_unbound_pool() calls this function on its failure path
 * and this function should be able to release pools which went through,
 * successfully or not, init_worker_pool().
-@@ -3380,8 +3385,8 @@ static void put_unbound_pool(struct work
+@@ -3395,8 +3400,8 @@ static void put_unbound_pool(struct work
 	del_timer_sync(&pool->idle_timer);
 	del_timer_sync(&pool->mayday_timer);
 
@@ -227,7 +227,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 /**
-@@ -3488,14 +3493,14 @@ static void pwq_unbound_release_workfn(s
+@@ -3503,14 +3508,14 @@ static void pwq_unbound_release_workfn(s
 		put_unbound_pool(pool);
 		mutex_unlock(&wq_pool_mutex);
 	}
@@ -244,7 +244,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 /**
-@@ -4180,7 +4185,7 @@ void destroy_workqueue(struct workqueue_
+@@ -4195,7 +4200,7 @@ void destroy_workqueue(struct workqueue_
 		 * The base ref is never dropped on per-cpu pwqs.  Directly
 		 * schedule RCU free.
 		 */
@@ -253,7 +253,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	} else {
 		/*
 		 * We're the sole accessor of @wq at this point.  Directly
-@@ -4290,7 +4295,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4305,7 +4310,8 @@ bool workqueue_congested(int cpu, struct
 	struct pool_workqueue *pwq;
 	bool ret;
 
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	if (cpu == WORK_CPU_UNBOUND)
 		cpu = smp_processor_id();
 
-@@ -4301,7 +4307,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4316,7 +4322,8 @@ bool workqueue_congested(int cpu, struct
 	pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
 	ret = !list_empty(&pwq->delayed_works);
 
@@ -273,7 +273,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	return ret;
 }
 
-@@ -4327,15 +4334,15 @@ unsigned int work_busy(struct work_struc
+@@ -4342,15 +4349,15 @@ unsigned int work_busy(struct work_struc
 	if (work_pending(work))
 		ret |= WORK_BUSY_PENDING;
 
@@ -293,7 +293,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	return ret;
 }
 
-@@ -4519,7 +4526,7 @@ void show_workqueue_state(void)
+@@ -4534,7 +4541,7 @@ void show_workqueue_state(void)
 	unsigned long flags;
 	int pi;
 
@@ -302,7 +302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 
 	pr_info("Showing busy workqueues and worker pools:\n");
 
-@@ -4584,7 +4591,7 @@ void show_workqueue_state(void)
+@@ -4599,7 +4606,7 @@ void show_workqueue_state(void)
 		touch_nmi_watchdog();
 	}
 
@@ -311,7 +311,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 /* used to show worker information through /proc/PID/{comm,stat,status} */
-@@ -4971,16 +4978,16 @@ bool freeze_workqueues_busy(void)
+@@ -4986,16 +4993,16 @@ bool freeze_workqueues_busy(void)
 		 * nr_active is monotonically decreasing.  It's safe
 		 * to peek without lock.
 		 */
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	}
 out_unlock:
 	mutex_unlock(&wq_pool_mutex);
-@@ -5175,7 +5182,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5190,7 +5197,8 @@ static ssize_t wq_pool_ids_show(struct d
 	const char *delim = "";
 	int node, written = 0;
 
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	for_each_node(node) {
 		written += scnprintf(buf + written, PAGE_SIZE - written,
 				     "%s%d:%d", delim, node,
-@@ -5183,7 +5191,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5198,7 +5206,8 @@ static ssize_t wq_pool_ids_show(struct d
 		delim = " ";
 	}
 	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 8109c25c4ac0..11f5372a4efa 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -6597,6 +6597,13 @@ int kvm_arch_init(void *opaque)
+@@ -6605,6 +6605,13 @@ int kvm_arch_init(void *opaque)
 		goto out;
 	}
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index b9a4d870b542..d13edf92d5b7 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
 @@ -704,7 +704,23 @@ GLOBAL(swapgs_restore_regs_and_return_to
-	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
+	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
 	jnc	1f
 0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
 +#ifndef CONFIG_PREEMPT_LAZY
 +	jnz	1f
 +
 +	movq	PER_CPU_VAR(current_task), %rcx
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 +	cmpl	$0, TASK_TI_preempt_lazy_count(%rcx)
 +	jnz	1f
 +
-+	bt	$TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
++	btl	$TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
 +	jnc	1f
 +do_preempt_schedule_irq:
 +#endif