 26 files changed, 80 insertions(+), 147 deletions(-)
diff --git a/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch b/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
index 0fc44f71910d..f0d2b0346232 100644
--- a/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
+++ b/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -3673,15 +3673,15 @@ static void fill_xsave(u8 *dest, struct
+@@ -3675,15 +3675,15 @@ static void fill_xsave(u8 *dest, struct
   */
  valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
  while (valid) {
@@ -148,7 +148,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  			memcpy(dest + offset, &vcpu->arch.pkru,
  			       sizeof(vcpu->arch.pkru));
  		else
-@@ -3689,7 +3689,7 @@ static void fill_xsave(u8 *dest, struct
+@@ -3691,7 +3691,7 @@ static void fill_xsave(u8 *dest, struct
  	}
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	}
  }
-@@ -3716,22 +3716,22 @@ static void load_xsave(struct kvm_vcpu *
+@@ -3718,22 +3718,22 @@ static void load_xsave(struct kvm_vcpu *
   */
  valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
  while (valid) {
@@ -186,7 +186,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	}
  }
-@@ -8841,11 +8841,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcp
+@@ -8839,11 +8839,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcp
  	if (init_event)
  		kvm_put_guest_fpu(vcpu);
  	mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
diff --git a/patches/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch b/patches/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch
index adc38e1c89c8..2b0de0fe3e67 100644
--- a/patches/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch
+++ b/patches/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
 --- a/arch/x86/kvm/vmx/vmx.c
 +++ b/arch/x86/kvm/vmx/vmx.c
-@@ -6630,7 +6630,7 @@ static void vmx_vcpu_run(struct kvm_vcpu
+@@ -6632,7 +6632,7 @@ static void vmx_vcpu_run(struct kvm_vcpu
   */
  if (static_cpu_has(X86_FEATURE_PKU) &&
      kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
diff --git a/patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch b/patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
index 233b8fd6df23..7e86a2c0509f 100644
--- a/patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
+++ b/patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
@@ -553,7 +553,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	if (unlikely(vcpu->arch.switch_db_regs)) {
  		set_debugreg(0, 7);
  		set_debugreg(vcpu->arch.eff_db[0], 0);
-@@ -8128,22 +8132,30 @@ static int complete_emulated_mmio(struct
+@@ -8126,22 +8130,30 @@ static int complete_emulated_mmio(struct
  /* Swap (qemu) user FPU context for the guest FPU context. */
  static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
  {
diff --git a/patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch b/patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch
deleted file mode 100644
index 9a0fa6413899..000000000000
--- a/patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From: Yang Shi <yang.shi@linaro.org>
-Date: Thu, 10 Nov 2016 16:17:55 -0800
-Subject: [PATCH] arm: kprobe: replace patch_lock to raw lock
-
-When running kprobe on -rt kernel, the below bug is caught:
-
-BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931
-in_atomic(): 1, irqs_disabled(): 128, pid: 14, name: migration/0
-INFO: lockdep is turned off.
-irq event stamp: 238
-hardirqs last enabled at (237): [<80b5aecc>] _raw_spin_unlock_irqrestore+0x88/0x90
-hardirqs last disabled at (238): [<80b56d88>] __schedule+0xec/0x94c
-softirqs last enabled at (0): [<80225584>] copy_process.part.5+0x30c/0x1994
-softirqs last disabled at (0): [< (null)>] (null)
-Preemption disabled at:[<802f2b98>] cpu_stopper_thread+0xc0/0x140
-
-CPU: 0 PID: 14 Comm: migration/0 Tainted: G O 4.8.3-rt2 #1
-Hardware name: Freescale LS1021A
-[<80212e7c>] (unwind_backtrace) from [<8020cd2c>] (show_stack+0x20/0x24)
-[<8020cd2c>] (show_stack) from [<80689e14>] (dump_stack+0xa0/0xcc)
-[<80689e14>] (dump_stack) from [<8025a43c>] (___might_sleep+0x1b8/0x2a4)
-[<8025a43c>] (___might_sleep) from [<80b5b324>] (rt_spin_lock+0x34/0x74)
-[<80b5b324>] (rt_spin_lock) from [<80b5c31c>] (__patch_text_real+0x70/0xe8)
-[<80b5c31c>] (__patch_text_real) from [<80b5c3ac>] (patch_text_stop_machine+0x18/0x20)
-[<80b5c3ac>] (patch_text_stop_machine) from [<802f2920>] (multi_cpu_stop+0xfc/0x134)
-[<802f2920>] (multi_cpu_stop) from [<802f2ba0>] (cpu_stopper_thread+0xc8/0x140)
-[<802f2ba0>] (cpu_stopper_thread) from [<802563a4>] (smpboot_thread_fn+0x1a4/0x354)
-[<802563a4>] (smpboot_thread_fn) from [<80251d38>] (kthread+0x104/0x11c)
-[<80251d38>] (kthread) from [<80207f70>] (ret_from_fork+0x14/0x24)
-
-Since patch_text_stop_machine() is called in stop_machine() which disables IRQ,
-sleepable lock should be not used in this atomic context, so replace patch_lock
-to raw lock.
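
For context on the deletion above: on PREEMPT_RT, spinlock_t is backed by a sleeping rtmutex, while raw_spinlock_t keeps the non-sleeping, spinning behaviour, so only the raw variant may be taken while interrupts are disabled, as under stop_machine(). A minimal sketch of the conversion pattern the removed patch applied; the identifiers below are illustrative, not taken from the patch:

    #include <linux/spinlock.h>

    /*
     * On PREEMPT_RT, spinlock_t becomes a sleeping lock; taking it with
     * IRQs off triggers the "BUG: sleeping function called from invalid
     * context" splat quoted in the commit message above.
     */
    static DEFINE_SPINLOCK(example_sleeping_lock);

    /* raw_spinlock_t keeps spinning on PREEMPT_RT and stays atomic-safe. */
    static DEFINE_RAW_SPINLOCK(example_raw_lock);

    static void example_atomic_section(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_raw_lock, flags);
            /* Critical section: IRQs off, sleeping is forbidden. */
            raw_spin_unlock_irqrestore(&example_raw_lock, flags);
    }

The conversion is only appropriate because the patch_map()/patch_unmap() critical sections are short and bounded; a raw lock trades preemptibility for atomic-context safety.
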
-
-Signed-off-by: Yang Shi <yang.shi@linaro.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/kernel/patch.c | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
---- a/arch/arm/kernel/patch.c
-+++ b/arch/arm/kernel/patch.c
-@@ -16,7 +16,7 @@ struct patch {
- 	unsigned int insn;
- };
-
--static DEFINE_SPINLOCK(patch_lock);
-+static DEFINE_RAW_SPINLOCK(patch_lock);
-
- static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
- 	__acquires(&patch_lock)
-@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *a
- 		return addr;
-
- 	if (flags)
--		spin_lock_irqsave(&patch_lock, *flags);
-+		raw_spin_lock_irqsave(&patch_lock, *flags);
- 	else
- 		__acquire(&patch_lock);
-
-@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fi
- 	clear_fixmap(fixmap);
-
- 	if (flags)
--		spin_unlock_irqrestore(&patch_lock, *flags);
-+		raw_spin_unlock_irqrestore(&patch_lock, *flags);
- 	else
- 		__release(&patch_lock);
- }
diff --git a/patches/block-mq-use-cpu_light.patch b/patches/block-mq-use-cpu_light.patch
index a2d96e843417..56e59ea8ebf4 100644
--- a/patches/block-mq-use-cpu_light.patch
+++ b/patches/block-mq-use-cpu_light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/block/blk-mq.h
 +++ b/block/blk-mq.h
-@@ -153,12 +153,12 @@ static inline struct blk_mq_ctx *__blk_m
+@@ -151,12 +151,12 @@ static inline struct blk_mq_ctx *__blk_m
   */
  static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
  {
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index a96787eea90e..dc909c45e94b 100644
--- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	timer->function = perf_mux_hrtimer_handler;
  }
-@@ -9210,7 +9210,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -9212,7 +9212,7 @@ static void perf_swevent_init_hrtimer(st
  	if (!is_sampling_event(event))
  		return;
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -4891,9 +4891,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4916,9 +4916,9 @@ void init_cfs_bandwidth(struct cfs_bandw
  	cfs_b->period = ns_to_ktime(default_cfs_period());
  	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
diff --git a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
index b8c1fb76840d..786d62499ada 100644
--- a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
+++ b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
@@ -26,7 +26,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -3373,10 +3373,9 @@ static bool blk_mq_poll_hybrid_sleep(str
+@@ -3377,10 +3377,9 @@ static bool blk_mq_poll_hybrid_sleep(str
  	kt = nsecs;
  	mode = HRTIMER_MODE_REL;
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index dd1e7013dedd..8ae8b1a501f0 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -553,7 +553,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  			continue;
  		env->dst_cpu = cpu;
-@@ -5760,7 +5760,7 @@ find_idlest_group(struct sched_domain *s
+@@ -5785,7 +5785,7 @@ find_idlest_group(struct sched_domain *s
  		/* Skip over this group if it has no CPUs allowed */
  		if (!cpumask_intersects(sched_group_span(group),
@@ -562,7 +562,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  			continue;
  		local_group = cpumask_test_cpu(this_cpu,
-@@ -5892,7 +5892,7 @@ find_idlest_group_cpu(struct sched_group
+@@ -5917,7 +5917,7 @@ find_idlest_group_cpu(struct sched_group
  		return cpumask_first(sched_group_span(group));
  	/* Traverse only the allowed CPUs */
@@ -571,7 +571,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  		if (available_idle_cpu(i)) {
  			struct rq *rq = cpu_rq(i);
  			struct cpuidle_state *idle = idle_get_state(rq);
-@@ -5932,7 +5932,7 @@ static inline int find_idlest_cpu(struct
+@@ -5957,7 +5957,7 @@ static inline int find_idlest_cpu(struct
  {
  	int new_cpu = cpu;
@@ -580,7 +580,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  		return prev_cpu;
  	/*
-@@ -6049,7 +6049,7 @@ static int select_idle_core(struct task_
+@@ -6074,7 +6074,7 @@ static int select_idle_core(struct task_
  	if (!test_idle_cores(target, false))
  		return -1;
@@ -589,7 +589,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	for_each_cpu_wrap(core, cpus, target) {
  		bool idle = true;
-@@ -6083,7 +6083,7 @@ static int select_idle_smt(struct task_s
+@@ -6108,7 +6108,7 @@ static int select_idle_smt(struct task_s
  		return -1;
  	for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -598,7 +598,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  			continue;
  		if (available_idle_cpu(cpu))
  			return cpu;
-@@ -6146,7 +6146,7 @@ static int select_idle_cpu(struct task_s
+@@ -6171,7 +6171,7 @@ static int select_idle_cpu(struct task_s
  	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
  		if (!--nr)
  			return -1;
@@ -607,7 +607,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  			continue;
  		if (available_idle_cpu(cpu))
  			break;
-@@ -6183,7 +6183,7 @@ static int select_idle_sibling(struct ta
+@@ -6208,7 +6208,7 @@ static int select_idle_sibling(struct ta
  	    recent_used_cpu != target &&
  	    cpus_share_cache(recent_used_cpu, target) &&
  	    available_idle_cpu(recent_used_cpu) &&
@@ -616,7 +616,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  		/*
  		 * Replace recent_used_cpu with prev as it is a potential
  		 * candidate for the next wake:
-@@ -6529,7 +6529,7 @@ static int find_energy_efficient_cpu(str
+@@ -6554,7 +6554,7 @@ static int find_energy_efficient_cpu(str
  		int max_spare_cap_cpu = -1;
  		for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
@@ -625,7 +625,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  				continue;
  			/* Skip CPUs that will be overutilized. */
-@@ -6618,7 +6618,7 @@ select_task_rq_fair(struct task_struct *
+@@ -6643,7 +6643,7 @@ select_task_rq_fair(struct task_struct *
  	}
  	want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
@@ -634,7 +634,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	}
  	rcu_read_lock();
-@@ -7368,14 +7368,14 @@ int can_migrate_task(struct task_struct
+@@ -7393,14 +7393,14 @@ int can_migrate_task(struct task_struct
  	/*
  	 * We do not migrate tasks that are:
  	 * 1) throttled_lb_pair, or
@@ -651,7 +651,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  		int cpu;
  		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -7395,7 +7395,7 @@ int can_migrate_task(struct task_struct
+@@ -7420,7 +7420,7 @@ int can_migrate_task(struct task_struct
  		/* Prevent to re-select dst_cpu via env's CPUs: */
  		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -660,7 +660,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  				env->flags |= LBF_DST_PINNED;
  				env->new_dst_cpu = cpu;
  				break;
-@@ -7992,7 +7992,7 @@ check_cpu_capacity(struct rq *rq, struct
+@@ -8017,7 +8017,7 @@ check_cpu_capacity(struct rq *rq, struct
  /*
   * Group imbalance indicates (and tries to solve) the problem where balancing
@@ -669,7 +669,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
   *
   * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
   * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
-@@ -8663,7 +8663,7 @@ static struct sched_group *find_busiest_
+@@ -8688,7 +8688,7 @@ static struct sched_group *find_busiest_
  	/*
  	 * If the busiest group is imbalanced the below checks don't
  	 * work because they assume all things are equal, which typically
@@ -678,7 +678,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	 */
  	if (busiest->group_type == group_imbalanced)
  		goto force_balance;
-@@ -9091,7 +9091,7 @@ static int load_balance(int this_cpu, st
+@@ -9116,7 +9116,7 @@ static int load_balance(int this_cpu, st
  		 * if the curr task on busiest CPU can't be
  		 * moved to this_cpu:
  		 */
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 72cdd2b3c760..4c1841b6475d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt5
++-rt6
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 6ebe432afd7b..de58c775117e 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  /* Whether legacy memory+swap accounting is active */
  static bool do_memsw_account(void)
  {
-@@ -4853,12 +4856,12 @@ static int mem_cgroup_move_account(struc
+@@ -4869,12 +4872,12 @@ static int mem_cgroup_move_account(struc
  	ret = 0;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  out_unlock:
  	unlock_page(page);
  out:
-@@ -5977,10 +5980,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5993,10 +5996,10 @@ void mem_cgroup_commit_charge(struct pag
  	commit_charge(page, memcg, lrucare);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	if (do_memsw_account() && PageSwapCache(page)) {
  		swp_entry_t entry = { .val = page_private(page) };
-@@ -6049,7 +6052,7 @@ static void uncharge_batch(const struct
+@@ -6065,7 +6068,7 @@ static void uncharge_batch(const struct
  		memcg_oom_recover(ug->memcg);
  	}
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
  	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
  	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
-@@ -6057,7 +6060,7 @@ static void uncharge_batch(const struct
+@@ -6073,7 +6076,7 @@ static void uncharge_batch(const struct
  	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
  	__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
  	memcg_check_events(ug->memcg, ug->dummy_page);
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	if (!mem_cgroup_is_root(ug->memcg))
  		css_put_many(&ug->memcg->css, nr_pages);
-@@ -6220,10 +6223,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -6236,10 +6239,10 @@ void mem_cgroup_migrate(struct page *old
  	commit_charge(newpage, memcg, false);
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
  DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -6415,6 +6418,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6431,6 +6434,7 @@ void mem_cgroup_swapout(struct page *pag
  	struct mem_cgroup *memcg, *swap_memcg;
  	unsigned int nr_entries;
  	unsigned short oldid;
 +	unsigned long flags;
  	VM_BUG_ON_PAGE(PageLRU(page), page);
  	VM_BUG_ON_PAGE(page_count(page), page);
-@@ -6460,13 +6464,17 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6476,13 +6480,17 @@ void mem_cgroup_swapout(struct page *pag
  	 * important here to have the interrupts disabled because it is the
  	 * only synchronisation we have for updating the per-CPU variables.
  	 */
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index b02b82c87909..5ab4f98c3fea 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  {
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -6020,6 +6020,7 @@ bool napi_schedule_prep(struct napi_stru
+@@ -6036,6 +6036,7 @@ bool napi_schedule_prep(struct napi_stru
  }
  EXPORT_SYMBOL(napi_schedule_prep);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  /**
   * __napi_schedule_irqoff - schedule for receive
   * @n: entry to schedule
-@@ -6031,6 +6032,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -6047,6 +6048,7 @@ void __napi_schedule_irqoff(struct napi_
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  }
  EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch b/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch
index 42f55b615d42..43b860f5d611 100644
--- a/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch
+++ b/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -4566,11 +4566,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4580,11 +4580,9 @@ int netif_rx_ni(struct sk_buff *skb)
  	trace_netif_rx_ni_entry(skb);
diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index 22e781cf5ac6..0e1585581709 100644
--- a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3486,7 +3486,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3500,7 +3500,11 @@ static inline int __dev_xmit_skb(struct
  	 * This permits qdisc->running owner to get the lock more
  	 * often and dequeue packets faster.
  	 */
diff --git a/patches/net-make-devnet_rename_seq-a-mutex.patch b/patches/net-make-devnet_rename_seq-a-mutex.patch
index 93644fe9290b..58d07eecba75 100644
--- a/patches/net-make-devnet_rename_seq-a-mutex.patch
+++ b/patches/net-make-devnet_rename_seq-a-mutex.patch
@@ -39,8 +39,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  		goto retry;
  	}
-@@ -1187,20 +1189,17 @@ int dev_change_name(struct net_device *d
- 	if (dev->flags & IFF_UP)
+@@ -1201,20 +1203,17 @@ int dev_change_name(struct net_device *d
+ 	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
  		return -EBUSY;
 -	write_seqcount_begin(&devnet_rename_seq);
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	if (oldname[0] && !strchr(oldname, '%'))
  		netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1213,11 +1212,12 @@ int dev_change_name(struct net_device *d
+@@ -1227,11 +1226,12 @@ int dev_change_name(struct net_device *d
  	if (ret) {
  		memcpy(dev->name, oldname, IFNAMSIZ);
  		dev->name_assign_type = old_assign_type;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	netdev_adjacent_rename_links(dev, oldname);
-@@ -1238,7 +1238,8 @@ int dev_change_name(struct net_device *d
+@@ -1252,7 +1252,8 @@ int dev_change_name(struct net_device *d
  	/* err >= 0 after dev_alloc_name() or stores the first errno */
  	if (err >= 0) {
  		err = ret;
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  		memcpy(dev->name, oldname, IFNAMSIZ);
  		memcpy(oldname, newname, IFNAMSIZ);
  		dev->name_assign_type = old_assign_type;
-@@ -1251,6 +1252,11 @@ int dev_change_name(struct net_device *d
+@@ -1265,6 +1266,11 @@ int dev_change_name(struct net_device *d
  	}
  	return err;
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index b3607b146101..2c2d5f94dde3 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  /*
   * Time (in jiffies) of last Tx
   */
-@@ -2648,14 +2652,53 @@ void netdev_freemem(struct net_device *d
+@@ -2651,14 +2655,53 @@ void netdev_freemem(struct net_device *d
  void synchronize_net(void);
  int init_dummy_netdev(struct net_device *dev);
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  struct net_device *dev_get_by_index(struct net *net, int ifindex);
  struct net_device *__dev_get_by_index(struct net *net, int ifindex);
  struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-@@ -3865,10 +3908,48 @@ static inline u32 netif_msg_init(int deb
+@@ -3868,10 +3911,48 @@ static inline u32 netif_msg_init(int deb
  	return (1U << debug_value) - 1;
  }
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
  static inline bool __netif_tx_acquire(struct netdev_queue *txq)
-@@ -3885,32 +3966,32 @@ static inline void __netif_tx_release(st
+@@ -3888,32 +3969,32 @@ static inline void __netif_tx_release(st
  static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
  {
  	spin_lock_bh(&txq->_xmit_lock);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  struct task_struct *oom_reaper_list;
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3558,8 +3558,10 @@ static void skb_update_prio(struct sk_bu
+@@ -3572,8 +3572,10 @@ static void skb_update_prio(struct sk_bu
  #define skb_update_prio(skb)
  #endif
@@ -204,7 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  /**
   * dev_loopback_xmit - loop back @skb
-@@ -3850,9 +3852,12 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3864,9 +3866,12 @@ static int __dev_queue_xmit(struct sk_bu
  	if (dev->flags & IFF_UP) {
  		int cpu = smp_processor_id(); /* ok because BHs are off */
@@ -219,7 +219,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  			goto recursion_alert;
  		skb = validate_xmit_skb(skb, dev, &again);
-@@ -3862,9 +3867,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3876,9 +3881,9 @@ static int __dev_queue_xmit(struct sk_bu
  			HARD_TX_LOCK(dev, txq, cpu);
  			if (!netif_xmit_stopped(txq)) {
@@ -231,7 +231,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  				if (dev_xmit_complete(rc)) {
  					HARD_TX_UNLOCK(dev, txq);
  					goto out;
-@@ -8485,7 +8490,7 @@ static void netdev_init_one_queue(struct
+@@ -8501,7 +8506,7 @@ static void netdev_init_one_queue(struct
  	/* Initialize queue lock */
  	spin_lock_init(&queue->_xmit_lock);
  	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index 4043a953866c..055e61828d12 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  void raise_softirq_irqoff(unsigned int nr)
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -6450,7 +6450,7 @@ static __latent_entropy void net_rx_acti
+@@ -6466,7 +6466,7 @@ static __latent_entropy void net_rx_acti
  		list_splice_tail(&repoll, &list);
  		list_splice(&list, &sd->poll_list);
  		if (!list_empty(&sd->poll_list))
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index ce3a94694851..9721ff961d99 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -441,7 +441,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  }
  static __always_inline
-@@ -5050,7 +5050,7 @@ static void hrtick_start_fair(struct rq
+@@ -5075,7 +5075,7 @@ static void hrtick_start_fair(struct rq
  	if (delta < 0) {
  		if (rq->curr == p)
@@ -450,7 +450,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  		return;
  	}
  	hrtick_start(rq, delta);
-@@ -6879,7 +6879,7 @@ static void check_preempt_wakeup(struct
+@@ -6904,7 +6904,7 @@ static void check_preempt_wakeup(struct
  		return;
  preempt:
@@ -459,7 +459,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	/*
  	 * Only set the backward buddy when the current task is still
  	 * on the rq. This can happen when a wakeup gets interleaved
-@@ -10075,7 +10075,7 @@ static void task_fork_fair(struct task_s
+@@ -10100,7 +10100,7 @@ static void task_fork_fair(struct task_s
  	 * 'current' within the tree based on its new key value.
  	 */
  	swap(curr->vruntime, se->vruntime);
@@ -468,7 +468,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	}
  	se->vruntime -= cfs_rq->min_vruntime;
-@@ -10099,7 +10099,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -10124,7 +10124,7 @@ prio_changed_fair(struct rq *rq, struct
  	 */
  	if (rq->curr == p) {
  		if (p->prio > oldprio)
diff --git a/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch b/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
index 9f3f0c5e6d0e..da5d6d390ad7 100644
--- a/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
+++ b/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -4891,9 +4891,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4916,9 +4916,9 @@ void init_cfs_bandwidth(struct cfs_bandw
  	cfs_b->period = ns_to_ktime(default_cfs_period());
  	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
diff --git a/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch b/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
index 15ded09b6467..d0d447b09ec8 100644
--- a/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
+++ b/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
@@ -114,20 +114,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
 
  /*
-@@ -4863,20 +4864,21 @@ static enum hrtimer_restart sched_cfs_pe
+@@ -4865,11 +4866,12 @@ static enum hrtimer_restart sched_cfs_pe
  {
  	struct cfs_bandwidth *cfs_b =
  		container_of(timer, struct cfs_bandwidth, period_timer);
 +	unsigned long flags;
  	int overrun;
  	int idle = 0;
 +	int count = 0;
 
 -	raw_spin_lock(&cfs_b->lock);
 +	raw_spin_lock_irqsave(&cfs_b->lock, flags);
  	for (;;) {
  		overrun = hrtimer_forward_now(timer, cfs_b->period);
  		if (!overrun)
-			break;
+@@ -4897,11 +4899,11 @@ static enum hrtimer_restart sched_cfs_pe
+			count = 0;
+		}
 
 -		idle = do_sched_cfs_period_timer(cfs_b, overrun);
 +		idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 039b11a60880..686616fd62c0 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -50,9 +50,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 +# define mmdrop_delayed(mm)	mmdrop(mm)
 +#endif
 +
- /**
-  * mmget() - Pin the address space associated with a &struct mm_struct.
-  * @mm: The address space to pin.
+ /*
+  * This has to be called after a get_task_mm()/mmget_not_zero()
+  * followed by taking the mmap_sem for writing before modifying the
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -679,6 +679,19 @@ void __mmdrop(struct mm_struct *mm)
diff --git a/patches/series b/patches/series
index 811f914e0f9a..6b16d5b07831 100644
--- a/patches/series
+++ b/patches/series
@@ -70,7 +70,6 @@ printk-print-rate-limitted-message-as-info.patch
 # POSTED
 ############################################################
 x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch
-arm-kprobe-replace-patch_lock-to-raw-lock.patch
 cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
 fscache-initialize-cookie-hash-table-raw-spinlocks.patch
 Drivers-hv-vmbus-include-header-for-get_irq_regs.patch
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index a1b4d850c1c2..b3eb51f82d15 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
-@@ -3010,6 +3010,7 @@ struct softnet_data {
+@@ -3013,6 +3013,7 @@ struct softnet_data {
  	unsigned int		dropped;
  	struct sk_buff_head	input_pkt_queue;
  	struct napi_struct	backlog;
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #endif
  }
-@@ -5307,7 +5307,7 @@ static void flush_backlog(struct work_st
+@@ -5323,7 +5323,7 @@ static void flush_backlog(struct work_st
  	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  			input_queue_head_incr(sd);
  		}
  	}
-@@ -5317,11 +5317,14 @@ static void flush_backlog(struct work_st
+@@ -5333,11 +5333,14 @@ static void flush_backlog(struct work_st
  	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->process_queue);
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  }
  static void flush_all_backlogs(void)
-@@ -5921,7 +5924,9 @@ static int process_backlog(struct napi_s
+@@ -5937,7 +5940,9 @@ static int process_backlog(struct napi_s
  	while (again) {
  		struct sk_buff *skb;
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  			rcu_read_lock();
  			__netif_receive_skb(skb);
  			rcu_read_unlock();
-@@ -5929,9 +5934,9 @@ static int process_backlog(struct napi_s
+@@ -5945,9 +5950,9 @@ static int process_backlog(struct napi_s
  			if (++work >= quota)
  				return work;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  		rps_lock(sd);
  		if (skb_queue_empty(&sd->input_pkt_queue)) {
  			/*
-@@ -6396,13 +6401,21 @@ static __latent_entropy void net_rx_acti
+@@ -6412,13 +6417,21 @@ static __latent_entropy void net_rx_acti
  	unsigned long time_limit = jiffies +
  		usecs_to_jiffies(netdev_budget_usecs);
  	int budget = netdev_budget;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	for (;;) {
  		struct napi_struct *n;
-@@ -9420,10 +9433,13 @@ static int dev_cpu_dead(unsigned int old
+@@ -9436,10 +9449,13 @@ static int dev_cpu_dead(unsigned int old
  		netif_rx_ni(skb);
  		input_queue_head_incr(oldsd);
  	}
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	return 0;
  }
-@@ -9732,8 +9748,9 @@ static int __init net_dev_init(void)
+@@ -9748,8 +9764,9 @@ static int __init net_dev_init(void)
  		INIT_WORK(flush, flush_backlog);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 6141ef250f02..d0c9fbbc754f 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  }
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -2740,6 +2740,7 @@ static void __netif_reschedule(struct Qd
+@@ -2754,6 +2754,7 @@ static void __netif_reschedule(struct Qd
  	sd->output_queue_tailp = &q->next_sched;
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  }
  void __netif_schedule(struct Qdisc *q)
-@@ -2802,6 +2803,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2816,6 +2817,7 @@ void __dev_kfree_skb_irq(struct sk_buff
  	__this_cpu_write(softnet_data.completion_queue, skb);
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  }
  EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -4281,6 +4283,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -4295,6 +4297,7 @@ static int enqueue_to_backlog(struct sk_
  	rps_unlock(sd);
  	local_irq_restore(flags);
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	atomic_long_inc(&skb->dev->rx_dropped);
  	kfree_skb(skb);
-@@ -5883,12 +5886,14 @@ static void net_rps_action_and_irq_enabl
+@@ -5899,12 +5902,14 @@ static void net_rps_action_and_irq_enabl
  		sd->rps_ipi_list = NULL;
  		local_irq_enable();
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  }
  static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -5966,6 +5971,7 @@ void __napi_schedule(struct napi_struct
+@@ -5982,6 +5987,7 @@ void __napi_schedule(struct napi_struct
  	local_irq_save(flags);
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  	local_irq_restore(flags);
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  }
  EXPORT_SYMBOL(__napi_schedule);
-@@ -9402,6 +9408,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -9418,6 +9424,7 @@ static int dev_cpu_dead(unsigned int old
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_enable();
diff --git a/patches/timekeeping-split-jiffies-lock.patch b/patches/timekeeping-split-jiffies-lock.patch
index f01c548ff987..97fd9d67ebe2 100644
--- a/patches/timekeeping-split-jiffies-lock.patch
+++ b/patches/timekeeping-split-jiffies-lock.patch
@@ -144,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  }
 --- a/kernel/time/timekeeping.h
 +++ b/kernel/time/timekeeping.h
-@@ -18,7 +18,8 @@ extern void timekeeping_resume(void);
+@@ -25,7 +25,8 @@ static inline void sched_clock_resume(vo
  extern void do_timer(unsigned long ticks);
  extern void update_wall_time(void);
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 3cecd0d5f1b9..d1f079809943 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -4519,7 +4519,7 @@ static int netif_rx_internal(struct sk_b
+@@ -4533,7 +4533,7 @@ static int netif_rx_internal(struct sk_b
  	struct rps_dev_flow voidflow, *rflow = &voidflow;
  	int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	rcu_read_lock();
  	cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4529,14 +4529,14 @@ static int netif_rx_internal(struct sk_b
+@@ -4543,14 +4543,14 @@ static int netif_rx_internal(struct sk_b
  	ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  	rcu_read_unlock();
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 5ba76b799fce..cad282be22c5 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -6934,6 +6934,14 @@ int kvm_arch_init(void *opaque)
+@@ -6936,6 +6936,14 @@ int kvm_arch_init(void *opaque)
  		goto out;
  	}
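
For context: the refresh above only renumbers the hunk inside x86-kvm-require-const-tsc-for-rt.patch; the body of that patch is unchanged and not shown here. The guard it adds to kvm_arch_init() has, in outline, the following shape (a sketch assuming this era's CONFIG_PREEMPT_RT_FULL symbol, not a verbatim quote of the patch):

    #ifdef CONFIG_PREEMPT_RT_FULL
            /*
             * Without a constant TSC, guest clocks would need periodic
             * recalibration in IRQ-disabled sections, which conflicts
             * with RT latency guarantees, so refuse to initialize KVM.
             */
            if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
                    pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n");
                    return -EOPNOTSUPP;
            }
    #endif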