author    | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2019-01-14 13:27:28 +0100
committer | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2019-01-14 13:27:28 +0100
commit    | aaf08793d14c49245bbea8d135907458f9f796de (patch)
tree      | a09f3df53f7d519afefa0154464ecdadb8f21494
parent    | dae3539f9a4812bf8c9c2ae25fa1e5aaf61b78e3 (diff)
download  | linux-rt-aaf08793d14c49245bbea8d135907458f9f796de.tar.gz
[ANNOUNCE] v4.19.15-rt11
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
16 files changed, 114 insertions(+), 99 deletions(-)
diff --git a/patches/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch b/patches/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch
index c0d7124dd59f..58026eee9409 100644
--- a/patches/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch
+++ b/patches/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/drivers/clocksource/Kconfig
 +++ b/drivers/clocksource/Kconfig
-@@ -403,6 +403,14 @@ config ATMEL_ST
+@@ -404,6 +404,14 @@ config ATMEL_ST
 	  help
 	    Support for the Atmel ST timer.
 
diff --git a/patches/0004-clocksource-drivers-atmel-pit-make-option-silent.patch b/patches/0004-clocksource-drivers-atmel-pit-make-option-silent.patch
index 78ad5f0841d1..a8ada4bc5504 100644
--- a/patches/0004-clocksource-drivers-atmel-pit-make-option-silent.patch
+++ b/patches/0004-clocksource-drivers-atmel-pit-make-option-silent.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/drivers/clocksource/Kconfig
 +++ b/drivers/clocksource/Kconfig
-@@ -392,8 +392,11 @@ config ARMV7M_SYSTICK
+@@ -393,8 +393,11 @@ config ARMV7M_SYSTICK
 	  This options enables support for the ARMv7M system timer unit
 
 config ATMEL_PIT
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index 457e89c5baf3..8963a3e86ebc 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	/*
 --- a/kernel/cgroup/cgroup.c
 +++ b/kernel/cgroup/cgroup.c
-@@ -4620,10 +4620,10 @@ static void css_free_rwork_fn(struct wor
+@@ -4625,10 +4625,10 @@ static void css_free_rwork_fn(struct wor
 	}
 }
 
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	struct cgroup_subsys *ss = css->ss;
 	struct cgroup *cgrp = css->cgroup;
 
-@@ -4683,8 +4683,8 @@ static void css_release(struct percpu_re
+@@ -4688,8 +4688,8 @@ static void css_release(struct percpu_re
 	struct cgroup_subsys_state *css =
 		container_of(ref, struct cgroup_subsys_state, refcnt);
 
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 
 static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5406,6 +5406,7 @@ static int __init cgroup_wq_init(void)
+@@ -5411,6 +5411,7 @@ static int __init cgroup_wq_init(void)
 	 */
 	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
 	BUG_ON(!cgroup_destroy_wq);
diff --git a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index 06755adc5960..0f00b23dff2d 100644
--- a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 static inline bool init_done(struct zram *zram)
 {
-@@ -898,6 +923,8 @@ static DEVICE_ATTR_RO(io_stat);
+@@ -900,6 +925,8 @@ static DEVICE_ATTR_RO(io_stat);
 static DEVICE_ATTR_RO(mm_stat);
 static DEVICE_ATTR_RO(debug_stat);
 
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 static void zram_meta_free(struct zram *zram, u64 disksize)
 {
 	size_t num_pages = disksize >> PAGE_SHIFT;
-@@ -928,6 +955,7 @@ static bool zram_meta_alloc(struct zram
+@@ -930,6 +957,7 @@ static bool zram_meta_alloc(struct zram
 	if (!huge_class_size)
 		huge_class_size = zs_huge_class_size(zram->mem_pool);
diff --git a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index 5d730716b4d5..e5617a809cb6 100644
--- a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 /* dynamic per-device compression frontend */
 --- a/drivers/block/zram/zram_drv.c
 +++ b/drivers/block/zram/zram_drv.c
-@@ -1014,6 +1014,7 @@ static int __zram_bvec_read(struct zram
+@@ -1016,6 +1016,7 @@ static int __zram_bvec_read(struct zram
 	unsigned long handle;
 	unsigned int size;
 	void *src, *dst;
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 	if (zram_wb_enabled(zram)) {
 		zram_slot_lock(zram, index);
-@@ -1048,6 +1049,7 @@ static int __zram_bvec_read(struct zram
+@@ -1050,6 +1051,7 @@ static int __zram_bvec_read(struct zram
 
 	size = zram_get_obj_size(zram, index);
 
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
 	if (size == PAGE_SIZE) {
 		dst = kmap_atomic(page);
-@@ -1055,14 +1057,13 @@ static int __zram_bvec_read(struct zram
+@@ -1057,14 +1059,13 @@ static int __zram_bvec_read(struct zram
 		kunmap_atomic(dst);
 		ret = 0;
 	} else {
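Both zram patches above share one RT motif: paths that implicitly disable preemption — a bit spinlock embedded in the entry's flags word, or get_cpu_ptr() around the per-CPU compression stream — cannot take sleeping locks once spinlock_t becomes an rtmutex. The following is a minimal sketch of the substitution named in the first patch's title; the struct, bit number, and helper names are illustrative stand-ins, not the actual zram code:

#include <linux/bit_spinlock.h>
#include <linux/spinlock.h>

/* Hypothetical table slot: mainline keeps the lock as one bit of
 * ->flags, which spins with preemption disabled while held. */
struct sketch_slot {
	unsigned long flags;
#ifdef CONFIG_PREEMPT_RT_BASE
	spinlock_t lock;	/* rtmutex-backed on RT: preemptible, lockdep-visible */
#endif
};

#define SKETCH_LOCK_BIT	0	/* stand-in for the real flag bit */

static inline void sketch_slot_lock(struct sketch_slot *s)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	spin_lock(&s->lock);	/* assumes spin_lock_init() ran at alloc time */
#else
	bit_spin_lock(SKETCH_LOCK_BIT, &s->flags);
#endif
}

static inline void sketch_slot_unlock(struct sketch_slot *s)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	spin_unlock(&s->lock);
#else
	bit_spin_unlock(SKETCH_LOCK_BIT, &s->flags);
#endif
}

The trade-off is storage: the bit spinlock is free, while the RT variant pays one spinlock_t per entry in exchange for a preemptible critical section.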
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index a883ae776797..671ce0d62b46 100644
--- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -4880,9 +4880,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4879,9 +4879,9 @@ void init_cfs_bandwidth(struct cfs_bandw
 	cfs_b->period = ns_to_ktime(default_cfs_period());
 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
 
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index 656bc66d1dc0..c93f50ee9eec 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -535,7 +535,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		!task_on_rq_queued(task))) {
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -1631,7 +1631,7 @@ static void task_numa_compare(struct tas
+@@ -1630,7 +1630,7 @@ static void task_numa_compare(struct tas
 	 * be incurred if the tasks were swapped.
 	 */
 	/* Skip this swap candidate if cannot move to the source cpu */
@@ -544,7 +544,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		goto unlock;
 
 	/*
-@@ -1728,7 +1728,7 @@ static void task_numa_find_cpu(struct ta
+@@ -1727,7 +1727,7 @@ static void task_numa_find_cpu(struct ta
 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
 		/* Skip this CPU if the source task cannot migrate */
@@ -553,7 +553,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			continue;
 
 		env->dst_cpu = cpu;
-@@ -5713,7 +5713,7 @@ find_idlest_group(struct sched_domain *s
+@@ -5712,7 +5712,7 @@ find_idlest_group(struct sched_domain *s
 
 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpumask_intersects(sched_group_span(group),
@@ -562,7 +562,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			continue;
 
 		local_group = cpumask_test_cpu(this_cpu,
-@@ -5845,7 +5845,7 @@ find_idlest_group_cpu(struct sched_group
+@@ -5844,7 +5844,7 @@ find_idlest_group_cpu(struct sched_group
 		return cpumask_first(sched_group_span(group));
 
 	/* Traverse only the allowed CPUs */
@@ -571,7 +571,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		if (available_idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
-@@ -5885,7 +5885,7 @@ static inline int find_idlest_cpu(struct
+@@ -5884,7 +5884,7 @@ static inline int find_idlest_cpu(struct
 {
 	int new_cpu = cpu;
 
@@ -580,7 +580,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		return prev_cpu;
 
 	/*
-@@ -6001,7 +6001,7 @@ static int select_idle_core(struct task_
+@@ -6000,7 +6000,7 @@ static int select_idle_core(struct task_
 	if (!test_idle_cores(target, false))
 		return -1;
 
@@ -589,7 +589,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
 
-@@ -6035,7 +6035,7 @@ static int select_idle_smt(struct task_s
+@@ -6034,7 +6034,7 @@ static int select_idle_smt(struct task_s
 		return -1;
 
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -598,7 +598,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			continue;
 		if (available_idle_cpu(cpu))
 			return cpu;
-@@ -6098,7 +6098,7 @@ static int select_idle_cpu(struct task_s
+@@ -6097,7 +6097,7 @@ static int select_idle_cpu(struct task_s
 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
 		if (!--nr)
 			return -1;
@@ -607,7 +607,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			continue;
 		if (available_idle_cpu(cpu))
 			break;
-@@ -6135,7 +6135,7 @@ static int select_idle_sibling(struct ta
+@@ -6134,7 +6134,7 @@ static int select_idle_sibling(struct ta
 	    recent_used_cpu != target &&
 	    cpus_share_cache(recent_used_cpu, target) &&
 	    available_idle_cpu(recent_used_cpu) &&
@@ -616,7 +616,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		/*
 		 * Replace recent_used_cpu with prev as it is a potential
 		 * candidate for the next wake:
-@@ -6353,7 +6353,7 @@ select_task_rq_fair(struct task_struct *
+@@ -6352,7 +6352,7 @@ select_task_rq_fair(struct task_struct *
 	if (sd_flag & SD_BALANCE_WAKE) {
 		record_wakee(p);
 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
@@ -625,7 +625,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	}
 
 	rcu_read_lock();
-@@ -7092,14 +7092,14 @@ int can_migrate_task(struct task_struct
+@@ -7091,14 +7091,14 @@ int can_migrate_task(struct task_struct
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) throttled_lb_pair, or
@@ -642,7 +642,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		int cpu;
 
 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -7119,7 +7119,7 @@ int can_migrate_task(struct task_struct
+@@ -7118,7 +7118,7 @@ int can_migrate_task(struct task_struct
 		/* Prevent to re-select dst_cpu via env's CPUs: */
 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -651,7 +651,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 				env->flags |= LBF_DST_PINNED;
 				env->new_dst_cpu = cpu;
 				break;
-@@ -7740,7 +7740,7 @@ check_cpu_capacity(struct rq *rq, struct
+@@ -7715,7 +7715,7 @@ check_cpu_capacity(struct rq *rq, struct
 
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
@@ -660,7 +660,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  *
  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
-@@ -8355,7 +8355,7 @@ static struct sched_group *find_busiest_
+@@ -8330,7 +8330,7 @@ static struct sched_group *find_busiest_
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assume all things are equal, which typically
@@ -669,7 +669,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	 */
 	if (busiest->group_type == group_imbalanced)
 		goto force_balance;
-@@ -8751,7 +8751,7 @@ static int load_balance(int this_cpu, st
+@@ -8726,7 +8726,7 @@ static int load_balance(int this_cpu, st
 			 * if the curr task on busiest CPU can't be
 			 * moved to this_cpu:
 			 */
diff --git a/patches/localversion.patch b/patches/localversion.patch
index e16fb07c0a7d..58842b503a27 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt10
++-rt11
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index b05b6bf8f34e..1d870ba2af05 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -2036,7 +2036,7 @@ static void drain_all_stock(struct mem_c
+@@ -2052,7 +2052,7 @@ static void drain_all_stock(struct mem_c
 	 * as well as workers from this path always operate on the local
 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
 	 */
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 		struct mem_cgroup *memcg;
-@@ -2056,7 +2056,7 @@ static void drain_all_stock(struct mem_c
+@@ -2072,7 +2072,7 @@ static void drain_all_stock(struct mem_c
 		}
 		css_put(&memcg->css);
 	}
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 0143f826aaf6..b0c12fd045eb 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 /* Whether legacy memory+swap accounting is active */
 static bool do_memsw_account(void)
 {
-@@ -4843,12 +4846,12 @@ static int mem_cgroup_move_account(struc
+@@ -4859,12 +4862,12 @@ static int mem_cgroup_move_account(struc
 	ret = 0;
 
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 out_unlock:
 	unlock_page(page);
 out:
-@@ -5967,10 +5970,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5983,10 +5986,10 @@ void mem_cgroup_commit_charge(struct pag
 
 	commit_charge(page, memcg, lrucare);
 
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 	if (do_memsw_account() && PageSwapCache(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
-@@ -6039,7 +6042,7 @@ static void uncharge_batch(const struct
+@@ -6055,7 +6058,7 @@ static void uncharge_batch(const struct
 		memcg_oom_recover(ug->memcg);
 	}
 
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
 	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
 	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
-@@ -6047,7 +6050,7 @@ static void uncharge_batch(const struct
+@@ -6063,7 +6066,7 @@ static void uncharge_batch(const struct
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 	if (!mem_cgroup_is_root(ug->memcg))
 		css_put_many(&ug->memcg->css, nr_pages);
-@@ -6210,10 +6213,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -6226,10 +6229,10 @@ void mem_cgroup_migrate(struct page *old
 
 	commit_charge(newpage, memcg, false);
 
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -6405,6 +6408,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6421,6 +6424,7 @@ void mem_cgroup_swapout(struct page *pag
 	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned int nr_entries;
 	unsigned short oldid;
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 	VM_BUG_ON_PAGE(page_count(page), page);
-@@ -6450,13 +6454,17 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6466,13 +6470,17 @@ void mem_cgroup_swapout(struct page *pag
 	 * important here to have the interrupts disabled because it is the
 	 * only synchronisation we have for updating the per-CPU variables.
 	 */
diff --git a/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
index 5cc67753eb6d..8d14e1f77650 100644
--- a/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
+++ b/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
@@ -12,12 +12,52 @@ Cc: Frank Rowand <frowand.list@gmail.com>
 Cc: devicetree@vger.kernel.org
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 ---
- drivers/of/base.c | 22 ++++++++++++++--------
- 1 file changed, 14 insertions(+), 8 deletions(-)
+ drivers/of/base.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
 
 --- a/drivers/of/base.c
 +++ b/drivers/of/base.c
-@@ -132,46 +132,52 @@ void of_populate_phandle_cache(void)
+@@ -130,31 +130,34 @@ static u32 phandle_cache_mask;
+ /*
+  * Caller must hold devtree_lock.
+  */
+-static void __of_free_phandle_cache(void)
++static struct device_node** __of_free_phandle_cache(void)
+ {
+ 	u32 cache_entries = phandle_cache_mask + 1;
+ 	u32 k;
++	struct device_node **shadow;
+ 
+ 	if (!phandle_cache)
+-		return;
++		return NULL;
+ 
+ 	for (k = 0; k < cache_entries; k++)
+ 		of_node_put(phandle_cache[k]);
+ 
+-	kfree(phandle_cache);
++	shadow = phandle_cache;
+ 	phandle_cache = NULL;
++	return shadow;
+ }
+ 
+ int of_free_phandle_cache(void)
+ {
+ 	unsigned long flags;
++	struct device_node **shadow;
+ 
+ 	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 
+-	__of_free_phandle_cache();
++	shadow = __of_free_phandle_cache();
+ 
+ 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+-
++	kfree(shadow);
+ 	return 0;
+ }
+ #if !defined(CONFIG_MODULES)
+@@ -189,10 +192,11 @@ void of_populate_phandle_cache(void)
 	u32 cache_entries;
 	struct device_node *np;
 	u32 phandles = 0;
+	unsigned long flags;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
 			phandles++;
+@@ -200,12 +204,14 @@ void of_populate_phandle_cache(void)
 	if (!phandles)
 		goto out;
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
 	cache_entries = roundup_pow_of_two(phandles);
 	phandle_cache_mask = cache_entries - 1;
 
 	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
 				GFP_ATOMIC);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	if (!phandle_cache)
 		goto out;
+@@ -217,6 +223,7 @@ void of_populate_phandle_cache(void)
 out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	kfree(shadow);
 }
 
 void __init of_core_init(void)
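The rework above is the one substantive change in this release: the phandle cache is detached under the raw devtree_lock and only kfree()d after the lock is dropped, because on PREEMPT_RT the allocator can itself take sleeping locks, which is forbidden inside a raw_spinlock_t section. A distilled sketch of that shape (names prefixed sketch_ are stand-ins, and the of_node_put() loop of the real helper is omitted):

#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static struct device_node **sketch_cache;
static DEFINE_RAW_SPINLOCK(sketch_devtree_lock);	/* stands in for devtree_lock */

int sketch_free_cache(void)
{
	struct device_node **shadow;
	unsigned long flags;

	raw_spin_lock_irqsave(&sketch_devtree_lock, flags);
	shadow = sketch_cache;		/* unpublish under the lock ... */
	sketch_cache = NULL;
	raw_spin_unlock_irqrestore(&sketch_devtree_lock, flags);

	kfree(shadow);			/* ... free only after it is dropped */
	return 0;
}

The same shadow-pointer trick works in the allocation direction: build the new cache outside the lock, then publish it in a short locked section.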
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 9a02f09f143e..40fdb38c871c 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -405,7 +405,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 EXPORT_SYMBOL(migrate_enable);
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -4018,7 +4018,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4017,7 +4017,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
 	if (delta_exec > ideal_runtime) {
@@ -414,7 +414,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 		/*
 		 * The current task ran long enough, ensure it doesn't get
 		 * re-elected due to buddy favours.
-@@ -4042,7 +4042,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4041,7 +4041,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
 		return;
 
 	if (delta > ideal_runtime)
@@ -423,7 +423,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 static void
-@@ -4184,7 +4184,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -4183,7 +4183,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
 	 * validating it and just reschedule.
 	 */
 	if (queued) {
@@ -432,7 +432,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 		return;
 	}
 	/*
-@@ -4368,7 +4368,7 @@ static void __account_cfs_rq_runtime(str
+@@ -4367,7 +4367,7 @@ static void __account_cfs_rq_runtime(str
 	 * hierarchy can be throttled
 	 */
 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -441,7 +441,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 }
 
 static __always_inline
-@@ -5039,7 +5039,7 @@ static void hrtick_start_fair(struct rq
+@@ -5038,7 +5038,7 @@ static void hrtick_start_fair(struct rq
 
 		if (delta < 0) {
 			if (rq->curr == p)
@@ -450,7 +450,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 			return;
 		}
 		hrtick_start(rq, delta);
-@@ -6614,7 +6614,7 @@ static void check_preempt_wakeup(struct
+@@ -6613,7 +6613,7 @@ static void check_preempt_wakeup(struct
 	return;
 
 preempt:
@@ -459,7 +459,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	/*
 	 * Only set the backward buddy when the current task is still
 	 * on the rq. This can happen when a wakeup gets interleaved
-@@ -9725,7 +9725,7 @@ static void task_fork_fair(struct task_s
+@@ -9700,7 +9700,7 @@ static void task_fork_fair(struct task_s
 		 * 'current' within the tree based on its new key value.
 		 */
 		swap(curr->vruntime, se->vruntime);
@@ -468,7 +468,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	}
 
 	se->vruntime -= cfs_rq->min_vruntime;
-@@ -9749,7 +9749,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -9724,7 +9724,7 @@ prio_changed_fair(struct rq *rq, struct
 	 */
 	if (rq->curr == p) {
 		if (p->prio > oldprio)
diff --git a/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch b/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
index db0c7613e11b..c63feecaba6b 100644
--- a/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
+++ b/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -4880,9 +4880,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4879,9 +4879,9 @@ void init_cfs_bandwidth(struct cfs_bandw
 	cfs_b->period = ns_to_ktime(default_cfs_period());
 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
 
diff --git a/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch b/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
index a4fa35304e84..b1af1e9378e4 100644
--- a/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
+++ b/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -4554,7 +4554,7 @@ static u64 distribute_cfs_runtime(struct
+@@ -4553,7 +4553,7 @@ static u64 distribute_cfs_runtime(struct
 		struct rq *rq = rq_of(cfs_rq);
 		struct rq_flags rf;
 
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		if (!cfs_rq_throttled(cfs_rq))
 			goto next;
 
-@@ -4571,7 +4571,7 @@ static u64 distribute_cfs_runtime(struct
+@@ -4570,7 +4570,7 @@ static u64 distribute_cfs_runtime(struct
 			unthrottle_cfs_rq(cfs_rq);
 
 next:
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 		if (!remaining)
 			break;
-@@ -4587,7 +4587,7 @@ static u64 distribute_cfs_runtime(struct
+@@ -4586,7 +4586,7 @@ static u64 distribute_cfs_runtime(struct
  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
  * used to track this state.
  */
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 {
 	u64 runtime, runtime_expires;
 	int throttled;
-@@ -4629,11 +4629,11 @@ static int do_sched_cfs_period_timer(str
+@@ -4628,11 +4628,11 @@ static int do_sched_cfs_period_timer(str
 	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
 		runtime = cfs_b->runtime;
 		cfs_b->distribute_running = 1;
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		cfs_b->distribute_running = 0;
 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 
-@@ -4742,17 +4742,18 @@ static __always_inline void return_cfs_r
+@@ -4741,17 +4741,18 @@ static __always_inline void return_cfs_r
 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 {
 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		return;
 	}
 
-@@ -4763,18 +4764,18 @@ static void do_sched_cfs_slack_timer(str
+@@ -4762,18 +4763,18 @@ static void do_sched_cfs_slack_timer(str
 	if (runtime)
 		cfs_b->distribute_running = 1;
 
@@ -114,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 
 /*
-@@ -4852,20 +4853,21 @@ static enum hrtimer_restart sched_cfs_pe
+@@ -4851,20 +4852,21 @@ static enum hrtimer_restart sched_cfs_pe
 {
 	struct cfs_bandwidth *cfs_b =
 		container_of(timer, struct cfs_bandwidth, period_timer);
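The Robustify patch above threads an explicit flags value through the CFS bandwidth helpers because, once hrtimers expire in softirq context by default (the hrtimer patch earlier in this series), the timer callbacks can no longer assume interrupts are disabled on entry. A minimal sketch of the resulting callback shape, using a stand-in lock rather than the real cfs_b->lock:

#include <linux/hrtimer.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(sketch_bw_lock);	/* stands in for cfs_b->lock */

/* May run in hard IRQ or softirq context depending on configuration,
 * so the IRQ state must be saved and restored rather than assumed. */
static enum hrtimer_restart sketch_period_timer(struct hrtimer *timer)
{
	unsigned long flags;
	int idle = 0;

	raw_spin_lock_irqsave(&sketch_bw_lock, flags);
	/* ... forward the timer and distribute runtime here ... */
	raw_spin_unlock_irqrestore(&sketch_bw_lock, flags);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}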
diff --git a/patches/srcu-replace-local_irqsave-with-a-locallock.patch b/patches/srcu-replace-local_irqsave-with-a-locallock.patch
index 5f44929fbf84..2e65e8a57565 100644
--- a/patches/srcu-replace-local_irqsave-with-a-locallock.patch
+++ b/patches/srcu-replace-local_irqsave-with-a-locallock.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 #include "rcu.h"
 #include "rcu_segcblist.h"
 
-@@ -758,6 +759,8 @@ static void srcu_flip(struct srcu_struct
+@@ -760,6 +761,8 @@ static void srcu_flip(struct srcu_struct
  * negligible when amoritized over that time period, and the extra latency
  * of a needlessly non-expedited grace period is similarly negligible.
  */
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 static bool srcu_might_be_idle(struct srcu_struct *sp)
 {
 	unsigned long curseq;
-@@ -766,13 +769,13 @@ static bool srcu_might_be_idle(struct sr
+@@ -768,13 +771,13 @@ static bool srcu_might_be_idle(struct sr
 	unsigned long t;
 
 	/* If the local srcu_data structure has callbacks, not idle. */
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 	/*
 	 * No local callbacks, so probabalistically probe global state.
-@@ -850,7 +853,7 @@ void __call_srcu(struct srcu_struct *sp,
+@@ -852,7 +855,7 @@ void __call_srcu(struct srcu_struct *sp,
 		return;
 	}
 	rhp->func = func;
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	sdp = this_cpu_ptr(sp->sda);
 	spin_lock_rcu_node(sdp);
 	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
-@@ -866,7 +869,8 @@ void __call_srcu(struct srcu_struct *sp,
+@@ -868,7 +871,8 @@ void __call_srcu(struct srcu_struct *sp,
 		sdp->srcu_gp_seq_needed_exp = s;
 		needexp = true;
 	}
diff --git a/patches/srcu-use-cpu_online-instead-custom-check.patch b/patches/srcu-use-cpu_online-instead-custom-check.patch
index b35ad87f789a..7528bdafa6ac 100644
--- a/patches/srcu-use-cpu_online-instead-custom-check.patch
+++ b/patches/srcu-use-cpu_online-instead-custom-check.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 #include "rcu.h"
 #include "rcu_segcblist.h"
 
-@@ -459,21 +460,6 @@ static void srcu_gp_start(struct srcu_st
+@@ -461,21 +462,6 @@ static void srcu_gp_start(struct srcu_st
 }
 
 /*
  * Place the workqueue handler on the specified CPU if online, otherwise
  * just run it whereever. This is useful for placing workqueue handlers
  * that are to invoke the specified CPU's callbacks.
-@@ -484,12 +470,12 @@ static bool srcu_queue_delayed_work_on(i
+@@ -486,12 +472,12 @@ static bool srcu_queue_delayed_work_on(i
 {
 	bool ret;