diff options
author | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2018-06-18 11:58:57 +0200 |
---|---|---|
committer | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2018-06-18 11:58:57 +0200 |
commit | c25478e86505efa4273b4d1affd6e411dca73950 (patch) | |
tree | abbc784b25e9f5690757481314a3b55fd49db32b | |
parent | f7705bc28c189014760a76db7581794f2187e56a (diff) | |
download | linux-rt-4.16.15-rt7-patches.tar.gz |
[ANNOUNCE] v4.16.15-rt7v4.16.15-rt7-patches
Dear RT folks!
I'm pleased to announce the v4.16.15-rt7 patch set.
Changes since v4.16.15-rt6:
- Add the mm/memcontrol patch back to fix a "sleeping while atomic"
  warning. It was dropped in the v4.16 cycle because it was wrongly
  assumed that it is no longer required. Reported by Mike Galbraith.
- Add a percpu_ida fix from upstream.
- Drop the patch for the X86 Ultraviolet platform. It should be merged
upstream because PREEMPT causes warnings, too. This is not going to
happen so I drop it from RT since nobody cares.
Known issues
- A warning triggered in "rcu_note_context_switch" originated from
SyS_timer_gettime(). The issue was always there, it is now
visible. Reported by Grygorii Strashko and Daniel Wagner.
The delta patch against v4.16.15-rt6 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.16/incr/patch-4.16.15-rt6-rt7.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.16.15-rt7
The RT patch against v4.16.15 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.16/older/patch-4.16.15-rt7.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.16/older/patches-4.16.15-rt7.tar.xz
Sebastian
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r-- | patches/lib-percpu_ida.c-don-t-do-alloc-from-per-CPU-list-if.patch | 34 | ||||
-rw-r--r-- | patches/localversion.patch | 2 | ||||
-rw-r--r-- | patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch | 68 | ||||
-rw-r--r-- | patches/series | 33 | ||||
-rw-r--r-- | patches/x86-UV-raw_spinlock-conversion.patch | 219 |
5 files changed, 120 insertions, 236 deletions
diff --git a/patches/lib-percpu_ida.c-don-t-do-alloc-from-per-CPU-list-if.patch b/patches/lib-percpu_ida.c-don-t-do-alloc-from-per-CPU-list-if.patch new file mode 100644 index 000000000000..11064152c1b1 --- /dev/null +++ b/patches/lib-percpu_ida.c-don-t-do-alloc-from-per-CPU-list-if.patch @@ -0,0 +1,34 @@ +From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +Date: Wed, 13 Jun 2018 09:58:30 +0200 +Subject: [PATCH] lib/percpu_ida.c: don't do alloc from per-CPU list if there + is none + +In commit 804209d8a009 ("lib/percpu_ida.c: use _irqsave() instead of +local_irq_save() + spin_lock") I inlined alloc_local_tag() and mixed up +the >= check from percpu_ida_alloc() with the one in alloc_local_tag(). + +Don't alloc from per-CPU freelist if ->nr_free is zero. + +Fixes: 804209d8a009 ("lib/percpu_ida.c: use _irqsave() instead of local_irq_save() + spin_lock") +Reported-by: David Disseldorp <ddiss@suse.de> +Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +--- + lib/percpu_ida.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c +index 9bbd9c5d375a..beb14839b41a 100644 +--- a/lib/percpu_ida.c ++++ b/lib/percpu_ida.c +@@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) + spin_lock_irqsave(&tags->lock, flags); + + /* Fastpath */ +- if (likely(tags->nr_free >= 0)) { ++ if (likely(tags->nr_free)) { + tag = tags->freelist[--tags->nr_free]; + spin_unlock_irqrestore(&tags->lock, flags); + return tag; +-- +2.17.1 + diff --git a/patches/localversion.patch b/patches/localversion.patch index 4c1841b6475d..bbb08330835d 100644 --- a/patches/localversion.patch +++ b/patches/localversion.patch @@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ -+-rt6 ++-rt7 diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch new file mode 
100644 index 000000000000..52ddca411353 --- /dev/null +++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch @@ -0,0 +1,68 @@ +From: Yang Shi <yang.shi@windriver.com> +Subject: mm/memcontrol: Don't call schedule_work_on in preemption disabled context +Date: Wed, 30 Oct 2013 11:48:33 -0700 + +The following trace is triggered when running ltp oom test cases: + +BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 +in_atomic(): 1, irqs_disabled(): 0, pid: 17188, name: oom03 +Preemption disabled at:[<ffffffff8112ba70>] mem_cgroup_reclaim+0x90/0xe0 + +CPU: 2 PID: 17188 Comm: oom03 Not tainted 3.10.10-rt3 #2 +Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010 +ffff88007684d730 ffff880070df9b58 ffffffff8169918d ffff880070df9b70 +ffffffff8106db31 ffff88007688b4a0 ffff880070df9b88 ffffffff8169d9c0 +ffff88007688b4a0 ffff880070df9bc8 ffffffff81059da1 0000000170df9bb0 +Call Trace: +[<ffffffff8169918d>] dump_stack+0x19/0x1b +[<ffffffff8106db31>] __might_sleep+0xf1/0x170 +[<ffffffff8169d9c0>] rt_spin_lock+0x20/0x50 +[<ffffffff81059da1>] queue_work_on+0x61/0x100 +[<ffffffff8112b361>] drain_all_stock+0xe1/0x1c0 +[<ffffffff8112ba70>] mem_cgroup_reclaim+0x90/0xe0 +[<ffffffff8112beda>] __mem_cgroup_try_charge+0x41a/0xc40 +[<ffffffff810f1c91>] ? release_pages+0x1b1/0x1f0 +[<ffffffff8106f200>] ? sched_exec+0x40/0xb0 +[<ffffffff8112cc87>] mem_cgroup_charge_common+0x37/0x70 +[<ffffffff8112e2c6>] mem_cgroup_newpage_charge+0x26/0x30 +[<ffffffff8110af68>] handle_pte_fault+0x618/0x840 +[<ffffffff8103ecf6>] ? unpin_current_cpu+0x16/0x70 +[<ffffffff81070f94>] ? migrate_enable+0xd4/0x200 +[<ffffffff8110cde5>] handle_mm_fault+0x145/0x1e0 +[<ffffffff810301e1>] __do_page_fault+0x1a1/0x4c0 +[<ffffffff8169c9eb>] ? preempt_schedule_irq+0x4b/0x70 +[<ffffffff8169e3b7>] ? 
retint_kernel+0x37/0x40 +[<ffffffff8103053e>] do_page_fault+0xe/0x10 +[<ffffffff8169e4c2>] page_fault+0x22/0x30 + +So, to prevent schedule_work_on from being called in preempt disabled context, +replace the pair of get/put_cpu() to get/put_cpu_light(). + + +Signed-off-by: Yang Shi <yang.shi@windriver.com> +Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +--- + + mm/memcontrol.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -1831,7 +1831,7 @@ static void drain_all_stock(struct mem_c + * as well as workers from this path always operate on the local + * per-cpu data. CPU up doesn't touch memcg_stock at all. + */ +- curcpu = get_cpu(); ++ curcpu = get_cpu_light(); + for_each_online_cpu(cpu) { + struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); + struct mem_cgroup *memcg; +@@ -1851,7 +1851,7 @@ static void drain_all_stock(struct mem_c + } + css_put(&memcg->css); + } +- put_cpu(); ++ put_cpu_light(); + mutex_unlock(&percpu_charge_mutex); + } + diff --git a/patches/series b/patches/series index 34925984b662..836a52a3d37c 100644 --- a/patches/series +++ b/patches/series @@ -108,11 +108,25 @@ net-3com-3c59x-Pull-locking-out-of-ISR.patch net-3com-3c59x-irq-save-variant-of-ISR.patch ALSA-pcm-Hide-local_irq_disable-enable-and-local_irq.patch percpu_ida-Use-_irqsave-instead-of-local_irq_save-sp.patch +lib-percpu_ida.c-don-t-do-alloc-from-per-CPU-list-if.patch xen-9pfs-don-t-inclide-rwlock.h-directly.patch ACPICA-provide-abstraction-for-raw_spinlock_t.patch ACPICA-Convert-acpi_gbl_hardware-lock-back-to-an-acp.patch mfd-syscon-atmel-smc-include-string.h.patch sched-tracing-Fix-trace_sched_pi_setprio-for-deboost.patch +0001-PM-suspend-Prevent-might-sleep-splats.patch +0002-PM-wakeup-Make-events_lock-a-RAW_SPINLOCK.patch +0003-PM-s2idle-Make-s2idle_wait_head-swait-based.patch +0004-PM-wakeup-Make-s2idle_lock-a-RAW_SPINLOCK.patch 
+rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch +alim15x3-move-irq-restore-before-pci_dev_put.patch +ide-don-t-disable-interrupts-if-they-are-already-dis.patch +ide-don-t-disable-interrupts-during-kmap_atomic.patch +ide-don-t-enable-disable-interrupts-in-force-threade.patch +kernel-signal-Remove-no-longer-required-irqsave-rest.patch +IB-ipoib-replace-local_irq_disable-with-proper-locki.patch +SCSI-libsas-remove-irq-save-in-sas_ata_qc_issue.patch +posix-cpu-timers-remove-lockdep_assert_irqs_disabled.patch ############################################################ # POSTED by others @@ -133,17 +147,12 @@ sched-tracing-Fix-trace_sched_pi_setprio-for-deboost.patch Revert-mm-vmstat.c-fix-vmstat_update-preemption-BUG.patch arm-convert-boot-lock-to-raw.patch x86-io-apic-migra-no-unmask.patch -0001-PM-suspend-Prevent-might-sleep-splats.patch -0002-PM-wakeup-Make-events_lock-a-RAW_SPINLOCK.patch -0003-PM-s2idle-Make-s2idle_wait_head-swait-based.patch -0004-PM-wakeup-Make-s2idle_lock-a-RAW_SPINLOCK.patch +arm-kprobe-replace-patch_lock-to-raw-lock.patch +arm-unwind-use_raw_lock.patch ############################################################ # Ready for posting ############################################################ -arm-kprobe-replace-patch_lock-to-raw-lock.patch -x86-UV-raw_spinlock-conversion.patch -arm-unwind-use_raw_lock.patch ############################################################ # Needs to address review feedback @@ -153,7 +162,6 @@ posix-timers-no-broadcast.patch ############################################################ # Almost ready, needs final polishing ############################################################ -rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch drivers-random-reduce-preempt-disabled-region.patch mm-page_alloc-rt-friendly-per-cpu-pages.patch mm-page_alloc-reduce-lock-sections-further.patch @@ -180,18 +188,10 @@ signal-revert-ptrace-preempt-magic.patch 
net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch # The removal of NONRT helpers -alim15x3-move-irq-restore-before-pci_dev_put.patch -ide-don-t-disable-interrupts-if-they-are-already-dis.patch -ide-don-t-disable-interrupts-during-kmap_atomic.patch -ide-don-t-enable-disable-interrupts-in-force-threade.patch dm-rq-remove-BUG_ON-irqs_disabled-check.patch -kernel-signal-Remove-no-longer-required-irqsave-rest.patch ntfs-avoid-disabling-interrupts-during-kmap_atomic.patch -IB-ipoib-replace-local_irq_disable-with-proper-locki.patch -SCSI-libsas-remove-irq-save-in-sas_ata_qc_issue.patch SCSI-qla2xxx-remove-irq-save-in-qla2x00_poll.patch libata-remove-ata_sff_data_xfer_noirq.patch -posix-cpu-timers-remove-lockdep_assert_irqs_disabled.patch usb-do-not-disable-interrupts-in-giveback.patch # atomic_dec_and_lock_irqsave / refcount_dec_and_lock_irqsave 0001-spinlock-atomic_dec_and_lock-Add-an-irqsave-variant.patch @@ -284,6 +284,7 @@ slub-disable-SLUB_CPU_PARTIAL.patch # MM mm-page-alloc-use-local-lock-on-target-cpu.patch +mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch mm-memcontrol-do_not_disable_irq.patch mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch diff --git a/patches/x86-UV-raw_spinlock-conversion.patch b/patches/x86-UV-raw_spinlock-conversion.patch deleted file mode 100644 index eb74502caceb..000000000000 --- a/patches/x86-UV-raw_spinlock-conversion.patch +++ /dev/null @@ -1,219 +0,0 @@ -From: Mike Galbraith <umgwanakikbuti@gmail.com> -Date: Sun, 2 Nov 2014 08:31:37 +0100 -Subject: x86: UV: raw_spinlock conversion - -Shrug. Lots of hobbyists have a beast in their basement, right? 
- - -Signed-off-by: Mike Galbraith <mgalbraith@suse.de> -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ---- - arch/x86/include/asm/uv/uv_bau.h | 14 +++++++------- - arch/x86/platform/uv/tlb_uv.c | 26 +++++++++++++------------- - arch/x86/platform/uv/uv_time.c | 20 ++++++++++++-------- - 3 files changed, 32 insertions(+), 28 deletions(-) - ---- a/arch/x86/include/asm/uv/uv_bau.h -+++ b/arch/x86/include/asm/uv/uv_bau.h -@@ -642,9 +642,9 @@ struct bau_control { - cycles_t send_message; - cycles_t period_end; - cycles_t period_time; -- spinlock_t uvhub_lock; -- spinlock_t queue_lock; -- spinlock_t disable_lock; -+ raw_spinlock_t uvhub_lock; -+ raw_spinlock_t queue_lock; -+ raw_spinlock_t disable_lock; - /* tunables */ - int max_concurr; - int max_concurr_const; -@@ -846,15 +846,15 @@ static inline int atom_asr(short i, stru - * to be lowered below the current 'v'. atomic_add_unless can only stop - * on equal. - */ --static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) -+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u) - { -- spin_lock(lock); -+ raw_spin_lock(lock); - if (atomic_read(v) >= u) { -- spin_unlock(lock); -+ raw_spin_unlock(lock); - return 0; - } - atomic_inc(v); -- spin_unlock(lock); -+ raw_spin_unlock(lock); - return 1; - } - ---- a/arch/x86/platform/uv/tlb_uv.c -+++ b/arch/x86/platform/uv/tlb_uv.c -@@ -740,9 +740,9 @@ static void destination_plugged(struct b - - quiesce_local_uvhub(hmaster); - -- spin_lock(&hmaster->queue_lock); -+ raw_spin_lock(&hmaster->queue_lock); - reset_with_ipi(&bau_desc->distribution, bcp); -- spin_unlock(&hmaster->queue_lock); -+ raw_spin_unlock(&hmaster->queue_lock); - - end_uvhub_quiesce(hmaster); - -@@ -762,9 +762,9 @@ static void destination_timeout(struct b - - quiesce_local_uvhub(hmaster); - -- spin_lock(&hmaster->queue_lock); -+ raw_spin_lock(&hmaster->queue_lock); - reset_with_ipi(&bau_desc->distribution, bcp); -- spin_unlock(&hmaster->queue_lock); 
-+ raw_spin_unlock(&hmaster->queue_lock); - - end_uvhub_quiesce(hmaster); - -@@ -785,7 +785,7 @@ static void disable_for_period(struct ba - cycles_t tm1; - - hmaster = bcp->uvhub_master; -- spin_lock(&hmaster->disable_lock); -+ raw_spin_lock(&hmaster->disable_lock); - if (!bcp->baudisabled) { - stat->s_bau_disabled++; - tm1 = get_cycles(); -@@ -798,7 +798,7 @@ static void disable_for_period(struct ba - } - } - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - } - - static void count_max_concurr(int stat, struct bau_control *bcp, -@@ -861,7 +861,7 @@ static void record_send_stats(cycles_t t - */ - static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat) - { -- spinlock_t *lock = &hmaster->uvhub_lock; -+ raw_spinlock_t *lock = &hmaster->uvhub_lock; - atomic_t *v; - - v = &hmaster->active_descriptor_count; -@@ -995,7 +995,7 @@ static int check_enable(struct bau_contr - struct bau_control *hmaster; - - hmaster = bcp->uvhub_master; -- spin_lock(&hmaster->disable_lock); -+ raw_spin_lock(&hmaster->disable_lock); - if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { - stat->s_bau_reenabled++; - for_each_present_cpu(tcpu) { -@@ -1007,10 +1007,10 @@ static int check_enable(struct bau_contr - tbcp->period_giveups = 0; - } - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - return 0; - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - return -1; - } - -@@ -1942,9 +1942,9 @@ static void __init init_per_cpu_tunables - bcp->cong_reps = congested_reps; - bcp->disabled_period = sec_2_cycles(disabled_period); - bcp->giveup_limit = giveup_limit; -- spin_lock_init(&bcp->queue_lock); -- spin_lock_init(&bcp->uvhub_lock); -- spin_lock_init(&bcp->disable_lock); -+ raw_spin_lock_init(&bcp->queue_lock); -+ raw_spin_lock_init(&bcp->uvhub_lock); -+ raw_spin_lock_init(&bcp->disable_lock); - } - } - ---- a/arch/x86/platform/uv/uv_time.c -+++ 
b/arch/x86/platform/uv/uv_time.c -@@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event - - /* There is one of these allocated per node */ - struct uv_rtc_timer_head { -- spinlock_t lock; -+ raw_spinlock_t lock; - /* next cpu waiting for timer, local node relative: */ - int next_cpu; - /* number of cpus on this node: */ -@@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers - uv_rtc_deallocate_timers(); - return -ENOMEM; - } -- spin_lock_init(&head->lock); -+ raw_spin_lock_init(&head->lock); - head->ncpus = uv_blade_nr_possible_cpus(bid); - head->next_cpu = -1; - blade_info[bid] = head; -@@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 - unsigned long flags; - int next_cpu; - -- spin_lock_irqsave(&head->lock, flags); -+ raw_spin_lock_irqsave(&head->lock, flags); - - next_cpu = head->next_cpu; - *t = expires; -@@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 - if (uv_setup_intr(cpu, expires)) { - *t = ULLONG_MAX; - uv_rtc_find_next_timer(head, pnode); -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - return -ETIME; - } - } - -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - return 0; - } - -@@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, i - unsigned long flags; - int rc = 0; - -- spin_lock_irqsave(&head->lock, flags); -+ raw_spin_lock_irqsave(&head->lock, flags); - - if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) - rc = 1; -@@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, i - uv_rtc_find_next_timer(head, pnode); - } - -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - - return rc; - } -@@ -299,13 +299,17 @@ static int uv_rtc_unset_timer(int cpu, i - static u64 uv_read_rtc(struct clocksource *cs) - { - unsigned long offset; -+ u64 cycles; - -+ preempt_disable(); - if (uv_get_min_hub_revision_id() == 1) - offset = 0; - else - offset = 
(uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; - -- return (u64)uv_read_local_mmr(UVH_RTC | offset); -+ cycles = (u64)uv_read_local_mmr(UVH_RTC | offset); -+ preempt_enable(); -+ return cycles; - } - - /* |