author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2017-10-17 15:36:48 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2017-10-17 15:36:48 +0200
commit     163c99cd5cc3ffff6f0ac886236223e72ae989fb
tree       af03ce040630df8e74ae237ccdc400e5051ffab2
parent     b2302d63e4ff0ae34732c2ea0de668eaebbce819
download   linux-rt-4.11.12-rt16-patches.tar.gz
[ANNOUNCE] v4.11.12-rt16
Dear RT folks!
I'm pleased to announce the v4.11.12-rt16 patch set.
Changes since v4.11.12-rt15:
- Paul Gortmaker reported that the unstable-TSC fixup on x86 needs one
  additional patch from upstream.
- AppArmor protects its per-CPU path buffers with preempt_disable(),
  which on RT results in "sleeping while atomic" warnings. The buffers
  are now protected by a local_lock instead; a sketch of the pattern
  follows this list.
Known issues
None
The delta patch against v4.11.12-rt15 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/incr/patch-4.11.12-rt15-rt16.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.11.12-rt16
The RT patch against v4.11.12 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patch-4.11.12-rt16.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.12-rt16.tar.xz
Sebastian
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
10 files changed, 251 insertions(+), 18 deletions(-)
diff --git a/patches/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch b/patches/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
index 9072fe63ca6d..e9ba34641b35 100644
--- a/patches/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
+++ b/patches/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
@@ -38,7 +38,7 @@ Signed-off-by: Ingo Molnar <mingo@kernel.org>
 --- a/kernel/sched/clock.c
 +++ b/kernel/sched/clock.c
-@@ -126,12 +126,19 @@ int sched_clock_stable(void)
+@@ -132,12 +132,19 @@ static void __scd_stamp(struct sched_clo
 
  static void __set_sched_clock_stable(void)
  {
diff --git a/patches/apparmor-use-a-locallock-instead-preempt_disable.patch b/patches/apparmor-use-a-locallock-instead-preempt_disable.patch
new file mode 100644
index 000000000000..57a4cd2cc609
--- /dev/null
+++ b/patches/apparmor-use-a-locallock-instead-preempt_disable.patch
@@ -0,0 +1,83 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 11 Oct 2017 17:43:49 +0200
+Subject: apparmor: use a locallock instead preempt_disable()
+
+get_buffers() disables preemption which acts as a lock for the per-CPU
+variable. Since we can't disable preemption here on RT, a local_lock is
+used in order to remain on the same CPU and not to have more than one user
+within the critical section.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ security/apparmor/include/path.h |   21 +++++++++++++++++----
+ security/apparmor/lsm.c          |    2 +-
+ 2 files changed, 18 insertions(+), 5 deletions(-)
+
+--- a/security/apparmor/include/path.h
++++ b/security/apparmor/include/path.h
+@@ -38,9 +38,10 @@ struct aa_buffers {
+ };
+
+ #include <linux/percpu.h>
+-#include <linux/preempt.h>
++#include <linux/locallock.h>
+
+ DECLARE_PER_CPU(struct aa_buffers, aa_buffers);
++DECLARE_LOCAL_IRQ_LOCK(aa_buffers_lock);
+
+ #define COUNT_ARGS(X...) COUNT_ARGS_HELPER(, ##X, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+ #define COUNT_ARGS_HELPER(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, n, X...) n
+@@ -54,12 +55,24 @@ DECLARE_PER_CPU(struct aa_buffers, aa_bu
+
+ #define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++)
+
+-#ifdef CONFIG_DEBUG_PREEMPT
++#ifdef CONFIG_PREEMPT_RT_BASE
++
++static inline void AA_BUG_PREEMPT_ENABLED(const char *s)
++{
++	struct local_irq_lock *lv;
++
++	lv = this_cpu_ptr(&aa_buffers_lock);
++	WARN_ONCE(lv->owner != current,
++		  "__get_buffer without aa_buffers_lock\n");
++}
++
++#elif CONFIG_DEBUG_PREEMPT
+ #define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X)
+ #else
+ #define AA_BUG_PREEMPT_ENABLED(X) /* nop */
+ #endif
+
++
+ #define __get_buffer(N) ({ \
+ 	struct aa_buffers *__cpu_var; \
+ 	AA_BUG_PREEMPT_ENABLED("__get_buffer without preempt disabled"); \
+@@ -72,14 +85,14 @@ DECLARE_PER_CPU(struct aa_buffers, aa_bu
+
+ #define get_buffers(X...) \
+ do { \
+-	preempt_disable(); \
++	local_lock(aa_buffers_lock); \
+ 	__get_buffers(X); \
+ } while (0)
+
+ #define put_buffers(X, Y...) \
+ do { \
+ 	__put_buffers(X, Y); \
+-	preempt_enable(); \
++	local_unlock(aa_buffers_lock); \
+ } while (0)
+
+ #endif /* __AA_PATH_H */
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -42,7 +42,7 @@
+ int apparmor_initialized __initdata;
+
+ DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
+-
++DEFINE_LOCAL_IRQ_LOCK(aa_buffers_lock);
+
+ /*
+  * LSM hook functions
diff --git a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index d3c5dfd0a55c..fa483e301775 100644
--- a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -1687,10 +1687,11 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_o
+@@ -1689,10 +1689,11 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_o
 
  #endif
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	hrtimer_start_expires(&t->timer, mode);
 
 	if (likely(t->task))
-@@ -1732,7 +1733,8 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1734,7 +1735,8 @@ long __sched hrtimer_nanosleep_restart(s
 				HRTIMER_MODE_ABS, current);
 	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		goto out;
 
 	rmtp = restart->nanosleep.rmtp;
-@@ -1749,8 +1751,10 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1751,8 +1753,10 @@ long __sched hrtimer_nanosleep_restart(s
 	return ret;
 }
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 {
 	struct restart_block *restart;
 	struct hrtimer_sleeper t;
-@@ -1764,7 +1768,7 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1766,7 +1770,7 @@ long hrtimer_nanosleep(struct timespec *
 	hrtimer_init_sleeper_on_stack(&t, clockid, mode, current);
 	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		goto out;
 
 	/* Absolute timers do not update the rmtp value and restart: */
-@@ -1791,6 +1795,12 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1793,6 +1797,12 @@ long hrtimer_nanosleep(struct timespec *
 	return ret;
 }
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
 		struct timespec __user *, rmtp)
 {
-@@ -1817,7 +1827,8 @@ void cpu_chill(void)
+@@ -1819,7 +1829,8 @@ void cpu_chill(void)
 	unsigned int freeze_flag = current->flags & PF_NOFREEZE;
 
 	current->flags |= PF_NOFREEZE;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 340816c8febc..0cccc7790a5d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt15
++-rt16
diff --git a/patches/rt-introduce-cpu-chill.patch b/patches/rt-introduce-cpu-chill.patch
index 47e01a46017c..51b2ac0ed9cb 100644
--- a/patches/rt-introduce-cpu-chill.patch
+++ b/patches/rt-introduce-cpu-chill.patch
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  #endif /* defined(_LINUX_DELAY_H) */
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -1805,6 +1805,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
+@@ -1807,6 +1807,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
 	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
  }
diff --git a/patches/sched-clock-Initialize-all-per-CPU-state-before-swit.patch b/patches/sched-clock-Initialize-all-per-CPU-state-before-swit.patch
index 1ac686c4f189..30a598e48ada 100644
--- a/patches/sched-clock-Initialize-all-per-CPU-state-before-swit.patch
+++ b/patches/sched-clock-Initialize-all-per-CPU-state-before-swit.patch
@@ -3,6 +3,8 @@ Date: Fri, 21 Apr 2017 12:11:53 +0200
 Subject: [PATCH] sched/clock: Initialize all per-CPU state before switching
  (back) to unstable
 
+commit cf15ca8deda86b27b66e27848b4b0fe58098fc0b upstream.
+
 In preparation for not keeping the sched_clock_tick() active for
 stable TSC, we need to explicitly initialize all per-CPU state
 before switching back to unstable.
diff --git a/patches/series b/patches/series
index d6f4af4c0253..17eb43e9eef9 100644
--- a/patches/series
+++ b/patches/series
@@ -6,6 +6,7 @@
 # UPSTREAM changes queued
 ############################################################
 sched-clock-Initialize-all-per-CPU-state-before-swit.patch
+x86-tsc-sched-clock-clocksource-Use-clocksource-watc.patch
 
 ############################################################
 # UPSTREAM FIXES, patches pending
@@ -142,7 +143,6 @@ hrtimer-Remove-hrtimer_peek_ahead_timers-leftovers.patch
 0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch
 0018-hrtimer-Enable-soft-and-hard-hrtimer.patch
 hrtimer-soft-bases-timekeeping.patch
-time-hrtimer-use-a-MONOTIC-clock-for-relative-REALTI.patch
 0019-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch
 0020-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch
 0021-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch
@@ -433,6 +433,7 @@ x86-kvm-require-const-tsc-for-rt.patch
 hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
 hrtimers-prepare-full-preemption.patch
 hrtimer-by-timers-by-default-into-the-softirq-context.patch
+time-hrtimer-use-a-MONOTIC-clock-for-relative-REALTI.patch
 time-hrtimer-Use-softirq-based-wakeups-for-non-RT-th.patch
 timer-fd-avoid-live-lock.patch
 
@@ -749,6 +750,9 @@ move_sched_delayed_work_to_helper.patch
 # MD
 md-disable-bcache.patch
 
+# Security
+apparmor-use-a-locallock-instead-preempt_disable.patch
+
 # WORKQUEUE SIGH
 workqueue-prevent-deadlock-stall.patch
diff --git a/patches/time-hrtimer-Use-softirq-based-wakeups-for-non-RT-th.patch b/patches/time-hrtimer-Use-softirq-based-wakeups-for-non-RT-th.patch
index 4493e1b8ad5d..26a9dbd0e997 100644
--- a/patches/time-hrtimer-Use-softirq-based-wakeups-for-non-RT-th.patch
+++ b/patches/time-hrtimer-Use-softirq-based-wakeups-for-non-RT-th.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -1604,14 +1604,31 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1606,14 +1606,31 @@ static enum hrtimer_restart hrtimer_wake
 
 	return HRTIMER_NORESTART;
  }
diff --git a/patches/time-hrtimer-use-a-MONOTIC-clock-for-relative-REALTI.patch b/patches/time-hrtimer-use-a-MONOTIC-clock-for-relative-REALTI.patch
index c7da128a48de..829fb928a733 100644
--- a/patches/time-hrtimer-use-a-MONOTIC-clock-for-relative-REALTI.patch
+++ b/patches/time-hrtimer-use-a-MONOTIC-clock-for-relative-REALTI.patch
@@ -10,14 +10,12 @@ for the SOFT-irq based clock but was forgotten for the HARD-irq clock.
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 ---
- kernel/time/hrtimer.c | 2 ++
+ kernel/time/hrtimer.c |    2 ++
 1 file changed, 2 insertions(+)
 
-diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index cdd7cd35da11..b7c2ea23a320 100644
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
-@@ -1258,6 +1258,8 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+@@ -1234,6 +1234,8 @@ static void __hrtimer_init(struct hrtime
 		clock_id = CLOCK_MONOTONIC;
 	else if (clock_id == CLOCK_REALTIME_SOFT)
 		clock_id = CLOCK_MONOTONIC_SOFT;
@@ -26,6 +24,3 @@ index cdd7cd35da11..b7c2ea23a320 100644
 	}
 
 	base = hrtimer_clockid_to_base(clock_id);
---
-2.14.2
-
diff --git a/patches/x86-tsc-sched-clock-clocksource-Use-clocksource-watc.patch b/patches/x86-tsc-sched-clock-clocksource-Use-clocksource-watc.patch
new file mode 100644
index 000000000000..b93dd4f855a5
--- /dev/null
+++ b/patches/x86-tsc-sched-clock-clocksource-Use-clocksource-watc.patch
@@ -0,0 +1,149 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 21 Apr 2017 12:14:13 +0200
+Subject: [PATCH] x86/tsc, sched/clock, clocksource: Use clocksource watchdog
+ to provide stable sync points
+
+commit b421b22b00b0011f6a2ce3561176c4e79e640c49 upstream.
+
+Currently we keep sched_clock_tick() active for stable TSC in order to
+keep the per-CPU state semi up-to-date. The (obvious) problem is that
+by the time we detect TSC is borked, our per-CPU state is also borked.
+
+So hook into the clocksource watchdog and call a method after we've
+found it to still be stable.
+
+There's the obvious race where the TSC goes wonky between finding it
+stable and us running the callback, but closing that is too much work
+and not really worth it, since we're already detecting TSC wobbles
+after the fact, so we cannot, per definition, fully avoid funny clock
+values.
+
+And since the watchdog runs less often than the tick, this is also an
+optimization.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+
+---
+ arch/x86/kernel/tsc.c       |   10 ++++++++++
+ include/linux/clocksource.h |    1 +
+ include/linux/sched/clock.h |    2 +-
+ kernel/sched/clock.c        |   36 +++++++++++++++++++++++++++---------
+ kernel/time/clocksource.c   |    3 +++
+ 5 files changed, 42 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1127,6 +1127,15 @@ static void tsc_cs_mark_unstable(struct
+ 	pr_info("Marking TSC unstable due to clocksource watchdog\n");
+ }
+
++static void tsc_cs_tick_stable(struct clocksource *cs)
++{
++	if (tsc_unstable)
++		return;
++
++	if (using_native_sched_clock())
++		sched_clock_tick_stable();
++}
++
+ /*
+  * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
+  */
+@@ -1140,6 +1149,7 @@ static struct clocksource clocksource_ts
+ 	.archdata		= { .vclock_mode = VCLOCK_TSC },
+ 	.resume			= tsc_resume,
+ 	.mark_unstable		= tsc_cs_mark_unstable,
++	.tick_stable		= tsc_cs_tick_stable,
+ };
+
+ void mark_tsc_unstable(char *reason)
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -96,6 +96,7 @@ struct clocksource {
+ 	void (*suspend)(struct clocksource *cs);
+ 	void (*resume)(struct clocksource *cs);
+ 	void (*mark_unstable)(struct clocksource *cs);
++	void (*tick_stable)(struct clocksource *cs);
+
+ 	/* private: */
+ #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
+--- a/include/linux/sched/clock.h
++++ b/include/linux/sched/clock.h
+@@ -63,8 +63,8 @@ extern void clear_sched_clock_stable(voi
+  */
+ extern u64 __sched_clock_offset;
+
+-
+ extern void sched_clock_tick(void);
++extern void sched_clock_tick_stable(void);
+ extern void sched_clock_idle_sleep_event(void);
+ extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+
+--- a/kernel/sched/clock.c
++++ b/kernel/sched/clock.c
+@@ -366,20 +366,38 @@ void sched_clock_tick(void)
+ {
+ 	struct sched_clock_data *scd;
+
++	if (sched_clock_stable())
++		return;
++
++	if (unlikely(!sched_clock_running))
++		return;
++
+ 	WARN_ON_ONCE(!irqs_disabled());
+
+-	/*
+-	 * Update these values even if sched_clock_stable(), because it can
+-	 * become unstable at any point in time at which point we need some
+-	 * values to fall back on.
+-	 *
+-	 * XXX arguably we can skip this if we expose tsc_clocksource_reliable
+-	 */
+ 	scd = this_scd();
+ 	__scd_stamp(scd);
++	sched_clock_local(scd);
++}
+
+-	if (!sched_clock_stable() && likely(sched_clock_running))
+-		sched_clock_local(scd);
++void sched_clock_tick_stable(void)
++{
++	u64 gtod, clock;
++
++	if (!sched_clock_stable())
++		return;
++
++	/*
++	 * Called under watchdog_lock.
++	 *
++	 * The watchdog just found this TSC to (still) be stable, so now is a
++	 * good moment to update our __gtod_offset. Because once we find the
++	 * TSC to be unstable, any computation will be computing crap.
++	 */
++	local_irq_disable();
++	gtod = ktime_get_ns();
++	clock = sched_clock();
++	__gtod_offset = (clock + __sched_clock_offset) - gtod;
++	local_irq_enable();
+ }
+
+ /*
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -233,6 +233,9 @@ static void clocksource_watchdog(unsigne
+ 			continue;
+ 		}
+
++		if (cs == curr_clocksource && cs->tick_stable)
++			cs->tick_stable(cs);
++
+ 		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
+ 		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
+ 		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
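
A closing note on the x86/tsc patch above: the heart of
sched_clock_tick_stable() is the line
__gtod_offset = (clock + __sched_clock_offset) - gtod, which re-anchors the
raw TSC-based clock to the GTOD timeline at a moment the watchdog has just
vouched for. The same arithmetic can be demonstrated in plain user space;
in this sketch CLOCK_MONOTONIC_RAW stands in for the raw sched_clock() and
CLOCK_MONOTONIC for ktime_get_ns(); all names here are illustrative, nothing
is kernel API:

/* Sketch of the gtod-offset computation. Build with: cc -O2 sketch.c */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t read_ns(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	/* Sample both clocks back to back (the kernel does this with IRQs off). */
	uint64_t gtod  = read_ns(CLOCK_MONOTONIC);      /* ktime_get_ns()    */
	uint64_t clock = read_ns(CLOCK_MONOTONIC_RAW);  /* raw sched_clock() */

	/* The offset maps the raw clock onto the GTOD timeline... */
	int64_t gtod_offset = (int64_t)(clock - gtod);

	/* ...so later raw readings can be converted to GTOD time: */
	uint64_t mapped = read_ns(CLOCK_MONOTONIC_RAW) - (uint64_t)gtod_offset;

	printf("raw=%llu gtod=%llu offset=%lld mapped=%llu\n",
	       (unsigned long long)clock, (unsigned long long)gtod,
	       (long long)gtod_offset, (unsigned long long)mapped);
	return 0;
}

Run it and "mapped" should track CLOCK_MONOTONIC to within sampling noise,
which is exactly why the kernel only refreshes the offset while the watchdog
still considers the TSC stable.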