-rw-r--r--  patches/0001-seqlock-seqcount_LOCKTYPE_t-Standardize-naming-conve.patch  272
-rw-r--r--  patches/0001-time-sched_clock-Use-raw_read_seqcount_latch-during-.patch  39
-rw-r--r--  patches/0002-mm-swap-Do-not-abuse-the-seqcount_t-latching-API.patch  151
-rw-r--r--  patches/0002-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch  72
-rw-r--r--  patches/0003-seqlock-Introduce-seqcount_latch_t.patch  225
-rw-r--r--  patches/0004-seqlock-seqcount_LOCKTYPE_t-Introduce-PREEMPT_RT-sup.patch  184
-rw-r--r--  patches/0004-time-sched_clock-Use-seqcount_latch_t.patch  39
-rw-r--r--  patches/0005-timekeeping-Use-seqcount_latch_t.patch  86
-rw-r--r--  patches/0006-x86-tsc-Use-seqcount_latch_t.patch  66
-rw-r--r--  patches/0007-rbtree_latch-Use-seqcount_latch_t.patch  41
-rw-r--r--  patches/0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch  85
-rw-r--r--  patches/0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch  250
-rw-r--r--  patches/0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch  109
-rw-r--r--  patches/0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch (renamed from patches/0003-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch)  79
-rw-r--r--  patches/0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch  139
-rw-r--r--  patches/0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch (renamed from patches/0005-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch)  40
-rw-r--r--  patches/localversion.patch  2
-rw-r--r--  patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch  2
-rw-r--r--  patches/net-Properly-annotate-the-try-lock-for-the-seqlock.patch  61
-rw-r--r--  patches/oleg-signal-rt-fix.patch  2
-rw-r--r--  patches/seqlock-Fix-multiple-kernel-doc-warnings.patch  77
-rw-r--r--  patches/series  25
-rw-r--r--  patches/skbufhead-raw-lock.patch  4
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch  2
-rw-r--r--  patches/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch  2
-rw-r--r--  patches/x86-preempt-lazy.patch  2
26 files changed, 1456 insertions, 600 deletions
diff --git a/patches/0001-seqlock-seqcount_LOCKTYPE_t-Standardize-naming-conve.patch b/patches/0001-seqlock-seqcount_LOCKTYPE_t-Standardize-naming-conve.patch
deleted file mode 100644
index 5e7306638ea3..000000000000
--- a/patches/0001-seqlock-seqcount_LOCKTYPE_t-Standardize-naming-conve.patch
+++ /dev/null
@@ -1,272 +0,0 @@
-From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
-Date: Wed, 19 Aug 2020 11:36:55 +0200
-Subject: [PATCH 1/5] seqlock: seqcount_LOCKTYPE_t: Standardize naming
- convention
-
-At seqlock.h, sequence counters with associated locks are either called
-seqcount_LOCKNAME_t, seqcount_LOCKTYPE_t, or seqcount_locktype_t.
-
-Standardize on "seqcount_LOCKTYPE_t" for all instances in comments,
-kernel-doc, and SEQCOUNT_LOCKTYPE() generative macro parameters.
-
-Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/seqlock.h | 84 ++++++++++++++++++++++++------------------------
- 1 file changed, 43 insertions(+), 41 deletions(-)
-
---- a/include/linux/seqlock.h
-+++ b/include/linux/seqlock.h
-@@ -138,56 +138,58 @@ static inline void seqcount_lockdep_read
- #endif
-
- /**
-- * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPR associated
-+ * typedef seqcount_LOCKTYPE_t - sequence counter with associated lock
- * @seqcount: The real sequence counter
- * @lock: Pointer to the associated spinlock
- *
-- * A plain sequence counter with external writer synchronization by a
-- * spinlock. The spinlock is associated to the sequence count in the
-+ * A plain sequence counter with external writer synchronization by
-+ * LOCKTYPE @lock. The lock is associated to the sequence counter in the
- * static initializer or init function. This enables lockdep to validate
- * that the write side critical section is properly serialized.
-+ *
-+ * LOCKTYPE: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
- */
-
- /**
-- * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
-- * @s: Pointer to the seqcount_LOCKNAME_t instance
-+ * seqcount_LOCKTYPE_init() - runtime initializer for seqcount_LOCKTYPE_t
-+ * @s: Pointer to the seqcount_LOCKTYPE_t instance
- * @lock: Pointer to the associated LOCKTYPE
- */
-
- /*
-- * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
-- * @locktype: actual typename
-- * @lockname: name
-+ * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKTYPE_t and helpers
-+ * @locktype: "LOCKTYPE" part of seqcount_LOCKTYPE_t
-+ * @locktype_t: canonical/full LOCKTYPE C data type
- * @preemptible: preemptibility of above locktype
- * @lockmember: argument for lockdep_assert_held()
- */
--#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \
--typedef struct seqcount_##lockname { \
-+#define SEQCOUNT_LOCKTYPE(locktype, locktype_t, preemptible, lockmember) \
-+typedef struct seqcount_##locktype { \
- seqcount_t seqcount; \
-- __SEQ_LOCK(locktype *lock); \
--} seqcount_##lockname##_t; \
-+ __SEQ_LOCK(locktype_t *lock); \
-+} seqcount_##locktype##_t; \
- \
- static __always_inline void \
--seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
-+seqcount_##locktype##_init(seqcount_##locktype##_t *s, locktype_t *lock)\
- { \
- seqcount_init(&s->seqcount); \
- __SEQ_LOCK(s->lock = lock); \
- } \
- \
- static __always_inline seqcount_t * \
--__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
-+__seqcount_##locktype##_ptr(seqcount_##locktype##_t *s) \
- { \
- return &s->seqcount; \
- } \
- \
- static __always_inline bool \
--__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \
-+__seqcount_##locktype##_preemptible(seqcount_##locktype##_t *s) \
- { \
- return preemptible; \
- } \
- \
- static __always_inline void \
--__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
-+__seqcount_##locktype##_assert(seqcount_##locktype##_t *s) \
- { \
- __SEQ_LOCK(lockdep_assert_held(lockmember)); \
- }
-@@ -211,15 +213,15 @@ static inline void __seqcount_assert(seq
- lockdep_assert_preemption_disabled();
- }
-
--SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock)
--SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock)
--SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock)
--SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock)
--SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
-+SEQCOUNT_LOCKTYPE(raw_spinlock, raw_spinlock_t, false, s->lock)
-+SEQCOUNT_LOCKTYPE(spinlock, spinlock_t, false, s->lock)
-+SEQCOUNT_LOCKTYPE(rwlock, rwlock_t, false, s->lock)
-+SEQCOUNT_LOCKTYPE(mutex, struct mutex, true, s->lock)
-+SEQCOUNT_LOCKTYPE(ww_mutex, struct ww_mutex, true, &s->lock->base)
-
- /**
-- * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
-- * @name: Name of the seqcount_LOCKNAME_t instance
-+ * SEQCNT_LOCKTYPE_ZERO - static initializer for seqcount_LOCKTYPE_t
-+ * @name: Name of the seqcount_LOCKTYPE_t instance
- * @lock: Pointer to the associated LOCKTYPE
- */
-
-@@ -235,8 +237,8 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mu
- #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-
-
--#define __seqprop_case(s, lockname, prop) \
-- seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
-+#define __seqprop_case(s, locktype, prop) \
-+ seqcount_##locktype##_t: __seqcount_##locktype##_##prop((void *)(s))
-
- #define __seqprop(s, prop) _Generic(*(s), \
- seqcount_t: __seqcount_##prop((void *)(s)), \
-@@ -252,7 +254,7 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mu
-
- /**
- * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
- * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
-@@ -283,7 +285,7 @@ static inline unsigned __read_seqcount_t
-
- /**
- * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * Return: count to be passed to read_seqcount_retry()
- */
-@@ -299,7 +301,7 @@ static inline unsigned raw_read_seqcount
-
- /**
- * read_seqcount_begin() - begin a seqcount_t read critical section
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * Return: count to be passed to read_seqcount_retry()
- */
-@@ -314,7 +316,7 @@ static inline unsigned read_seqcount_t_b
-
- /**
- * raw_read_seqcount() - read the raw seqcount_t counter value
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * raw_read_seqcount opens a read critical section of the given
- * seqcount_t, without any lockdep checking, and without checking or
-@@ -337,7 +339,7 @@ static inline unsigned raw_read_seqcount
- /**
- * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
- * lockdep and w/o counter stabilization
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * raw_seqcount_begin opens a read critical section of the given
- * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
-@@ -365,7 +367,7 @@ static inline unsigned raw_seqcount_t_be
-
- /**
- * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- * @start: count, from read_seqcount_begin()
- *
- * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
-@@ -389,7 +391,7 @@ static inline int __read_seqcount_t_retr
-
- /**
- * read_seqcount_retry() - end a seqcount_t read critical section
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- * @start: count, from read_seqcount_begin()
- *
- * read_seqcount_retry closes the read critical section of given
-@@ -409,7 +411,7 @@ static inline int read_seqcount_t_retry(
-
- /**
- * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- */
- #define raw_write_seqcount_begin(s) \
- do { \
-@@ -428,7 +430,7 @@ static inline void raw_write_seqcount_t_
-
- /**
- * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- */
- #define raw_write_seqcount_end(s) \
- do { \
-@@ -448,7 +450,7 @@ static inline void raw_write_seqcount_t_
- /**
- * write_seqcount_begin_nested() - start a seqcount_t write section with
- * custom lockdep nesting level
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- * @subclass: lockdep nesting level
- *
- * See Documentation/locking/lockdep-design.rst
-@@ -471,7 +473,7 @@ static inline void write_seqcount_t_begi
-
- /**
- * write_seqcount_begin() - start a seqcount_t write side critical section
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * write_seqcount_begin opens a write side critical section of the given
- * seqcount_t.
-@@ -497,7 +499,7 @@ static inline void write_seqcount_t_begi
-
- /**
- * write_seqcount_end() - end a seqcount_t write side critical section
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * The write section must've been opened with write_seqcount_begin().
- */
-@@ -517,7 +519,7 @@ static inline void write_seqcount_t_end(
-
- /**
- * raw_write_seqcount_barrier() - do a seqcount_t write barrier
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * This can be used to provide an ordering guarantee instead of the usual
- * consistency guarantee. It is one wmb cheaper, because it can collapse
-@@ -571,7 +573,7 @@ static inline void raw_write_seqcount_t_
- /**
- * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
- * side operations
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * After write_seqcount_invalidate, no seqcount_t read side operations
- * will complete successfully and see data older than this.
-@@ -589,7 +591,7 @@ static inline void write_seqcount_t_inva
-
- /**
- * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * Use seqcount_t latching to switch between two storage places protected
- * by a sequence counter. Doing so allows having interruptible, preemptible,
-@@ -614,7 +616,7 @@ static inline int raw_read_seqcount_t_la
-
- /**
- * raw_write_seqcount_latch() - redirect readers to even/odd copy
-- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
-+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- *
- * The latch technique is a multiversion concurrency control method that allows
- * queries during non-atomic modifications. If you can guarantee queries never
diff --git a/patches/0001-time-sched_clock-Use-raw_read_seqcount_latch-during-.patch b/patches/0001-time-sched_clock-Use-raw_read_seqcount_latch-during-.patch
new file mode 100644
index 000000000000..3c9666b632e4
--- /dev/null
+++ b/patches/0001-time-sched_clock-Use-raw_read_seqcount_latch-during-.patch
@@ -0,0 +1,39 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Thu, 27 Aug 2020 13:40:37 +0200
+Subject: [PATCH 01/13] time/sched_clock: Use raw_read_seqcount_latch() during
+ suspend
+
+sched_clock uses seqcount_t latching to switch between two storage
+places protected by the sequence counter. This allows it to have
+interruptible, NMI-safe, seqcount_t write side critical sections.
+
+Since 7fc26327b756 ("seqlock: Introduce raw_read_seqcount_latch()"),
+raw_read_seqcount_latch() became the standardized way for seqcount_t
+latch read paths. Due to the dependent load, it has one read memory
+barrier less than the currently used raw_read_seqcount() API.
+
+Use raw_read_seqcount_latch() for the suspend path.
+
+Commit aadd6e5caaac ("time/sched_clock: Use raw_read_seqcount_latch()")
+missed changing that instance of raw_read_seqcount().
+
+References: 1809bfa44e10 ("timers, sched/clock: Avoid deadlock during read from NMI")
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200715092345.GA231464@debian-buster-darwi.lab.linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/sched_clock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/sched_clock.c
++++ b/kernel/time/sched_clock.c
+@@ -258,7 +258,7 @@ void __init generic_sched_clock_init(voi
+ */
+ static u64 notrace suspended_sched_clock_read(void)
+ {
+- unsigned int seq = raw_read_seqcount(&cd.seq);
++ unsigned int seq = raw_read_seqcount_latch(&cd.seq);
+
+ return cd.read_data[seq & 1].epoch_cyc;
+ }
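For orientation, the latch read pattern that raw_read_seqcount_latch() serves looks roughly like the sketch below. The structure and function names are invented for illustration and do not appear in kernel/time/sched_clock.c; the suspend path changed above only needs the even/odd index, which is why it reads the counter once and does not retry.

        /* Illustrative only: two data copies selected by the sequence LSB. */
        struct latched_u64 {
                seqcount_t seq;         /* converted to seqcount_latch_t later in this series */
                u64 copy[2];
        };

        static u64 latched_u64_read(struct latched_u64 *d)
        {
                unsigned int seq;
                u64 val;

                do {
                        /* Dependent load: no extra smp_rmb() is needed here. */
                        seq = raw_read_seqcount_latch(&d->seq);
                        val = d->copy[seq & 1];
                } while (read_seqcount_retry(&d->seq, seq));

                return val;
        }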
diff --git a/patches/0002-mm-swap-Do-not-abuse-the-seqcount_t-latching-API.patch b/patches/0002-mm-swap-Do-not-abuse-the-seqcount_t-latching-API.patch
new file mode 100644
index 000000000000..f8f3f37364c3
--- /dev/null
+++ b/patches/0002-mm-swap-Do-not-abuse-the-seqcount_t-latching-API.patch
@@ -0,0 +1,151 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Thu, 27 Aug 2020 13:40:38 +0200
+Subject: [PATCH 02/13] mm/swap: Do not abuse the seqcount_t latching API
+
+Commit eef1a429f234 ("mm/swap.c: piggyback lru_add_drain_all() calls")
+implemented an optimization mechanism to exit the to-be-started LRU
+drain operation (name it A) if another drain operation *started and
+finished* while (A) was blocked on the LRU draining mutex.
+
+This was done through a seqcount_t latch, which is an abuse of its
+semantics:
+
+ 1. seqcount_t latching should be used for the purpose of switching
+ between two storage places with sequence protection to allow
+ interruptible, preemptible, writer sections. The referenced
+ optimization mechanism has absolutely nothing to do with that.
+
+ 2. The used raw_write_seqcount_latch() has two SMP write memory
+ barriers to ensure one consistent storage place out of the two
+ storage places available. A full memory barrier is required
+ instead: to guarantee that the pagevec counter stores visible by
+ local CPU are visible to other CPUs -- before loading the current
+ drain generation.
+
+Beside the seqcount_t API abuse, the semantics of a latch sequence
+counter was force-fitted into the referenced optimization. What was
+meant is to track "generations" of LRU draining operations, where
+"global lru draining generation = x" implies that all generations
+0 < n <= x are already *scheduled* for draining -- thus nothing needs
+to be done if the current generation number n <= x.
+
+Remove the conceptually-inappropriate seqcount_t latch usage. Manually
+implement the referenced optimization using a counter and SMP memory
+barriers.
+
+Note, while at it, use the non-atomic variant of cpumask_set_cpu(),
+__cpumask_set_cpu(), due to the already existing mutex protection.
+
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/87y2pg9erj.fsf@vostro.fn.ogness.net
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/swap.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 54 insertions(+), 11 deletions(-)
+
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -763,10 +763,20 @@ static void lru_add_drain_per_cpu(struct
+ */
+ void lru_add_drain_all(void)
+ {
+- static seqcount_t seqcount = SEQCNT_ZERO(seqcount);
+- static DEFINE_MUTEX(lock);
++ /*
++ * lru_drain_gen - Global pages generation number
++ *
++ * (A) Definition: global lru_drain_gen = x implies that all generations
++ * 0 < n <= x are already *scheduled* for draining.
++ *
++ * This is an optimization for the highly-contended use case where a
++ * user space workload keeps constantly generating a flow of pages for
++ * each CPU.
++ */
++ static unsigned int lru_drain_gen;
+ static struct cpumask has_work;
+- int cpu, seq;
++ static DEFINE_MUTEX(lock);
++ unsigned cpu, this_gen;
+
+ /*
+ * Make sure nobody triggers this path before mm_percpu_wq is fully
+@@ -775,21 +785,54 @@ void lru_add_drain_all(void)
+ if (WARN_ON(!mm_percpu_wq))
+ return;
+
+- seq = raw_read_seqcount_latch(&seqcount);
++ /*
++ * Guarantee pagevec counter stores visible by this CPU are visible to
++ * other CPUs before loading the current drain generation.
++ */
++ smp_mb();
++
++ /*
++ * (B) Locally cache global LRU draining generation number
++ *
++ * The read barrier ensures that the counter is loaded before the mutex
++ * is taken. It pairs with smp_mb() inside the mutex critical section
++ * at (D).
++ */
++ this_gen = smp_load_acquire(&lru_drain_gen);
+
+ mutex_lock(&lock);
+
+ /*
+- * Piggyback on drain started and finished while we waited for lock:
+- * all pages pended at the time of our enter were drained from vectors.
++ * (C) Exit the draining operation if a newer generation, from another
++ * lru_add_drain_all(), was already scheduled for draining. Check (A).
+ */
+- if (__read_seqcount_retry(&seqcount, seq))
++ if (unlikely(this_gen != lru_drain_gen))
+ goto done;
+
+- raw_write_seqcount_latch(&seqcount);
++ /*
++ * (D) Increment global generation number
++ *
++ * Pairs with smp_load_acquire() at (B), outside of the critical
++ * section. Use a full memory barrier to guarantee that the new global
++ * drain generation number is stored before loading pagevec counters.
++ *
++ * This pairing must be done here, before the for_each_online_cpu loop
++ * below which drains the page vectors.
++ *
++ * Let x, y, and z represent some system CPU numbers, where x < y < z.
++ * Assume CPU #z is in the middle of the for_each_online_cpu loop
++ * below and has already reached CPU #y's per-cpu data. CPU #x comes
++ * along, adds some pages to its per-cpu vectors, then calls
++ * lru_add_drain_all().
++ *
++ * If the paired barrier is done at any later step, e.g. after the
++ * loop, CPU #x will just exit at (C) and miss flushing out all of its
++ * added pages.
++ */
++ WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
++ smp_mb();
+
+ cpumask_clear(&has_work);
+-
+ for_each_online_cpu(cpu) {
+ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+@@ -801,7 +844,7 @@ void lru_add_drain_all(void)
+ need_activate_page_drain(cpu)) {
+ INIT_WORK(work, lru_add_drain_per_cpu);
+ queue_work_on(cpu, mm_percpu_wq, work);
+- cpumask_set_cpu(cpu, &has_work);
++ __cpumask_set_cpu(cpu, &has_work);
+ }
+ }
+
+@@ -816,7 +859,7 @@ void lru_add_drain_all(void)
+ {
+ lru_add_drain();
+ }
+-#endif
++#endif /* CONFIG_SMP */
+
+ /**
+ * release_pages - batched put_page()
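Condensed into a hypothetical, stand-alone form, the generation scheme introduced above reads as follows; the names are invented and the sketch adds nothing beyond what the (A)-(D) comments in the hunk already state.

        /* Sketch of the lru_add_drain_all() generation check (names invented). */
        static unsigned int drain_gen;          /* gen == x: generations 0 < n <= x scheduled */
        static DEFINE_MUTEX(drain_lock);

        static void drain_all_sketch(void)
        {
                unsigned int this_gen;

                smp_mb();                               /* local pagevec stores before gen load */
                this_gen = smp_load_acquire(&drain_gen);/* (B) snapshot generation before lock  */

                mutex_lock(&drain_lock);

                if (this_gen != drain_gen)              /* (C) a newer drain was already queued */
                        goto done;

                WRITE_ONCE(drain_gen, drain_gen + 1);   /* (D) publish the new generation       */
                smp_mb();                               /* gen store before reading pagevecs    */

                /* ... queue the per-CPU drain work and flush it here ... */
        done:
                mutex_unlock(&drain_lock);
        }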
diff --git a/patches/0002-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch b/patches/0002-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
deleted file mode 100644
index 915331a3d93f..000000000000
--- a/patches/0002-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
-Date: Wed, 19 Aug 2020 11:36:56 +0200
-Subject: [PATCH 2/5] seqlock: Use unique prefix for seqcount_t property
- accessors
-
-At seqlock.h, the following set of functions:
-
- - __seqcount_ptr()
- - __seqcount_preemptible()
- - __seqcount_assert()
-
-act as plain seqcount_t "property" accessors. Meanwhile, the following
-macros:
-
- - __seqcount_ptr()
- - __seqcount_lock_preemptible()
- - __seqcount_assert_lock_held()
-
-act as the equivalent set, but in the generic form, taking either
-seqcount_t or any of the seqcount_LOCKTYPE_t variants.
-
-This is quite confusing, especially the first member where it is called
-exactly the same in both groups.
-
-Differentiate the first group by using "__seqcount_t_" as prefix for all
-of its members. This also conforms with the rest of seqlock.h naming
-conventions.
-
-References: 55f3560df975 ("seqlock: Extend seqcount API with associated locks")
-Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/seqlock.h | 9 ++++-----
- 1 file changed, 4 insertions(+), 5 deletions(-)
-
---- a/include/linux/seqlock.h
-+++ b/include/linux/seqlock.h
-@@ -198,17 +198,17 @@ static __always_inline void \
- * __seqprop() for seqcount_t
- */
-
--static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
-+static inline seqcount_t *__seqcount_t_ptr(seqcount_t *s)
- {
- return s;
- }
-
--static inline bool __seqcount_preemptible(seqcount_t *s)
-+static inline bool __seqcount_t_preemptible(seqcount_t *s)
- {
- return false;
- }
-
--static inline void __seqcount_assert(seqcount_t *s)
-+static inline void __seqcount_t_assert(seqcount_t *s)
- {
- lockdep_assert_preemption_disabled();
- }
-@@ -236,12 +236,11 @@ SEQCOUNT_LOCKTYPE(ww_mutex, struct ww_mu
- #define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
- #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-
--
- #define __seqprop_case(s, locktype, prop) \
- seqcount_##locktype##_t: __seqcount_##locktype##_##prop((void *)(s))
-
- #define __seqprop(s, prop) _Generic(*(s), \
-- seqcount_t: __seqcount_##prop((void *)(s)), \
-+ seqcount_t: __seqcount_t_##prop((void *)(s)), \
- __seqprop_case((s), raw_spinlock, prop), \
- __seqprop_case((s), spinlock, prop), \
- __seqprop_case((s), rwlock, prop), \
diff --git a/patches/0003-seqlock-Introduce-seqcount_latch_t.patch b/patches/0003-seqlock-Introduce-seqcount_latch_t.patch
new file mode 100644
index 000000000000..d68d4614cf56
--- /dev/null
+++ b/patches/0003-seqlock-Introduce-seqcount_latch_t.patch
@@ -0,0 +1,225 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Thu, 27 Aug 2020 13:40:39 +0200
+Subject: [PATCH 03/13] seqlock: Introduce seqcount_latch_t
+
+Latch sequence counters are a multiversion concurrency control mechanism
+where the seqcount_t counter even/odd value is used to switch between
+two copies of protected data. This allows the seqcount_t read path to
+safely interrupt its write side critical section (e.g. from NMIs).
+
+Initially, latch sequence counters were implemented as a single write
+function above plain seqcount_t: raw_write_seqcount_latch(). The read
+side was expected to use plain seqcount_t raw_read_seqcount().
+
+A specialized latch read function, raw_read_seqcount_latch(), was later
+added. It became the standardized way for latch read paths. Due to the
+dependent load, it has one read memory barrier less than the plain
+seqcount_t raw_read_seqcount() API.
+
+Only raw_write_seqcount_latch() and raw_read_seqcount_latch() should be
+used with latch sequence counters. Having *unique* read and write path
+APIs means that latch sequence counters are actually a data type of
+their own -- just inappropriately overloading plain seqcount_t.
+
+Introduce seqcount_latch_t. This adds type-safety and ensures that only
+the correct latch-safe APIs are to be used.
+
+Not to break bisection, let the latch APIs also accept plain seqcount_t
+or seqcount_raw_spinlock_t. After converting all call sites to
+seqcount_latch_t, only that new data type will be allowed.
+
+References: 9b0fd802e8c0 ("seqcount: Add raw_write_seqcount_latch()")
+References: 7fc26327b756 ("seqlock: Introduce raw_read_seqcount_latch()")
+References: aadd6e5caaac ("time/sched_clock: Use raw_read_seqcount_latch()")
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200827114044.11173-4-a.darwish@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/locking/seqlock.rst | 18 ++++++
+ include/linux/seqlock.h | 104 ++++++++++++++++++++++++++------------
+ 2 files changed, 91 insertions(+), 31 deletions(-)
+
+--- a/Documentation/locking/seqlock.rst
++++ b/Documentation/locking/seqlock.rst
+@@ -139,6 +139,24 @@ with the associated LOCKTYPE lock acquir
+
+ Read path: same as in :ref:`seqcount_t`.
+
++
++.. _seqcount_latch_t:
++
++Latch sequence counters (``seqcount_latch_t``)
++----------------------------------------------
++
++Latch sequence counters are a multiversion concurrency control mechanism
++where the embedded seqcount_t counter even/odd value is used to switch
++between two copies of protected data. This allows the sequence counter
++read path to safely interrupt its own write side critical section.
++
++Use seqcount_latch_t when the write side sections cannot be protected
++from interruption by readers. This is typically the case when the read
++side can be invoked from NMI handlers.
++
++Check `raw_write_seqcount_latch()` for more information.
++
++
+ .. _seqlock_t:
+
+ Sequential locks (``seqlock_t``)
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -587,34 +587,76 @@ static inline void write_seqcount_t_inva
+ kcsan_nestable_atomic_end();
+ }
+
+-/**
+- * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++/*
++ * Latch sequence counters (seqcount_latch_t)
++ *
++ * A sequence counter variant where the counter even/odd value is used to
++ * switch between two copies of protected data. This allows the read path,
++ * typically NMIs, to safely interrupt the write side critical section.
+ *
+- * Use seqcount_t latching to switch between two storage places protected
+- * by a sequence counter. Doing so allows having interruptible, preemptible,
+- * seqcount_t write side critical sections.
++ * As the write sections are fully preemptible, no special handling for
++ * PREEMPT_RT is needed.
++ */
++typedef struct {
++ seqcount_t seqcount;
++} seqcount_latch_t;
++
++/**
++ * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
++ * @seq_name: Name of the seqcount_latch_t instance
++ */
++#define SEQCNT_LATCH_ZERO(seq_name) { \
++ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
++}
++
++/**
++ * seqcount_latch_init() - runtime initializer for seqcount_latch_t
++ * @s: Pointer to the seqcount_latch_t instance
++ */
++static inline void seqcount_latch_init(seqcount_latch_t *s)
++{
++ seqcount_init(&s->seqcount);
++}
++
++/**
++ * raw_read_seqcount_latch() - pick even/odd latch data copy
++ * @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
+ *
+- * Check raw_write_seqcount_latch() for more details and a full reader and
+- * writer usage example.
++ * See raw_write_seqcount_latch() for details and a full reader/writer
++ * usage example.
+ *
+ * Return: sequence counter raw value. Use the lowest bit as an index for
+- * picking which data copy to read. The full counter value must then be
+- * checked with read_seqcount_retry().
++ * picking which data copy to read. The full counter must then be checked
++ * with read_seqcount_latch_retry().
+ */
+-#define raw_read_seqcount_latch(s) \
+- raw_read_seqcount_t_latch(__seqcount_ptr(s))
++#define raw_read_seqcount_latch(s) \
++({ \
++ /* \
++ * Pairs with the first smp_wmb() in raw_write_seqcount_latch(). \
++ * Due to the dependent load, a full smp_rmb() is not needed. \
++ */ \
++ _Generic(*(s), \
++ seqcount_t: READ_ONCE(((seqcount_t *)s)->sequence), \
++ seqcount_raw_spinlock_t: READ_ONCE(((seqcount_raw_spinlock_t *)s)->seqcount.sequence), \
++ seqcount_latch_t: READ_ONCE(((seqcount_latch_t *)s)->seqcount.sequence)); \
++})
+
+-static inline int raw_read_seqcount_t_latch(seqcount_t *s)
++/**
++ * read_seqcount_latch_retry() - end a seqcount_latch_t read section
++ * @s: Pointer to seqcount_latch_t
++ * @start: count, from raw_read_seqcount_latch()
++ *
++ * Return: true if a read section retry is required, else false
++ */
++static inline int
++read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+ {
+- /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
+- int seq = READ_ONCE(s->sequence); /* ^^^ */
+- return seq;
++ return read_seqcount_retry(&s->seqcount, start);
+ }
+
+ /**
+- * raw_write_seqcount_latch() - redirect readers to even/odd copy
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
++ * @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
+ *
+ * The latch technique is a multiversion concurrency control method that allows
+ * queries during non-atomic modifications. If you can guarantee queries never
+@@ -633,7 +675,7 @@ static inline int raw_read_seqcount_t_la
+ * The basic form is a data structure like::
+ *
+ * struct latch_struct {
+- * seqcount_t seq;
++ * seqcount_latch_t seq;
+ * struct data_struct data[2];
+ * };
+ *
+@@ -643,13 +685,13 @@ static inline int raw_read_seqcount_t_la
+ * void latch_modify(struct latch_struct *latch, ...)
+ * {
+ * smp_wmb(); // Ensure that the last data[1] update is visible
+- * latch->seq++;
++ * latch->seq.sequence++;
+ * smp_wmb(); // Ensure that the seqcount update is visible
+ *
+ * modify(latch->data[0], ...);
+ *
+ * smp_wmb(); // Ensure that the data[0] update is visible
+- * latch->seq++;
++ * latch->seq.sequence++;
+ * smp_wmb(); // Ensure that the seqcount update is visible
+ *
+ * modify(latch->data[1], ...);
+@@ -668,8 +710,8 @@ static inline int raw_read_seqcount_t_la
+ * idx = seq & 0x01;
+ * entry = data_query(latch->data[idx], ...);
+ *
+- * // read_seqcount_retry() includes needed smp_rmb()
+- * } while (read_seqcount_retry(&latch->seq, seq));
++ * // This includes needed smp_rmb()
++ * } while (read_seqcount_latch_retry(&latch->seq, seq));
+ *
+ * return entry;
+ * }
+@@ -693,14 +735,14 @@ static inline int raw_read_seqcount_t_la
+ * When data is a dynamic data structure; one should use regular RCU
+ * patterns to manage the lifetimes of the objects within.
+ */
+-#define raw_write_seqcount_latch(s) \
+- raw_write_seqcount_t_latch(__seqcount_ptr(s))
+-
+-static inline void raw_write_seqcount_t_latch(seqcount_t *s)
+-{
+- smp_wmb(); /* prior stores before incrementing "sequence" */
+- s->sequence++;
+- smp_wmb(); /* increment "sequence" before following stores */
++#define raw_write_seqcount_latch(s) \
++{ \
++ smp_wmb(); /* prior stores before incrementing "sequence" */ \
++ _Generic(*(s), \
++ seqcount_t: ((seqcount_t *)s)->sequence++, \
++ seqcount_raw_spinlock_t:((seqcount_raw_spinlock_t *)s)->seqcount.sequence++, \
++ seqcount_latch_t: ((seqcount_latch_t *)s)->seqcount.sequence++); \
++ smp_wmb(); /* increment "sequence" before following stores */ \
+ }
+
+ /*
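As a quick usage illustration of the new type (all names below are hypothetical; the canonical reader/writer example is the raw_write_seqcount_latch() kernel-doc shown in this very patch):

        /* Hypothetical seqcount_latch_t user. */
        static struct {
                seqcount_latch_t seq;
                u64 data[2];
        } lv = { .seq = SEQCNT_LATCH_ZERO(lv.seq) };

        static void lv_update(u64 val)          /* writer, serialized externally */
        {
                raw_write_seqcount_latch(&lv.seq);      /* odd: readers now use data[1]  */
                lv.data[0] = val;
                raw_write_seqcount_latch(&lv.seq);      /* even: readers now use data[0] */
                lv.data[1] = val;
        }

        static u64 lv_read(void)                /* safe even when it interrupts lv_update() */
        {
                unsigned int seq;
                u64 val;

                do {
                        seq = raw_read_seqcount_latch(&lv.seq);
                        val = lv.data[seq & 1];
                } while (read_seqcount_latch_retry(&lv.seq, seq));

                return val;
        }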
diff --git a/patches/0004-seqlock-seqcount_LOCKTYPE_t-Introduce-PREEMPT_RT-sup.patch b/patches/0004-seqlock-seqcount_LOCKTYPE_t-Introduce-PREEMPT_RT-sup.patch
deleted file mode 100644
index d7985cdd4550..000000000000
--- a/patches/0004-seqlock-seqcount_LOCKTYPE_t-Introduce-PREEMPT_RT-sup.patch
+++ /dev/null
@@ -1,184 +0,0 @@
-From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
-Date: Wed, 19 Aug 2020 11:36:58 +0200
-Subject: [PATCH 4/5] seqlock: seqcount_LOCKTYPE_t: Introduce PREEMPT_RT
- support
-
-Preemption must be disabled before entering a sequence counter write
-side critical section. Failing to do so, the read side section can
-preempt the write side section and spin for the entire scheduler tick.
-If that reader belongs to a real-time scheduling class, it can spin
-forever and the kernel will livelock.
-
-Disabling preemption cannot be done for PREEMPT_RT though. It can lead
-to higher latencies and the write side sections will not be able to
-acquire locks which become sleeping locks (e.g. spinlock_t).
-
-To solve this dilemma, do not disable preemption, and just detect if a
-seqcount_LOCKTYPE_t writer is in progress. If that's the case, acquire
-then release the associated LOCKTYPE writer serialization lock. This
-will break any possible livelock by allowing the possibly-preempted
-writer to make progress until the end of its writer serialization lock
-critical section.
-
-Implement this technique for all of PREEMPT_RT sleeping locks.
-
-Link: https://lkml.kernel.org/r/159708609435.2571.13948681727529247231.tglx@nanos
-Link: https://lkml.kernel.org/r/20200519214547.352050-1-a.darwish@linutronix.de
-References: 55f3560df975 ("seqlock: Extend seqcount API with associated locks")
-Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/seqlock.h | 72 +++++++++++++++++++++++++++++++++++++++---------
- 1 file changed, 59 insertions(+), 13 deletions(-)
-
---- a/include/linux/seqlock.h
-+++ b/include/linux/seqlock.h
-@@ -17,6 +17,7 @@
- #include <linux/kcsan-checks.h>
- #include <linux/lockdep.h>
- #include <linux/mutex.h>
-+#include <linux/ww_mutex.h>
- #include <linux/preempt.h>
- #include <linux/spinlock.h>
-
-@@ -131,7 +132,23 @@ static inline void seqcount_lockdep_read
- * See Documentation/locking/seqlock.rst
- */
-
--#ifdef CONFIG_LOCKDEP
-+/*
-+ * For PREEMPT_RT, seqcount_LOCKTYPE_t write side critical sections cannot
-+ * disable preemption. It can lead to higher latencies and the write side
-+ * sections will not be able to acquire locks which become sleeping locks
-+ * in RT (e.g. spinlock_t).
-+ *
-+ * To remain preemptible while avoiding a possible livelock caused by a
-+ * reader preempting the write section, use a different technique: detect
-+ * if a seqcount_LOCKTYPE_t writer is in progress. If that is the case,
-+ * acquire then release the associated LOCKTYPE writer serialization
-+ * lock. This will force any possibly preempted writer to make progress
-+ * until the end of its writer serialization lock critical section.
-+ *
-+ * This lock-unlock technique must be implemented for all PREEMPT_RT
-+ * sleeping locks. See Documentation/locking/locktypes.rst
-+ */
-+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
- #define __SEQ_LOCK(expr) expr
- #else
- #define __SEQ_LOCK(expr)
-@@ -162,8 +179,10 @@ static inline void seqcount_lockdep_read
- * @locktype_t: canonical/full LOCKTYPE C data type
- * @preemptible: preemptibility of above locktype
- * @lockmember: argument for lockdep_assert_held()
-+ * @lockbase: associated lock release function (prefix only)
-+ * @lock_acquire: associated lock acquisition function (full call)
- */
--#define SEQCOUNT_LOCKTYPE(locktype, locktype_t, preemptible, lockmember) \
-+#define SEQCOUNT_LOCKTYPE(locktype, locktype_t, preemptible, lockmember, lockbase, lock_acquire) \
- typedef struct seqcount_##locktype { \
- seqcount_t seqcount; \
- __SEQ_LOCK(locktype_t *lock); \
-@@ -185,7 +204,23 @@ static __always_inline seqcount_t *
- static __always_inline unsigned \
- __seqcount_##locktype##_sequence(const seqcount_##locktype##_t *s) \
- { \
-- return READ_ONCE(s->seqcount.sequence); \
-+ unsigned seq = READ_ONCE(s->seqcount.sequence); \
-+ \
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
-+ return seq; \
-+ \
-+ if (preemptible && unlikely(seq & 1)) { \
-+ __SEQ_LOCK(lock_acquire); \
-+ __SEQ_LOCK(lockbase##_unlock((void *) s->lock)); \
-+ \
-+ /* \
-+ * Re-read the sequence counter since the (possibly \
-+ * preempted) writer made progress. \
-+ */ \
-+ seq = READ_ONCE(s->seqcount.sequence); \
-+ } \
-+ \
-+ return seq; \
- } \
- \
- static __always_inline bool \
-@@ -224,11 +259,13 @@ static inline void __seqcount_t_assert(s
- lockdep_assert_preemption_disabled();
- }
-
--SEQCOUNT_LOCKTYPE(raw_spinlock, raw_spinlock_t, false, s->lock)
--SEQCOUNT_LOCKTYPE(spinlock, spinlock_t, false, s->lock)
--SEQCOUNT_LOCKTYPE(rwlock, rwlock_t, false, s->lock)
--SEQCOUNT_LOCKTYPE(mutex, struct mutex, true, s->lock)
--SEQCOUNT_LOCKTYPE(ww_mutex, struct ww_mutex, true, &s->lock->base)
-+#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
-+
-+SEQCOUNT_LOCKTYPE(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
-+SEQCOUNT_LOCKTYPE(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
-+SEQCOUNT_LOCKTYPE(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
-+SEQCOUNT_LOCKTYPE(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
-+SEQCOUNT_LOCKTYPE(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))
-
- /**
- * SEQCNT_LOCKTYPE_ZERO - static initializer for seqcount_LOCKTYPE_t
-@@ -408,13 +445,22 @@ static inline int read_seqcount_t_retry(
- return __read_seqcount_t_retry(s, start);
- }
-
-+/*
-+ * Automatically disable preemption for seqcount_LOCKTYPE_t writers, if the
-+ * associated lock does not implicitly disable preemption.
-+ *
-+ * Don't do it for PREEMPT_RT. Check __SEQ_LOCK() for rationale.
-+ */
-+#define __seq_enforce_preemption_protection(s) \
-+ (!IS_ENABLED(CONFIG_PREEMPT_RT) && __seqcount_lock_preemptible(s))
-+
- /**
- * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t or any of the seqcount_LOCKTYPE_t variants
- */
- #define raw_write_seqcount_begin(s) \
- do { \
-- if (__seqcount_lock_preemptible(s)) \
-+ if (__seq_enforce_preemption_protection(s)) \
- preempt_disable(); \
- \
- raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
-@@ -435,7 +481,7 @@ static inline void raw_write_seqcount_t_
- do { \
- raw_write_seqcount_t_end(__seqcount_ptr(s)); \
- \
-- if (__seqcount_lock_preemptible(s)) \
-+ if (__seq_enforce_preemption_protection(s)) \
- preempt_enable(); \
- } while (0)
-
-@@ -458,7 +504,7 @@ static inline void raw_write_seqcount_t_
- do { \
- __seqcount_assert_lock_held(s); \
- \
-- if (__seqcount_lock_preemptible(s)) \
-+ if (__seq_enforce_preemption_protection(s)) \
- preempt_disable(); \
- \
- write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
-@@ -485,7 +531,7 @@ static inline void write_seqcount_t_begi
- do { \
- __seqcount_assert_lock_held(s); \
- \
-- if (__seqcount_lock_preemptible(s)) \
-+ if (__seq_enforce_preemption_protection(s)) \
- preempt_disable(); \
- \
- write_seqcount_t_begin(__seqcount_ptr(s)); \
-@@ -506,7 +552,7 @@ static inline void write_seqcount_t_begi
- do { \
- write_seqcount_t_end(__seqcount_ptr(s)); \
- \
-- if (__seqcount_lock_preemptible(s)) \
-+ if (__seq_enforce_preemption_protection(s)) \
- preempt_enable(); \
- } while (0)
-
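Stripped of the macro machinery, the reader-side technique described in the removed patch above amounts to the following sketch for the spinlock case. It is simplified: the real code is generated per lock type by SEQCOUNT_LOCKTYPE() and wraps the lock access in __SEQ_LOCK(), since the lock pointer only exists with lockdep or PREEMPT_RT enabled.

        /* Simplified seqcount_spinlock_t read path on PREEMPT_RT. */
        static unsigned int rt_read_sequence(seqcount_spinlock_t *s)
        {
                unsigned int seq = READ_ONCE(s->seqcount.sequence);

                if (IS_ENABLED(CONFIG_PREEMPT_RT) && unlikely(seq & 1)) {
                        /*
                         * An odd count means a writer is in progress and may
                         * have been preempted by this reader. Acquiring and
                         * releasing the associated lock lets that writer run
                         * to completion, breaking the potential livelock.
                         */
                        spin_lock(s->lock);
                        spin_unlock(s->lock);
                        seq = READ_ONCE(s->seqcount.sequence);
                }

                return seq;
        }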
diff --git a/patches/0004-time-sched_clock-Use-seqcount_latch_t.patch b/patches/0004-time-sched_clock-Use-seqcount_latch_t.patch
new file mode 100644
index 000000000000..e9d70856b0e4
--- /dev/null
+++ b/patches/0004-time-sched_clock-Use-seqcount_latch_t.patch
@@ -0,0 +1,39 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Thu, 27 Aug 2020 13:40:40 +0200
+Subject: [PATCH 04/13] time/sched_clock: Use seqcount_latch_t
+
+Latch sequence counters have unique read and write APIs, and thus
+seqcount_latch_t was recently introduced at seqlock.h.
+
+Use that new data type instead of plain seqcount_t. This adds the
+necessary type-safety and ensures only latching-safe seqcount APIs are
+to be used.
+
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200827114044.11173-5-a.darwish@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/sched_clock.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/time/sched_clock.c
++++ b/kernel/time/sched_clock.c
+@@ -35,7 +35,7 @@
+ * into a single 64-byte cache line.
+ */
+ struct clock_data {
+- seqcount_t seq;
++ seqcount_latch_t seq;
+ struct clock_read_data read_data[2];
+ ktime_t wrap_kt;
+ unsigned long rate;
+@@ -76,7 +76,7 @@ struct clock_read_data *sched_clock_read
+
+ int sched_clock_read_retry(unsigned int seq)
+ {
+- return read_seqcount_retry(&cd.seq, seq);
++ return read_seqcount_latch_retry(&cd.seq, seq);
+ }
+
+ unsigned long long notrace sched_clock(void)
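For context, sched_clock_read_retry() changed above pairs with sched_clock_read_begin(); a simplified consumer, with the surrounding details omitted, looks roughly like this:

        /* Hypothetical consumer of the sched_clock read helpers. */
        static u64 sample_epoch_cyc(void)
        {
                struct clock_read_data *rd;
                unsigned int seq;
                u64 cyc;

                do {
                        rd  = sched_clock_read_begin(&seq);     /* picks cd.read_data[seq & 1]     */
                        cyc = rd->epoch_cyc;
                } while (sched_clock_read_retry(seq));          /* read_seqcount_latch_retry() now */

                return cyc;
        }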
diff --git a/patches/0005-timekeeping-Use-seqcount_latch_t.patch b/patches/0005-timekeeping-Use-seqcount_latch_t.patch
new file mode 100644
index 000000000000..9bacd842b315
--- /dev/null
+++ b/patches/0005-timekeeping-Use-seqcount_latch_t.patch
@@ -0,0 +1,86 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Thu, 27 Aug 2020 13:40:41 +0200
+Subject: [PATCH 05/13] timekeeping: Use seqcount_latch_t
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Latch sequence counters are a multiversion concurrency control mechanism
+where the seqcount_t counter even/odd value is used to switch between
+two data storage copies. This allows the seqcount_t read path to safely
+interrupt its write side critical section (e.g. from NMIs).
+
+Initially, latch sequence counters were implemented as a single write
+function, raw_write_seqcount_latch(), above plain seqcount_t. The read
+path was expected to use plain seqcount_t raw_read_seqcount().
+
+A specialized read function was later added, raw_read_seqcount_latch(),
+and became the standardized way for latch read paths. Having unique read
+and write APIs meant that latch sequence counters are basically a data
+type of their own -- just inappropriately overloading plain seqcount_t.
+The seqcount_latch_t data type was thus introduced at seqlock.h.
+
+Use that new data type instead of seqcount_raw_spinlock_t. This ensures
+that only latch-safe APIs are to be used with the sequence counter.
+
+Note that the use of seqcount_raw_spinlock_t was not very useful in the
+first place. Only the "raw_" subset of seqcount_t APIs were used at
+timekeeping.c. This subset was created for contexts where lockdep cannot
+be used. seqcount_LOCKTYPE_t's raison d'être -- verifying that the
+seqcount_t writer serialization lock is held -- cannot thus be done.
+
+References: 0c3351d451ae ("seqlock: Use raw_ prefix instead of _no_lockdep")
+References: 55f3560df975 ("seqlock: Extend seqcount API with associated locks")
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200827114044.11173-6-a.darwish@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/timekeeping.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -64,7 +64,7 @@ static struct timekeeper shadow_timekeep
+ * See @update_fast_timekeeper() below.
+ */
+ struct tk_fast {
+- seqcount_raw_spinlock_t seq;
++ seqcount_latch_t seq;
+ struct tk_read_base base[2];
+ };
+
+@@ -81,13 +81,13 @@ static struct clocksource dummy_clock =
+ };
+
+ static struct tk_fast tk_fast_mono ____cacheline_aligned = {
+- .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_mono.seq, &timekeeper_lock),
++ .seq = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
+ .base[0] = { .clock = &dummy_clock, },
+ .base[1] = { .clock = &dummy_clock, },
+ };
+
+ static struct tk_fast tk_fast_raw ____cacheline_aligned = {
+- .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_raw.seq, &timekeeper_lock),
++ .seq = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
+ .base[0] = { .clock = &dummy_clock, },
+ .base[1] = { .clock = &dummy_clock, },
+ };
+@@ -467,7 +467,7 @@ static __always_inline u64 __ktime_get_f
+ tk_clock_read(tkr),
+ tkr->cycle_last,
+ tkr->mask));
+- } while (read_seqcount_retry(&tkf->seq, seq));
++ } while (read_seqcount_latch_retry(&tkf->seq, seq));
+
+ return now;
+ }
+@@ -533,7 +533,7 @@ static __always_inline u64 __ktime_get_r
+ tk_clock_read(tkr),
+ tkr->cycle_last,
+ tkr->mask));
+- } while (read_seqcount_retry(&tkf->seq, seq));
++ } while (read_seqcount_latch_retry(&tkf->seq, seq));
+
+ return now;
+ }
diff --git a/patches/0006-x86-tsc-Use-seqcount_latch_t.patch b/patches/0006-x86-tsc-Use-seqcount_latch_t.patch
new file mode 100644
index 000000000000..f56b8ff354fb
--- /dev/null
+++ b/patches/0006-x86-tsc-Use-seqcount_latch_t.patch
@@ -0,0 +1,66 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Thu, 27 Aug 2020 13:40:42 +0200
+Subject: [PATCH 06/13] x86/tsc: Use seqcount_latch_t
+
+Latch sequence counters have unique read and write APIs, and thus
+seqcount_latch_t was recently introduced at seqlock.h.
+
+Use that new data type instead of plain seqcount_t. This adds the
+necessary type-safety and ensures that only latching-safe seqcount APIs
+are to be used.
+
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+[peterz: unwreck cyc2ns_read_begin()]
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200827114044.11173-7-a.darwish@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/tsc.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -54,7 +54,7 @@ struct clocksource *art_related_clocksou
+
+ struct cyc2ns {
+ struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */
+- seqcount_t seq; /* 32 + 4 = 36 */
++ seqcount_latch_t seq; /* 32 + 4 = 36 */
+
+ }; /* fits one cacheline */
+
+@@ -73,14 +73,14 @@ early_param("tsc_early_khz", tsc_early_k
+ preempt_disable_notrace();
+
+ do {
+- seq = this_cpu_read(cyc2ns.seq.sequence);
++ seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
+ idx = seq & 1;
+
+ data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
+ data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
+ data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
+
+- } while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
++ } while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
+ }
+
+ __always_inline void cyc2ns_read_end(void)
+@@ -186,7 +186,7 @@ static void __init cyc2ns_init_boot_cpu(
+ {
+ struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+
+- seqcount_init(&c2n->seq);
++ seqcount_latch_init(&c2n->seq);
+ __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
+ }
+
+@@ -203,7 +203,7 @@ static void __init cyc2ns_init_secondary
+
+ for_each_possible_cpu(cpu) {
+ if (cpu != this_cpu) {
+- seqcount_init(&c2n->seq);
++ seqcount_latch_init(&c2n->seq);
+ c2n = per_cpu_ptr(&cyc2ns, cpu);
+ c2n->data[0] = data[0];
+ c2n->data[1] = data[1];
diff --git a/patches/0007-rbtree_latch-Use-seqcount_latch_t.patch b/patches/0007-rbtree_latch-Use-seqcount_latch_t.patch
new file mode 100644
index 000000000000..b7cd6fbabe5c
--- /dev/null
+++ b/patches/0007-rbtree_latch-Use-seqcount_latch_t.patch
@@ -0,0 +1,41 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Thu, 27 Aug 2020 13:40:43 +0200
+Subject: [PATCH 07/13] rbtree_latch: Use seqcount_latch_t
+
+Latch sequence counters have unique read and write APIs, and thus
+seqcount_latch_t was recently introduced at seqlock.h.
+
+Use that new data type instead of plain seqcount_t. This adds the
+necessary type-safety and ensures that only latching-safe seqcount APIs
+are to be used.
+
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200827114044.11173-8-a.darwish@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rbtree_latch.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/linux/rbtree_latch.h
++++ b/include/linux/rbtree_latch.h
+@@ -42,8 +42,8 @@ struct latch_tree_node {
+ };
+
+ struct latch_tree_root {
+- seqcount_t seq;
+- struct rb_root tree[2];
++ seqcount_latch_t seq;
++ struct rb_root tree[2];
+ };
+
+ /**
+@@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_
+ do {
+ seq = raw_read_seqcount_latch(&root->seq);
+ node = __lt_find(key, root, seq & 1, ops->comp);
+- } while (read_seqcount_retry(&root->seq, seq));
++ } while (read_seqcount_latch_retry(&root->seq, seq));
+
+ return node;
+ }
diff --git a/patches/0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch b/patches/0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch
new file mode 100644
index 000000000000..14817a4dd46d
--- /dev/null
+++ b/patches/0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch
@@ -0,0 +1,85 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Thu, 27 Aug 2020 13:40:44 +0200
+Subject: [PATCH 08/13] seqlock: seqcount latch APIs: Only allow
+ seqcount_latch_t
+
+All latch sequence counter call-sites have now been converted from plain
+seqcount_t to the new seqcount_latch_t data type.
+
+Enforce type-safety by modifying seqlock.h latch APIs to only accept
+seqcount_latch_t.
+
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200827114044.11173-9-a.darwish@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/seqlock.h | 36 +++++++++++++++---------------------
+ 1 file changed, 15 insertions(+), 21 deletions(-)
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -620,7 +620,7 @@ static inline void seqcount_latch_init(s
+
+ /**
+ * raw_read_seqcount_latch() - pick even/odd latch data copy
+- * @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
++ * @s: Pointer to seqcount_latch_t
+ *
+ * See raw_write_seqcount_latch() for details and a full reader/writer
+ * usage example.
+@@ -629,17 +629,14 @@ static inline void seqcount_latch_init(s
+ * picking which data copy to read. The full counter must then be checked
+ * with read_seqcount_latch_retry().
+ */
+-#define raw_read_seqcount_latch(s) \
+-({ \
+- /* \
+- * Pairs with the first smp_wmb() in raw_write_seqcount_latch(). \
+- * Due to the dependent load, a full smp_rmb() is not needed. \
+- */ \
+- _Generic(*(s), \
+- seqcount_t: READ_ONCE(((seqcount_t *)s)->sequence), \
+- seqcount_raw_spinlock_t: READ_ONCE(((seqcount_raw_spinlock_t *)s)->seqcount.sequence), \
+- seqcount_latch_t: READ_ONCE(((seqcount_latch_t *)s)->seqcount.sequence)); \
+-})
++static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
++{
++ /*
++ * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
++ * Due to the dependent load, a full smp_rmb() is not needed.
++ */
++ return READ_ONCE(s->seqcount.sequence);
++}
+
+ /**
+ * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+@@ -656,7 +653,7 @@ read_seqcount_latch_retry(const seqcount
+
+ /**
+ * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
+- * @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
++ * @s: Pointer to seqcount_latch_t
+ *
+ * The latch technique is a multiversion concurrency control method that allows
+ * queries during non-atomic modifications. If you can guarantee queries never
+@@ -735,14 +732,11 @@ read_seqcount_latch_retry(const seqcount
+ * When data is a dynamic data structure; one should use regular RCU
+ * patterns to manage the lifetimes of the objects within.
+ */
+-#define raw_write_seqcount_latch(s) \
+-{ \
+- smp_wmb(); /* prior stores before incrementing "sequence" */ \
+- _Generic(*(s), \
+- seqcount_t: ((seqcount_t *)s)->sequence++, \
+- seqcount_raw_spinlock_t:((seqcount_raw_spinlock_t *)s)->seqcount.sequence++, \
+- seqcount_latch_t: ((seqcount_latch_t *)s)->seqcount.sequence++); \
+- smp_wmb(); /* increment "sequence" before following stores */ \
++static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
++{
++ smp_wmb(); /* prior stores before incrementing "sequence" */
++ s->seqcount.sequence++;
++ smp_wmb(); /* increment "sequence" before following stores */
+ }
+
+ /*
diff --git a/patches/0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch b/patches/0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch
new file mode 100644
index 000000000000..9f0e22b64fd6
--- /dev/null
+++ b/patches/0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch
@@ -0,0 +1,250 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Fri, 4 Sep 2020 17:32:27 +0200
+Subject: [PATCH 09/13] seqlock: seqcount_LOCKNAME_t: Standardize naming
+ convention
+
+At seqlock.h, sequence counters with associated locks are either called
+seqcount_LOCKNAME_t, seqcount_LOCKTYPE_t, or seqcount_locktype_t.
+
+Standardize on seqcount_LOCKNAME_t for all instances in comments,
+kernel-doc, and SEQCOUNT_LOCKNAME() generative macro parameters.
+
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200904153231.11994-2-a.darwish@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/seqlock.h | 79 ++++++++++++++++++++++++------------------------
+ 1 file changed, 40 insertions(+), 39 deletions(-)
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -53,7 +53,7 @@
+ *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+- * (seqcount_LOCKTYPE_t) instead.
++ * (seqcount_LOCKNAME_t) instead.
+ *
+ * If it's desired to automatically handle the sequence counter writer
+ * serialization and non-preemptibility requirements, use a sequential
+@@ -117,7 +117,7 @@ static inline void seqcount_lockdep_read
+ #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
+
+ /*
+- * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
++ * Sequence counters with associated locks (seqcount_LOCKNAME_t)
+ *
+ * A sequence counter which associates the lock used for writer
+ * serialization at initialization time. This enables lockdep to validate
+@@ -138,30 +138,32 @@ static inline void seqcount_lockdep_read
+ #endif
+
+ /**
+- * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated
++ * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
+ * @seqcount: The real sequence counter
+- * @lock: Pointer to the associated spinlock
++ * @lock: Pointer to the associated lock
+ *
+- * A plain sequence counter with external writer synchronization by a
+- * spinlock. The spinlock is associated to the sequence count in the
++ * A plain sequence counter with external writer synchronization by
++ * LOCKNAME @lock. The lock is associated to the sequence counter in the
+ * static initializer or init function. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
++ *
++ * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
+ */
+
+ /*
+ * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
+ * @s: Pointer to the seqcount_LOCKNAME_t instance
+- * @lock: Pointer to the associated LOCKTYPE
++ * @lock: Pointer to the associated lock
+ */
+
+ /*
+- * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
+- * @locktype: actual typename
+- * @lockname: name
+- * @preemptible: preemptibility of above locktype
++ * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
++ * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
++ * @locktype: LOCKNAME canonical C data type
++ * @preemptible: preemptibility of above lockname
+ * @lockmember: argument for lockdep_assert_held()
+ */
+-#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \
++#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember) \
+ typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+@@ -211,29 +213,28 @@ static inline void __seqcount_assert(seq
+ lockdep_assert_preemption_disabled();
+ }
+
+-SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock)
+-SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock)
+-SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock)
+-SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock)
+-SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
++SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock)
++SEQCOUNT_LOCKNAME(spinlock, spinlock_t, false, s->lock)
++SEQCOUNT_LOCKNAME(rwlock, rwlock_t, false, s->lock)
++SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock)
++SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
+
+ /*
+ * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
+ * @name: Name of the seqcount_LOCKNAME_t instance
+- * @lock: Pointer to the associated LOCKTYPE
++ * @lock: Pointer to the associated LOCKNAME
+ */
+
+-#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \
++#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \
+ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
+ __SEQ_LOCK(.lock = (assoc_lock)) \
+ }
+
+-#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+-#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+-#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+-#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+-#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+-
++#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
++#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
++#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
++#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
++#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+
+ #define __seqprop_case(s, lockname, prop) \
+ seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
+@@ -252,7 +253,7 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mu
+
+ /**
+ * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
+ * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
+@@ -283,7 +284,7 @@ static inline unsigned __read_seqcount_t
+
+ /**
+ * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Return: count to be passed to read_seqcount_retry()
+ */
+@@ -299,7 +300,7 @@ static inline unsigned raw_read_seqcount
+
+ /**
+ * read_seqcount_begin() - begin a seqcount_t read critical section
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Return: count to be passed to read_seqcount_retry()
+ */
+@@ -314,7 +315,7 @@ static inline unsigned read_seqcount_t_b
+
+ /**
+ * raw_read_seqcount() - read the raw seqcount_t counter value
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * raw_read_seqcount opens a read critical section of the given
+ * seqcount_t, without any lockdep checking, and without checking or
+@@ -337,7 +338,7 @@ static inline unsigned raw_read_seqcount
+ /**
+ * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
+ * lockdep and w/o counter stabilization
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * raw_seqcount_begin opens a read critical section of the given
+ * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
+@@ -365,7 +366,7 @@ static inline unsigned raw_seqcount_t_be
+
+ /**
+ * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @start: count, from read_seqcount_begin()
+ *
+ * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
+@@ -389,7 +390,7 @@ static inline int __read_seqcount_t_retr
+
+ /**
+ * read_seqcount_retry() - end a seqcount_t read critical section
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @start: count, from read_seqcount_begin()
+ *
+ * read_seqcount_retry closes the read critical section of given
+@@ -409,7 +410,7 @@ static inline int read_seqcount_t_retry(
+
+ /**
+ * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ */
+ #define raw_write_seqcount_begin(s) \
+ do { \
+@@ -428,7 +429,7 @@ static inline void raw_write_seqcount_t_
+
+ /**
+ * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ */
+ #define raw_write_seqcount_end(s) \
+ do { \
+@@ -448,7 +449,7 @@ static inline void raw_write_seqcount_t_
+ /**
+ * write_seqcount_begin_nested() - start a seqcount_t write section with
+ * custom lockdep nesting level
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @subclass: lockdep nesting level
+ *
+ * See Documentation/locking/lockdep-design.rst
+@@ -471,7 +472,7 @@ static inline void write_seqcount_t_begi
+
+ /**
+ * write_seqcount_begin() - start a seqcount_t write side critical section
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * write_seqcount_begin opens a write side critical section of the given
+ * seqcount_t.
+@@ -497,7 +498,7 @@ static inline void write_seqcount_t_begi
+
+ /**
+ * write_seqcount_end() - end a seqcount_t write side critical section
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * The write section must've been opened with write_seqcount_begin().
+ */
+@@ -517,7 +518,7 @@ static inline void write_seqcount_t_end(
+
+ /**
+ * raw_write_seqcount_barrier() - do a seqcount_t write barrier
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * This can be used to provide an ordering guarantee instead of the usual
+ * consistency guarantee. It is one wmb cheaper, because it can collapse
+@@ -571,7 +572,7 @@ static inline void raw_write_seqcount_t_
+ /**
+ * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
+ * side operations
+- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
++ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * After write_seqcount_invalidate, no seqcount_t read side operations
+ * will complete successfully and see data older than this.
diff --git a/patches/0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch b/patches/0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
new file mode 100644
index 000000000000..c44e4038c211
--- /dev/null
+++ b/patches/0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
@@ -0,0 +1,109 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Fri, 4 Sep 2020 17:32:28 +0200
+Subject: [PATCH 10/13] seqlock: Use unique prefix for seqcount_t property
+ accessors
+
+At seqlock.h, the following set of functions:
+
+ - __seqcount_ptr()
+ - __seqcount_preemptible()
+ - __seqcount_assert()
+
+act as plain seqcount_t "property" accessors. Meanwhile, the following
+group:
+
+ - __seqcount_ptr()
+ - __seqcount_lock_preemptible()
+ - __seqcount_assert_lock_held()
+
+act as the equivalent set, but in the generic form, taking either
+seqcount_t or any of the seqcount_LOCKNAME_t variants.
+
+This is quite confusing, especially the first member where it is called
+exactly the same in both groups.
+
+Differentiate the first group by using "__seqprop" as prefix, and also
+use that same prefix for all of seqcount_LOCKNAME_t property accessors.
+
+While at it, constify the property accessors first parameter when
+appropriate.
+
+References: 55f3560df975 ("seqlock: Extend seqcount API with associated locks")
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200904153231.11994-3-a.darwish@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/seqlock.h | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -157,7 +157,9 @@ static inline void seqcount_lockdep_read
+ */
+
+ /*
+- * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
++ * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
++ * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
++ *
+ * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
+ * @locktype: LOCKNAME canonical C data type
+ * @preemptible: preemptibility of above lockname
+@@ -177,19 +179,19 @@ seqcount_##lockname##_init(seqcount_##lo
+ } \
+ \
+ static __always_inline seqcount_t * \
+-__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
++__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
+ { \
+ return &s->seqcount; \
+ } \
+ \
+ static __always_inline bool \
+-__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \
++__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
+ { \
+ return preemptible; \
+ } \
+ \
+ static __always_inline void \
+-__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
++__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
+ { \
+ __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+ }
+@@ -198,17 +200,17 @@ static __always_inline void \
+ * __seqprop() for seqcount_t
+ */
+
+-static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
++static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
+ {
+ return s;
+ }
+
+-static inline bool __seqcount_preemptible(seqcount_t *s)
++static inline bool __seqprop_preemptible(const seqcount_t *s)
+ {
+ return false;
+ }
+
+-static inline void __seqcount_assert(seqcount_t *s)
++static inline void __seqprop_assert(const seqcount_t *s)
+ {
+ lockdep_assert_preemption_disabled();
+ }
+@@ -237,10 +239,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mu
+ #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+
+ #define __seqprop_case(s, lockname, prop) \
+- seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
++ seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
+
+ #define __seqprop(s, prop) _Generic(*(s), \
+- seqcount_t: __seqcount_##prop((void *)(s)), \
++ seqcount_t: __seqprop_##prop((void *)(s)), \
+ __seqprop_case((s), raw_spinlock, prop), \
+ __seqprop_case((s), spinlock, prop), \
+ __seqprop_case((s), rwlock, prop), \
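
For context on the accessors being renamed above, a standalone sketch of the
_Generic() dispatch they implement; the demo_* types and helpers below are
invented and only mimic the kernel's seqcount_t/seqcount_LOCKNAME_t pair:

#include <stdio.h>

typedef struct { unsigned int sequence; } demo_seqcount_t;
typedef struct { demo_seqcount_t seqcount; int lock; } demo_seqcount_spinlock_t;

static demo_seqcount_t *demo_prop_plain_ptr(demo_seqcount_t *s)
{
        return s;
}

static demo_seqcount_t *demo_prop_spinlock_ptr(demo_seqcount_spinlock_t *s)
{
        return &s->seqcount;
}

/*
 * One name for all counter types: _Generic() picks the per-type helper at
 * compile time; the (void *) casts keep the unselected branches valid, as
 * in the kernel's __seqprop_case()/__seqprop() macros.
 */
#define demo_prop_ptr(s)                                                \
        _Generic(*(s),                                                  \
                 demo_seqcount_t:          demo_prop_plain_ptr((void *)(s)), \
                 demo_seqcount_spinlock_t: demo_prop_spinlock_ptr((void *)(s)))

int main(void)
{
        demo_seqcount_t plain = { 0 };
        demo_seqcount_spinlock_t locked = { { 0 }, 0 };

        printf("%u %u\n", demo_prop_ptr(&plain)->sequence,
               demo_prop_ptr(&locked)->sequence);
        return 0;
}
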
diff --git a/patches/0003-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch b/patches/0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch
index bc846a1a4c85..df2f18ea65de 100644
--- a/patches/0003-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch
+++ b/patches/0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch
@@ -1,57 +1,60 @@
From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
-Date: Wed, 19 Aug 2020 11:36:57 +0200
-Subject: [PATCH 3/5] seqlock: seqcount_t: Implement all read APIs as statement
- expressions
+Date: Fri, 4 Sep 2020 17:32:29 +0200
+Subject: [PATCH 11/13] seqlock: seqcount_t: Implement all read APIs as
+ statement expressions
-Current implementation of sequence counters implement read APIs as
-macros, so it can take both seqcount_t and any of the
-seqcount_LOCKTYPE_t types as parameters. These macros directly transform
-the call to internal C functions that only take seqcount_t.
+The sequence counters read APIs are implemented as CPP macros, so they
+can take either seqcount_t or any of the seqcount_LOCKNAME_t variants.
+Such macros then get *directly* transformed to internal C functions that
+only take plain seqcount_t.
-Further commits need access to seqcount_LOCKTYPE_t inside all of the
-read APIs. Thus transform the read APIs to pure GCC statement
-expressions instead.
+Further commits need access to seqcount_LOCKNAME_t inside of the actual
+read APIs code. Thus transform all of the seqcount read APIs to pure GCC
+statement expressions instead.
-This will not break type safety, as all the calls resolve to a
-_Generic() that does not have a default case and explicitly states all
-of the supported seqcount_t/seqcount_LOCKTYPE_t types. The previously
-added kernel-doc, above each exported function, also makes the API
-expectations clear for call-site developers.
+This will not break type-safety: all of the transformed APIs resolve to
+a _Generic() selection that does not have a "default" case.
+
+This will also not affect the transformed APIs readability: previously
+added kernel-doc above all of seqlock.h functions makes the expectations
+quite clear for call-site developers.
Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200904153231.11994-4-a.darwish@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/seqlock.h | 98 ++++++++++++++++++++++++------------------------
- 1 file changed, 49 insertions(+), 49 deletions(-)
+ include/linux/seqlock.h | 94 ++++++++++++++++++++++--------------------------
+ 1 file changed, 45 insertions(+), 49 deletions(-)
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
-@@ -182,6 +182,12 @@ static __always_inline seqcount_t *
+@@ -184,6 +184,12 @@ static __always_inline seqcount_t *
return &s->seqcount; \
} \
\
+static __always_inline unsigned \
-+__seqcount_##locktype##_sequence(const seqcount_##locktype##_t *s) \
++__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
+{ \
+ return READ_ONCE(s->seqcount.sequence); \
+} \
+ \
static __always_inline bool \
- __seqcount_##locktype##_preemptible(seqcount_##locktype##_t *s) \
+ __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
{ \
-@@ -203,6 +209,11 @@ static inline seqcount_t *__seqcount_t_p
+@@ -205,6 +211,11 @@ static inline seqcount_t *__seqprop_ptr(
return s;
}
-+static inline unsigned __seqcount_t_sequence(seqcount_t *s)
++static inline unsigned __seqprop_sequence(const seqcount_t *s)
+{
+ return READ_ONCE(s->sequence);
+}
+
- static inline bool __seqcount_t_preemptible(seqcount_t *s)
+ static inline bool __seqprop_preemptible(const seqcount_t *s)
{
return false;
-@@ -248,6 +259,7 @@ SEQCOUNT_LOCKTYPE(ww_mutex, struct ww_mu
+@@ -250,6 +261,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mu
__seqprop_case((s), ww_mutex, prop))
#define __seqcount_ptr(s) __seqprop(s, ptr)
@@ -59,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
-@@ -266,21 +278,19 @@ SEQCOUNT_LOCKTYPE(ww_mutex, struct ww_mu
+@@ -268,21 +280,15 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mu
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
@@ -79,22 +82,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- return ret;
-}
+({ \
-+ unsigned ret; \
++ unsigned seq; \
+ \
-+ while (true) { \
-+ ret = __seqcount_sequence(s); \
-+ if (likely(! (ret & 1))) \
-+ break; \
++ while ((seq = __seqcount_sequence(s)) & 1) \
+ cpu_relax(); \
-+ } \
+ \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
-+ ret; \
++ seq; \
+})
/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
-@@ -289,14 +299,12 @@ static inline unsigned __read_seqcount_t
+@@ -291,14 +297,12 @@ static inline unsigned __read_seqcount_t
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount_begin(s) \
@@ -107,15 +106,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- return ret;
-}
+({ \
-+ unsigned ret = __read_seqcount_begin(s); \
++ unsigned seq = __read_seqcount_begin(s); \
+ \
+ smp_rmb(); \
-+ ret; \
++ seq; \
+})
/**
* read_seqcount_begin() - begin a seqcount_t read critical section
-@@ -305,13 +313,10 @@ static inline unsigned raw_read_seqcount
+@@ -307,13 +311,10 @@ static inline unsigned raw_read_seqcount
* Return: count to be passed to read_seqcount_retry()
*/
#define read_seqcount_begin(s) \
@@ -133,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* raw_read_seqcount() - read the raw seqcount_t counter value
-@@ -325,15 +330,13 @@ static inline unsigned read_seqcount_t_b
+@@ -327,15 +328,13 @@ static inline unsigned read_seqcount_t_b
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount(s) \
@@ -147,16 +146,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- return ret;
-}
+({ \
-+ unsigned ret = __seqcount_sequence(s); \
++ unsigned seq = __seqcount_sequence(s); \
+ \
+ smp_rmb(); \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
-+ ret; \
++ seq; \
+})
/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
-@@ -353,16 +356,13 @@ static inline unsigned raw_read_seqcount
+@@ -355,16 +354,13 @@ static inline unsigned raw_read_seqcount
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_seqcount_begin(s) \
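
The conversion above is easier to follow with a toy illustration of the GCC
statement-expression form: the whole ({ ... }) block is an expression whose
value is its last statement, so a macro can spin internally and still hand the
sampled sequence back to the caller. sample_even() below is a made-up,
userspace stand-in for __read_seqcount_begin(), not kernel code:

#include <stdio.h>

#define sample_even(counter_ptr)                                        \
({                                                                      \
        unsigned int __seq;                                             \
                                                                        \
        while ((__seq = *(counter_ptr)) & 1)                            \
                ; /* writer in progress: spin (cpu_relax() in-kernel) */ \
        __seq;                                                          \
})

int main(void)
{
        unsigned int sequence = 4;      /* even: no writer in progress */

        printf("sampled sequence %u\n", sample_even(&sequence));
        return 0;
}
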
diff --git a/patches/0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch b/patches/0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch
new file mode 100644
index 000000000000..31efd742af38
--- /dev/null
+++ b/patches/0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch
@@ -0,0 +1,139 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Fri, 4 Sep 2020 17:32:30 +0200
+Subject: [PATCH 12/13] seqlock: seqcount_LOCKNAME_t: Introduce PREEMPT_RT
+ support
+
+Preemption must be disabled before entering a sequence counter write
+side critical section. Otherwise the read side section can preempt the
+write side section and spin for the entire scheduler tick. If that
+reader belongs to a real-time scheduling class, it can spin forever and
+the kernel will livelock.
+
+Disabling preemption cannot be done for PREEMPT_RT though: it can lead
+to higher latencies, and the write side sections will not be able to
+acquire locks which become sleeping locks (e.g. spinlock_t).
+
+To remain preemptible, while avoiding a possible livelock caused by the
+reader preempting the writer, use a different technique: let the reader
+detect if a seqcount_LOCKNAME_t writer is in progress. If that's the
+case, acquire then release the associated LOCKNAME writer serialization
+lock. This will allow any possibly-preempted writer to make progress
+until the end of its writer serialization lock critical section.
+
+Implement this lock-unlock technique for all seqcount_LOCKNAME_t with
+an associated (PREEMPT_RT) sleeping lock.
+
+References: 55f3560df975 ("seqlock: Extend seqcount API with associated locks")
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200519214547.352050-1-a.darwish@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/seqlock.h | 61 ++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 51 insertions(+), 10 deletions(-)
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -17,6 +17,7 @@
+ #include <linux/kcsan-checks.h>
+ #include <linux/lockdep.h>
+ #include <linux/mutex.h>
++#include <linux/ww_mutex.h>
+ #include <linux/preempt.h>
+ #include <linux/spinlock.h>
+
+@@ -131,7 +132,23 @@ static inline void seqcount_lockdep_read
+ * See Documentation/locking/seqlock.rst
+ */
+
+-#ifdef CONFIG_LOCKDEP
++/*
++ * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
++ * disable preemption. It can lead to higher latencies, and the write side
++ * sections will not be able to acquire locks which become sleeping locks
++ * (e.g. spinlock_t).
++ *
++ * To remain preemptible while avoiding a possible livelock caused by the
++ * reader preempting the writer, use a different technique: let the reader
++ * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
++ * case, acquire then release the associated LOCKNAME writer serialization
++ * lock. This will allow any possibly-preempted writer to make progress
++ * until the end of its writer serialization lock critical section.
++ *
++ * This lock-unlock technique must be implemented for all of PREEMPT_RT
++ * sleeping locks. See Documentation/locking/locktypes.rst
++ */
++#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
+ #define __SEQ_LOCK(expr) expr
+ #else
+ #define __SEQ_LOCK(expr)
+@@ -162,10 +179,12 @@ static inline void seqcount_lockdep_read
+ *
+ * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
+ * @locktype: LOCKNAME canonical C data type
+- * @preemptible: preemptibility of above lockname
++ * @preemptible: preemptibility of above locktype
+ * @lockmember: argument for lockdep_assert_held()
++ * @lockbase: associated lock release function (prefix only)
++ * @lock_acquire: associated lock acquisition function (full call)
+ */
+-#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember) \
++#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
+ typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+@@ -187,13 +206,33 @@ static __always_inline seqcount_t *
+ static __always_inline unsigned \
+ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
+ { \
+- return READ_ONCE(s->seqcount.sequence); \
++ unsigned seq = READ_ONCE(s->seqcount.sequence); \
++ \
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
++ return seq; \
++ \
++ if (preemptible && unlikely(seq & 1)) { \
++ __SEQ_LOCK(lock_acquire); \
++ __SEQ_LOCK(lockbase##_unlock(s->lock)); \
++ \
++ /* \
++ * Re-read the sequence counter since the (possibly \
++ * preempted) writer made progress. \
++ */ \
++ seq = READ_ONCE(s->seqcount.sequence); \
++ } \
++ \
++ return seq; \
+ } \
+ \
+ static __always_inline bool \
+ __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
+ { \
+- return preemptible; \
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
++ return preemptible; \
++ \
++ /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \
++ return false; \
+ } \
+ \
+ static __always_inline void \
+@@ -226,11 +265,13 @@ static inline void __seqprop_assert(cons
+ lockdep_assert_preemption_disabled();
+ }
+
+-SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock)
+-SEQCOUNT_LOCKNAME(spinlock, spinlock_t, false, s->lock)
+-SEQCOUNT_LOCKNAME(rwlock, rwlock_t, false, s->lock)
+-SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock)
+-SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
++#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
++
++SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
++SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
++SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
++SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
++SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))
+
+ /*
+ * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
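
As a call-site illustration of what the new lockbase/lock_acquire parameters
buy on PREEMPT_RT, a hedged usage sketch of a seqcount_spinlock_t follows;
struct rt_stats and its fields are invented for the example:

#include <linux/seqlock.h>
#include <linux/spinlock.h>

struct rt_stats {
        spinlock_t              lock;
        seqcount_spinlock_t     seq;
        u64                     a, b;
};

static void rt_stats_init(struct rt_stats *st)
{
        spin_lock_init(&st->lock);
        seqcount_spinlock_init(&st->seq, &st->lock);
}

/* Writer: serialized by the associated lock, which lockdep verifies. */
static void rt_stats_update(struct rt_stats *st, u64 a, u64 b)
{
        spin_lock(&st->lock);
        write_seqcount_begin(&st->seq);
        st->a = a;
        st->b = b;
        write_seqcount_end(&st->seq);
        spin_unlock(&st->lock);
}

/*
 * Reader: on PREEMPT_RT, read_seqcount_begin() resolves (through
 * __seqcount_sequence()) to the __seqprop_spinlock_sequence() above, so an
 * odd count makes the reader briefly lock/unlock st->lock and lets a
 * preempted writer finish.
 */
static u64 rt_stats_sum(struct rt_stats *st)
{
        unsigned int seq;
        u64 sum;

        do {
                seq = read_seqcount_begin(&st->seq);
                sum = st->a + st->b;
        } while (read_seqcount_retry(&st->seq, seq));

        return sum;
}
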
diff --git a/patches/0005-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch b/patches/0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch
index d2a9def7615d..69295922ee25 100644
--- a/patches/0005-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch
+++ b/patches/0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch
@@ -1,21 +1,23 @@
From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
-Date: Wed, 19 Aug 2020 11:36:59 +0200
-Subject: [PATCH 5/5] seqlock: PREEMPT_RT: Do not starve seqlock_t writers
+Date: Fri, 4 Sep 2020 17:32:31 +0200
+Subject: [PATCH 13/13] seqlock: PREEMPT_RT: Do not starve seqlock_t writers
On PREEMPT_RT, seqlock_t is transformed to a sleeping lock that does not
-disable preemption. A seqlock_t reader can thus preempt the write side
+disable preemption. A seqlock_t reader can thus preempt its write side
section and spin for the entire scheduler tick. If that reader belongs to
a real-time scheduling class, it can spin forever and the kernel will
livelock.
-To break the possible livelock on RT, implement seqlock_t in terms of
-"seqcount_spinlock_t" instead of plain "seqcount_t".
+To break this livelock possibility on PREEMPT_RT, implement seqlock_t in
+terms of "seqcount_spinlock_t" instead of plain "seqcount_t".
-Beside the pure annotational value, for RT this automatically leverages
-the existing seqcount_LOCKTYPE_T anti-livelock mechanisms -- without
-adding any extra code.
+Beside its pure annotational value, this will leverage the existing
+seqcount_LOCKNAME_T PREEMPT_RT anti-livelock mechanisms, without adding
+any extra code.
Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200904153231.11994-6-a.darwish@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/seqlock.h | 32 +++++++++++++++++++++-----------
@@ -23,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
-@@ -761,13 +761,17 @@ static inline void raw_write_seqcount_t_
+@@ -790,13 +790,17 @@ static inline void raw_write_seqcount_la
* - Documentation/locking/seqlock.rst
*/
typedef struct {
@@ -43,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.lock = __SPIN_LOCK_UNLOCKED(lockname) \
}
-@@ -777,8 +781,8 @@ typedef struct {
+@@ -806,8 +810,8 @@ typedef struct {
*/
#define seqlock_init(sl) \
do { \
@@ -53,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} while (0)
/**
-@@ -825,6 +829,12 @@ static inline unsigned read_seqretry(con
+@@ -854,6 +858,12 @@ static inline unsigned read_seqretry(con
return read_seqcount_retry(&sl->seqcount, start);
}
@@ -66,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* write_seqlock() - start a seqlock_t write side critical section
* @sl: Pointer to seqlock_t
-@@ -841,7 +851,7 @@ static inline unsigned read_seqretry(con
+@@ -870,7 +880,7 @@ static inline unsigned read_seqretry(con
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
@@ -75,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -853,7 +863,7 @@ static inline void write_seqlock(seqlock
+@@ -882,7 +892,7 @@ static inline void write_seqlock(seqlock
*/
static inline void write_sequnlock(seqlock_t *sl)
{
@@ -84,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock(&sl->lock);
}
-@@ -867,7 +877,7 @@ static inline void write_sequnlock(seqlo
+@@ -896,7 +906,7 @@ static inline void write_sequnlock(seqlo
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
@@ -93,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -880,7 +890,7 @@ static inline void write_seqlock_bh(seql
+@@ -909,7 +919,7 @@ static inline void write_seqlock_bh(seql
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
{
@@ -102,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_bh(&sl->lock);
}
-@@ -894,7 +904,7 @@ static inline void write_sequnlock_bh(se
+@@ -923,7 +933,7 @@ static inline void write_sequnlock_bh(se
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
@@ -111,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -906,7 +916,7 @@ static inline void write_seqlock_irq(seq
+@@ -935,7 +945,7 @@ static inline void write_seqlock_irq(seq
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
{
@@ -120,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&sl->lock);
}
-@@ -915,7 +925,7 @@ static inline unsigned long __write_seql
+@@ -944,7 +954,7 @@ static inline unsigned long __write_seql
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
@@ -129,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return flags;
}
-@@ -944,7 +954,7 @@ static inline unsigned long __write_seql
+@@ -973,7 +983,7 @@ static inline unsigned long __write_seql
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 03a80b8b0e80..72cdd2b3c760 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt4
++-rt5
diff --git a/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch b/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
index bd1c10abd14d..effaabd7d665 100644
--- a/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
+++ b/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
-@@ -7044,9 +7045,13 @@ static int __init mem_cgroup_init(void)
+@@ -7050,9 +7051,13 @@ static int __init mem_cgroup_init(void)
cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
memcg_hotplug_cpu_dead);
diff --git a/patches/net-Properly-annotate-the-try-lock-for-the-seqlock.patch b/patches/net-Properly-annotate-the-try-lock-for-the-seqlock.patch
new file mode 100644
index 000000000000..ba6226eb0474
--- /dev/null
+++ b/patches/net-Properly-annotate-the-try-lock-for-the-seqlock.patch
@@ -0,0 +1,61 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 8 Sep 2020 16:57:11 +0200
+Subject: [PATCH] net: Properly annotate the try-lock for the seqlock
+
+In patch
+ ("net/Qdisc: use a seqlock instead seqcount")
+
+the seqcount has been replaced with a seqlock to allow the reader to
+boost the preempted writer.
+The try_write_seqlock() acquired the lock with a try-lock but the
+seqcount annotation was "lock".
+
+Opencode write_seqcount_t_begin() and use the try-lock annotation for
+lockdep.
+
+Reported-by: Mike Galbraith <efault@gmx.de>
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/net_seq_lock.h | 9 ---------
+ include/net/sch_generic.h | 10 +++++++++-
+ 2 files changed, 9 insertions(+), 10 deletions(-)
+
+--- a/include/net/net_seq_lock.h
++++ b/include/net/net_seq_lock.h
+@@ -6,15 +6,6 @@
+ # define net_seq_begin(__r) read_seqbegin(__r)
+ # define net_seq_retry(__r, __s) read_seqretry(__r, __s)
+
+-static inline int try_write_seqlock(seqlock_t *sl)
+-{
+- if (spin_trylock(&sl->lock)) {
+- write_seqcount_begin(&sl->seqcount);
+- return 1;
+- }
+- return 0;
+-}
+-
+ #else
+ # define net_seqlock_t seqcount_t
+ # define net_seq_begin(__r) read_seqcount_begin(__r)
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -168,8 +168,16 @@ static inline bool qdisc_run_begin(struc
+ return false;
+ }
+ #ifdef CONFIG_PREEMPT_RT
+- if (try_write_seqlock(&qdisc->running))
++ if (spin_trylock(&qdisc->running.lock)) {
++ seqcount_t *s = &qdisc->running.seqcount.seqcount;
++ /*
++ * Variant of write_seqcount_t_begin() telling lockdep that a
++ * trylock was attempted.
++ */
++ raw_write_seqcount_t_begin(s);
++ seqcount_acquire(&s->dep_map, 0, 1, _RET_IP_);
+ return true;
++ }
+ return false;
+ #else
+ /* Variant of write_seqcount_begin() telling lockdep a trylock
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index df1a2cabed1f..f7122a4be1d5 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned int sas_ss_flags;
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
-@@ -126,6 +126,14 @@ static unsigned long exit_to_user_mode_l
+@@ -149,6 +149,14 @@ static unsigned long exit_to_user_mode_l
if (ti_work & _TIF_NEED_RESCHED)
schedule();
diff --git a/patches/seqlock-Fix-multiple-kernel-doc-warnings.patch b/patches/seqlock-Fix-multiple-kernel-doc-warnings.patch
new file mode 100644
index 000000000000..e4a28ceccd4f
--- /dev/null
+++ b/patches/seqlock-Fix-multiple-kernel-doc-warnings.patch
@@ -0,0 +1,77 @@
+From: Randy Dunlap <rdunlap@infradead.org>
+Date: Sun, 16 Aug 2020 17:02:00 -0700
+Subject: [PATCH] seqlock: Fix multiple kernel-doc warnings
+
+Fix kernel-doc warnings in <linux/seqlock.h>.
+
+../include/linux/seqlock.h:152: warning: Incorrect use of kernel-doc format: * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
+../include/linux/seqlock.h:164: warning: Incorrect use of kernel-doc format: * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
+../include/linux/seqlock.h:229: warning: Function parameter or member 'seq_name' not described in 'SEQCOUNT_LOCKTYPE_ZERO'
+../include/linux/seqlock.h:229: warning: Function parameter or member 'assoc_lock' not described in 'SEQCOUNT_LOCKTYPE_ZERO'
+../include/linux/seqlock.h:229: warning: Excess function parameter 'name' description in 'SEQCOUNT_LOCKTYPE_ZERO'
+../include/linux/seqlock.h:229: warning: Excess function parameter 'lock' description in 'SEQCOUNT_LOCKTYPE_ZERO'
+../include/linux/seqlock.h:695: warning: duplicate section name 'NOTE'
+
+Demote kernel-doc notation for the macros "seqcount_LOCKNAME_init()" and
+"SEQCOUNT_LOCKTYPE()"; scripts/kernel-doc does not handle them correctly.
+
+Rename function parameters in SEQCNT_LOCKNAME_ZERO() documentation
+to match the macro's argument names. Change the macro name in the
+documentation to SEQCOUNT_LOCKTYPE_ZERO() to match the macro's name.
+
+For raw_write_seqcount_latch(), rename the second NOTE: to NOTE2:
+to prevent a kernel-doc warning. However, the generated output is not
+quite as nice as it could be for this.
+
+Fix a typo: s/LOCKTYPR/LOCKTYPE/
+
+Fixes: 0efc94c5d15c ("seqcount: Compress SEQCNT_LOCKNAME_ZERO()")
+Fixes: e4e9ab3f9f91 ("seqlock: Fold seqcount_LOCKNAME_init() definition")
+Fixes: a8772dccb2ec ("seqlock: Fold seqcount_LOCKNAME_t definition")
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200817000200.20993-1-rdunlap@infradead.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/seqlock.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -138,7 +138,7 @@ static inline void seqcount_lockdep_read
+ #endif
+
+ /**
+- * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPR associated
++ * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated
+ * @seqcount: The real sequence counter
+ * @lock: Pointer to the associated spinlock
+ *
+@@ -148,7 +148,7 @@ static inline void seqcount_lockdep_read
+ * that the write side critical section is properly serialized.
+ */
+
+-/**
++/*
+ * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
+ * @s: Pointer to the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated LOCKTYPE
+@@ -217,7 +217,7 @@ SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, fa
+ SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock)
+ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
+
+-/**
++/*
+ * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
+ * @name: Name of the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated LOCKTYPE
+@@ -688,7 +688,7 @@ static inline int raw_read_seqcount_t_la
+ * to miss an entire modification sequence, once it resumes it might
+ * observe the new entry.
+ *
+- * NOTE:
++ * NOTE2:
+ *
+ * When data is a dynamic data structure; one should use regular RCU
+ * patterns to manage the lifetimes of the objects within.
diff --git a/patches/series b/patches/series
index 9d2a58243f3a..58a8ebe74256 100644
--- a/patches/series
+++ b/patches/series
@@ -143,11 +143,25 @@ mm-make-vmstat-rt-aware.patch
## ############################################################
# seqcount
-0001-seqlock-seqcount_LOCKTYPE_t-Standardize-naming-conve.patch
-0002-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
-0003-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch
-0004-seqlock-seqcount_LOCKTYPE_t-Introduce-PREEMPT_RT-sup.patch
-0005-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch
+# https://lkml.kernel.org/r/20200817000200.20993-1-rdunlap@infradead.org
+seqlock-Fix-multiple-kernel-doc-warnings.patch
+# 2020-08-27 13:40 Ahmed S. Darwis [PATCH v1 0/8] seqlock: Introduce seqcount_latch_t
+# 20200827114044.11173-1-a.darwish@linutronix.de
+0001-time-sched_clock-Use-raw_read_seqcount_latch-during-.patch
+0002-mm-swap-Do-not-abuse-the-seqcount_t-latching-API.patch
+0003-seqlock-Introduce-seqcount_latch_t.patch
+0004-time-sched_clock-Use-seqcount_latch_t.patch
+0005-timekeeping-Use-seqcount_latch_t.patch
+0006-x86-tsc-Use-seqcount_latch_t.patch
+0007-rbtree_latch-Use-seqcount_latch_t.patch
+0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch
+# 2020-09-04 17:32 Ahmed S. Darwis [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support
+# 20200904153231.11994-1-a.darwish@linutronix.de
+0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch
+0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
+0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch
+0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch
+0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch
##
0024-xfrm-Use-sequence-counter-with-associated-spinlock.patch
u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
@@ -155,6 +169,7 @@ u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
fs-dcache-use-swait_queue-instead-of-waitqueue.patch
fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
net-Qdisc-use-a-seqlock-instead-seqcount.patch
+net-Properly-annotate-the-try-lock-for-the-seqlock.patch
#####
# split changelog
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 5b0e34f95192..8c9d76d4f6f8 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
struct sk_buff;
-@@ -1875,6 +1876,12 @@ static inline void skb_queue_head_init(s
+@@ -1884,6 +1885,12 @@ static inline void skb_queue_head_init(s
__skb_queue_head_init(list);
}
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -10964,7 +10964,7 @@ static int __init net_dev_init(void)
+@@ -10965,7 +10965,7 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 0f02f359285c..992cebe83882 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -10630,6 +10636,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -10631,6 +10637,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch b/patches/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
index 295fae7a78a8..537347e1880f 100644
--- a/patches/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
+++ b/patches/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
-@@ -329,7 +329,7 @@ void irqentry_exit_cond_resched(void)
+@@ -352,7 +352,7 @@ void irqentry_exit_cond_resched(void)
rcu_irq_exit_check_preempt();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
WARN_ON_ONCE(!on_thread_stack());
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index 14a143eebe01..bde3bb57d7a2 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
-@@ -123,7 +123,7 @@ static unsigned long exit_to_user_mode_l
+@@ -146,7 +146,7 @@ static unsigned long exit_to_user_mode_l
local_irq_enable_exit_to_user(ti_work);