-rw-r--r--  patches/0001_random_remove_unused_irq_flags_argument_from_add_interrupt_randomness.patch  |  97
-rw-r--r--  patches/0002_irq_remove_unsued_flags_argument_from___handle_irq_event_percpu.patch  |  82
-rw-r--r--  patches/0003_random_split_add_interrupt_randomness.patch  |  87
-rw-r--r--  patches/0004_random_move_the_fast_pool_reset_into_the_caller.patch  |  79
-rw-r--r--  patches/0005_random_defer_processing_of_randomness_on_preempt_rt.patch  | 116
-rw-r--r--  patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch  |   2
-rw-r--r--  patches/0010-drm-i915-Drop-the-irqs_disabled-check.patch  |   4
-rw-r--r--  patches/Add_localversion_for_-RT_release.patch  |   2
-rw-r--r--  patches/drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch (renamed from patches/0002-drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch)  |  23
-rw-r--r--  patches/mm-memcontro--Disable-on-PREEMPT_RT.patch  |  25
-rw-r--r--  patches/mm-memcontrol-Disable-on-PREEMPT_RT.patch  |  42
-rw-r--r--  patches/net-dev-Always-serialize-on-Qdisc-busylock-in-__dev_.patch  |  39
-rw-r--r--  patches/net__Dequeue_in_dev_cpu_dead_without_the_lock.patch  |  35
-rw-r--r--  patches/net__Remove_preemption_disabling_in_netif_rx.patch  |   4
-rw-r--r--  patches/net__Use_skbufhead_with_raw_lock.patch  |  15
-rw-r--r--  patches/net__dev__always_take_qdiscs_busylock_in___dev_xmit_skb.patch  |  39
-rw-r--r--  patches/net_core__use_local_bh_disable_in_netif_rx_ni.patch  |   2
-rw-r--r--  patches/panic__skip_get_random_bytes_for_RT_FULL_in_init_oops_id.patch  |  30
-rw-r--r--  patches/panic_remove_oops_id.patch  |  55
-rw-r--r--  patches/random__Make_it_work_on_rt.patch  | 173
-rw-r--r--  patches/serial__8250__implement_write_atomic.patch  |  26
-rw-r--r--  patches/series  |  61
-rw-r--r--  patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch  |   6
-rw-r--r--  patches/softirq__Check_preemption_after_reenabling_interrupts.patch  |   8
-rw-r--r--  patches/tcp-Don-t-acquire-inet_listen_hashbucket-lock-with-d.patch  |   6
-rw-r--r--  patches/u64_stats-Disable-preemption-on-32bit-UP-SMP-PREEMPT.patch (renamed from patches/u64_stats__Disable_preemption_on_32bit-UP_SMP_with_RT_during_updates.patch)  |  31
-rw-r--r--  patches/x86__kvm_Require_const_tsc_for_RT.patch  |   2
27 files changed, 696 insertions, 395 deletions
diff --git a/patches/0001_random_remove_unused_irq_flags_argument_from_add_interrupt_randomness.patch b/patches/0001_random_remove_unused_irq_flags_argument_from_add_interrupt_randomness.patch
new file mode 100644
index 000000000000..e596a409f7ec
--- /dev/null
+++ b/patches/0001_random_remove_unused_irq_flags_argument_from_add_interrupt_randomness.patch
@@ -0,0 +1,97 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: random: Remove unused irq_flags argument from add_interrupt_randomness().
+Date: Tue, 07 Dec 2021 13:17:33 +0100
+
+Since commit
+ ee3e00e9e7101 ("random: use registers from interrupted code for CPU's w/o a cycle counter")
+
+the irq_flags argument is no longer used.
+
+Remove the unused irq_flags argument.
+
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Dexuan Cui <decui@microsoft.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Haiyang Zhang <haiyangz@microsoft.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: K. Y. Srinivasan <kys@microsoft.com>
+Cc: Stephen Hemminger <sthemmin@microsoft.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Wei Liu <wei.liu@kernel.org>
+Cc: linux-hyperv@vger.kernel.org
+Cc: x86@kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211207121737.2347312-2-bigeasy@linutronix.de
+---
+ arch/x86/kernel/cpu/mshyperv.c | 2 +-
+ drivers/char/random.c | 4 ++--
+ drivers/hv/vmbus_drv.c | 2 +-
+ include/linux/random.h | 2 +-
+ kernel/irq/handle.c | 2 +-
+ 5 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -79,7 +79,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_sti
+ inc_irq_stat(hyperv_stimer0_count);
+ if (hv_stimer0_handler)
+ hv_stimer0_handler();
+- add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
++ add_interrupt_randomness(HYPERV_STIMER0_VECTOR);
+ ack_APIC_irq();
+
+ set_irq_regs(old_regs);
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -200,7 +200,7 @@
+ * void add_device_randomness(const void *buf, unsigned int size);
+ * void add_input_randomness(unsigned int type, unsigned int code,
+ * unsigned int value);
+- * void add_interrupt_randomness(int irq, int irq_flags);
++ * void add_interrupt_randomness(int irq);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
+ * add_device_randomness() is for adding data to the random pool that
+@@ -1242,7 +1242,7 @@ static __u32 get_reg(struct fast_pool *f
+ return *ptr;
+ }
+
+-void add_interrupt_randomness(int irq, int irq_flags)
++void add_interrupt_randomness(int irq)
+ {
+ struct entropy_store *r;
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1381,7 +1381,7 @@ static void vmbus_isr(void)
+ tasklet_schedule(&hv_cpu->msg_dpc);
+ }
+
+- add_interrupt_randomness(vmbus_interrupt, 0);
++ add_interrupt_randomness(vmbus_interrupt);
+ }
+
+ static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -35,7 +35,7 @@ static inline void add_latent_entropy(vo
+
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value) __latent_entropy;
+-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
++extern void add_interrupt_randomness(int irq) __latent_entropy;
+
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern int wait_for_random_bytes(void);
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -197,7 +197,7 @@ irqreturn_t handle_irq_event_percpu(stru
+
+ retval = __handle_irq_event_percpu(desc, &flags);
+
+- add_interrupt_randomness(desc->irq_data.irq, flags);
++ add_interrupt_randomness(desc->irq_data.irq);
+
+ if (!irq_settings_no_debug(desc))
+ note_interrupt(desc, retval);
diff --git a/patches/0002_irq_remove_unsued_flags_argument_from___handle_irq_event_percpu.patch b/patches/0002_irq_remove_unsued_flags_argument_from___handle_irq_event_percpu.patch
new file mode 100644
index 000000000000..70a3f0a993a4
--- /dev/null
+++ b/patches/0002_irq_remove_unsued_flags_argument_from___handle_irq_event_percpu.patch
@@ -0,0 +1,82 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: irq: Remove unused flags argument from __handle_irq_event_percpu().
+Date: Tue, 07 Dec 2021 13:17:34 +0100
+
+The __IRQF_TIMER bit from the flags argument was used in
+add_interrupt_randomness() to distinguish the timer interrupt from other
+interrupts. This is no longer the case.
+
+Remove the flags argument from __handle_irq_event_percpu().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211207121737.2347312-3-bigeasy@linutronix.de
+---
+ kernel/irq/chip.c | 4 +---
+ kernel/irq/handle.c | 9 ++-------
+ kernel/irq/internals.h | 2 +-
+ 3 files changed, 4 insertions(+), 11 deletions(-)
+
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -575,8 +575,6 @@ EXPORT_SYMBOL_GPL(handle_simple_irq);
+ */
+ void handle_untracked_irq(struct irq_desc *desc)
+ {
+- unsigned int flags = 0;
+-
+ raw_spin_lock(&desc->lock);
+
+ if (!irq_may_run(desc))
+@@ -593,7 +591,7 @@ void handle_untracked_irq(struct irq_des
+ irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+ raw_spin_unlock(&desc->lock);
+
+- __handle_irq_event_percpu(desc, &flags);
++ __handle_irq_event_percpu(desc);
+
+ raw_spin_lock(&desc->lock);
+ irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -136,7 +136,7 @@ void __irq_wake_thread(struct irq_desc *
+ wake_up_process(action->thread);
+ }
+
+-irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
++irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc)
+ {
+ irqreturn_t retval = IRQ_NONE;
+ unsigned int irq = desc->irq_data.irq;
+@@ -174,10 +174,6 @@ irqreturn_t __handle_irq_event_percpu(st
+ }
+
+ __irq_wake_thread(desc, action);
+-
+- fallthrough; /* to add to randomness */
+- case IRQ_HANDLED:
+- *flags |= action->flags;
+ break;
+
+ default:
+@@ -193,9 +189,8 @@ irqreturn_t __handle_irq_event_percpu(st
+ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+ {
+ irqreturn_t retval;
+- unsigned int flags = 0;
+
+- retval = __handle_irq_event_percpu(desc, &flags);
++ retval = __handle_irq_event_percpu(desc);
+
+ add_interrupt_randomness(desc->irq_data.irq);
+
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -103,7 +103,7 @@ extern int __irq_get_irqchip_state(struc
+
+ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
+
+-irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
++irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc);
+ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
+ irqreturn_t handle_irq_event(struct irq_desc *desc);
+
diff --git a/patches/0003_random_split_add_interrupt_randomness.patch b/patches/0003_random_split_add_interrupt_randomness.patch
new file mode 100644
index 000000000000..bd0750993a37
--- /dev/null
+++ b/patches/0003_random_split_add_interrupt_randomness.patch
@@ -0,0 +1,87 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: random: Split add_interrupt_randomness().
+Date: Tue, 07 Dec 2021 13:17:35 +0100
+
+Split add_interrupt_randomness() into two parts:
+- add_interrupt_randomness() which collects the entropy on the
+  invocation of a hardware interrupt and feeds it into the fast_pool,
+  a per-CPU variable (irq_randomness).
+
+- process_interrupt_randomness_pool() which feeds the fast_pool/
+ irq_randomness into the entropy_store if enough entropy has been
+ gathered.
+
+This is a preparation step to ease PREEMPT_RT support.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211207121737.2347312-4-bigeasy@linutronix.de
+---
+ drivers/char/random.c | 47 +++++++++++++++++++++++++++--------------------
+ 1 file changed, 27 insertions(+), 20 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1242,29 +1242,10 @@ static __u32 get_reg(struct fast_pool *f
+ return *ptr;
+ }
+
+-void add_interrupt_randomness(int irq)
++static void process_interrupt_randomness_pool(struct fast_pool *fast_pool)
+ {
+ struct entropy_store *r;
+- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+- struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+- cycles_t cycles = random_get_entropy();
+- __u32 c_high, j_high;
+- __u64 ip;
+-
+- if (cycles == 0)
+- cycles = get_reg(fast_pool, regs);
+- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
+- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
+- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
+- fast_pool->pool[1] ^= now ^ c_high;
+- ip = regs ? instruction_pointer(regs) : _RET_IP_;
+- fast_pool->pool[2] ^= ip;
+- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
+- get_reg(fast_pool, regs);
+-
+- fast_mix(fast_pool);
+- add_interrupt_bench(cycles);
+
+ if (unlikely(crng_init == 0)) {
+ if ((fast_pool->count >= 64) &&
+@@ -1293,6 +1274,32 @@ void add_interrupt_randomness(int irq)
+ /* award one bit for the contents of the fast pool */
+ credit_entropy_bits(r, 1);
+ }
++
++void add_interrupt_randomness(int irq)
++{
++ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
++ struct pt_regs *regs = get_irq_regs();
++ unsigned long now = jiffies;
++ cycles_t cycles = random_get_entropy();
++ __u32 c_high, j_high;
++ __u64 ip;
++
++ if (cycles == 0)
++ cycles = get_reg(fast_pool, regs);
++ c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
++ j_high = (sizeof(now) > 4) ? now >> 32 : 0;
++ fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
++ fast_pool->pool[1] ^= now ^ c_high;
++ ip = regs ? instruction_pointer(regs) : _RET_IP_;
++ fast_pool->pool[2] ^= ip;
++ fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
++ get_reg(fast_pool, regs);
++
++ fast_mix(fast_pool);
++ add_interrupt_bench(cycles);
++
++ process_interrupt_randomness_pool(fast_pool);
++}
+ EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+
+ #ifdef CONFIG_BLOCK
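
A quick illustration of the resulting shape, for readers following the
series: the self-contained userspace C model below is a sketch only (the
mixing, the threshold handling and the names collect_entropy()/process_pool()
are simplified stand-ins, not the kernel implementation). It shows why the
cheap collection half can stay in hard-IRQ context while the lock-taking
half becomes separately callable:

/* Userspace model of the patch 0003 split -- illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct fast_pool {
	uint32_t pool[4];
	unsigned int count;
};

/* Cheap half: runs on every hard interrupt, touches per-CPU state only. */
static void collect_entropy(struct fast_pool *f, int irq, uint64_t cycles)
{
	f->pool[0] ^= (uint32_t)cycles ^ (uint32_t)irq;
	f->pool[1] ^= (uint32_t)(cycles >> 32);
	f->count++;
}

/* Heavier half: the only part that needs the input-pool lock. */
static void process_pool(struct fast_pool *f)
{
	if (f->count < 64)
		return;
	/* in the kernel: trylock input_pool, __mix_pool_bytes(), credit */
	printf("fed %u events into the input pool\n", f->count);
	f->count = 0;
}

int main(void)
{
	struct fast_pool p = { { 0 }, 0 };

	for (int i = 0; i < 130; i++) {
		collect_entropy(&p, 17, 0x1000u + (uint64_t)i);
		process_pool(&p);  /* still called back-to-back, as upstream */
	}
	return 0;
}
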
diff --git a/patches/0004_random_move_the_fast_pool_reset_into_the_caller.patch b/patches/0004_random_move_the_fast_pool_reset_into_the_caller.patch
new file mode 100644
index 000000000000..62e8ae44d325
--- /dev/null
+++ b/patches/0004_random_move_the_fast_pool_reset_into_the_caller.patch
@@ -0,0 +1,79 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: random: Move the fast_pool reset into the caller.
+Date: Tue, 07 Dec 2021 13:17:36 +0100
+
+The state of the fast_pool (amount of added entropy, timestamp of the
+last addition) is reset after the entropy has been consumed.
+
+Move the reset of the fast_pool into the caller.
+This is a preparation step to ease PREEMPT_RT support.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211207121737.2347312-5-bigeasy@linutronix.de
+---
+ drivers/char/random.c | 29 +++++++++++++++--------------
+ 1 file changed, 15 insertions(+), 14 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1242,37 +1242,35 @@ static __u32 get_reg(struct fast_pool *f
+ return *ptr;
+ }
+
+-static void process_interrupt_randomness_pool(struct fast_pool *fast_pool)
++static bool process_interrupt_randomness_pool(struct fast_pool *fast_pool)
+ {
+ struct entropy_store *r;
+- unsigned long now = jiffies;
+
+ if (unlikely(crng_init == 0)) {
++ bool pool_reset = false;
++
+ if ((fast_pool->count >= 64) &&
+ crng_fast_load((char *) fast_pool->pool,
+- sizeof(fast_pool->pool))) {
+- fast_pool->count = 0;
+- fast_pool->last = now;
+- }
+- return;
++ sizeof(fast_pool->pool)))
++ pool_reset = true;
++
++ return pool_reset;
+ }
+
+ if ((fast_pool->count < 64) &&
+- !time_after(now, fast_pool->last + HZ))
+- return;
++ !time_after(jiffies, fast_pool->last + HZ))
++ return false;
+
+ r = &input_pool;
+ if (!spin_trylock(&r->lock))
+- return;
++ return false;
+
+- fast_pool->last = now;
+ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
+ spin_unlock(&r->lock);
+
+- fast_pool->count = 0;
+-
+ /* award one bit for the contents of the fast pool */
+ credit_entropy_bits(r, 1);
++ return true;
+ }
+
+ void add_interrupt_randomness(int irq)
+@@ -1298,7 +1296,10 @@ void add_interrupt_randomness(int irq)
+ fast_mix(fast_pool);
+ add_interrupt_bench(cycles);
+
+- process_interrupt_randomness_pool(fast_pool);
++ if (process_interrupt_randomness_pool(fast_pool)) {
++ fast_pool->last = now;
++ fast_pool->count = 0;
++ }
+ }
+ EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+
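
The new convention is small but load-bearing for the next patch. A minimal
sketch of the resulting shape (illustrative C only; now() and the 64-event
threshold are stand-ins, not the kernel code):

#include <stdbool.h>

struct fast_pool {
	unsigned int count;
	unsigned long last;
};

static unsigned long now(void) { return 1000; }	/* stand-in for jiffies */

/* Reports whether the pool was consumed; no longer touches the state. */
static bool process_interrupt_randomness_pool(struct fast_pool *f)
{
	if (f->count < 64)
		return false;
	/* trylock input pool, mix bytes, credit entropy ... */
	return true;
}

int main(void)
{
	struct fast_pool f = { .count = 64, .last = 0 };

	/* The caller now owns the reset -- which lets a PREEMPT_RT caller
	 * (patch 0005) process a snapshot and reset the live per-CPU copy
	 * under its own IRQ protection instead. */
	if (process_interrupt_randomness_pool(&f)) {
		f.last = now();
		f.count = 0;
	}
	return (int)f.count;	/* 0 */
}
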
diff --git a/patches/0005_random_defer_processing_of_randomness_on_preempt_rt.patch b/patches/0005_random_defer_processing_of_randomness_on_preempt_rt.patch
new file mode 100644
index 000000000000..6a1164699e75
--- /dev/null
+++ b/patches/0005_random_defer_processing_of_randomness_on_preempt_rt.patch
@@ -0,0 +1,116 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: random: Defer processing of randomness on PREEMPT_RT.
+Date: Tue, 07 Dec 2021 13:17:37 +0100
+
+On interrupt invocation, add_interrupt_randomness() adds entropy to its
+per-CPU state and, once it has gathered enough, mixes it into an
+entropy_store. In order to do so, it needs to lock the pool by acquiring
+entropy_store::lock which is a spinlock_t. This lock cannot be acquired
+on PREEMPT_RT with disabled interrupts because it is a sleeping lock.
+
+This lock could be made a raw_spinlock_t, which would then allow it to
+be acquired with disabled interrupts on PREEMPT_RT. The lock is usually
+held for a short number of cycles while entropy is added to the pool,
+and the invocation from the IRQ handler uses a try-lock which avoids
+spinning on the lock if contended. The extraction of entropy
+(extract_buf()) needs a few more cycles because it additionally performs
+a few SHA1 transformations. This takes around 5-10us on a testing box
+(E5-2650, 32 cores, 2-way NUMA) and is negligible.
+
+The frequent invocation of the IOCTLs RNDADDTOENTCNT and RNDRESEEDCRNG
+on multiple CPUs in parallel leads to filling and depletion of the pool
+which in turn results in heavy contention on the lock. The spinning with
+disabled interrupts on multiple CPUs leads to latencies of at least
+100us on the same machine which is no longer acceptable.
+
+Collect only the IRQ randomness in IRQ-context on PREEMPT_RT.
+In threaded-IRQ context, make a copy of the per-CPU state with disabled
+interrupts to ensure that it is not modified while being duplicated.
+Pass the copy to process_interrupt_randomness_pool() and reset the
+per-CPU state afterwards if needed.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211207121737.2347312-6-bigeasy@linutronix.de
+---
+ drivers/char/random.c | 39 ++++++++++++++++++++++++++++++++++++---
+ include/linux/random.h | 1 +
+ kernel/irq/manage.c | 3 +++
+ 3 files changed, 40 insertions(+), 3 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1273,6 +1273,32 @@ static bool process_interrupt_randomness
+ return true;
+ }
+
++#ifdef CONFIG_PREEMPT_RT
++void process_interrupt_randomness(void)
++{
++ struct fast_pool *cpu_pool;
++ struct fast_pool fast_pool;
++
++ lockdep_assert_irqs_enabled();
++
++ migrate_disable();
++ cpu_pool = this_cpu_ptr(&irq_randomness);
++
++ local_irq_disable();
++ memcpy(&fast_pool, cpu_pool, sizeof(fast_pool));
++ local_irq_enable();
++
++ if (process_interrupt_randomness_pool(&fast_pool)) {
++ local_irq_disable();
++ cpu_pool->last = jiffies;
++ cpu_pool->count = 0;
++ local_irq_enable();
++ }
++ memzero_explicit(&fast_pool, sizeof(fast_pool));
++ migrate_enable();
++}
++#endif
++
+ void add_interrupt_randomness(int irq)
+ {
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+@@ -1296,9 +1322,16 @@ void add_interrupt_randomness(int irq)
+ fast_mix(fast_pool);
+ add_interrupt_bench(cycles);
+
+- if (process_interrupt_randomness_pool(fast_pool)) {
+- fast_pool->last = now;
+- fast_pool->count = 0;
++ /*
++ * On PREEMPT_RT the entropy can not be fed into the input_pool because
++ * it needs to acquire sleeping locks with disabled interrupts.
++ * This is deferred to the threaded handler.
++ */
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ if (process_interrupt_randomness_pool(fast_pool)) {
++ fast_pool->last = now;
++ fast_pool->count = 0;
++ }
+ }
+ }
+ EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -36,6 +36,7 @@ static inline void add_latent_entropy(vo
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value) __latent_entropy;
+ extern void add_interrupt_randomness(int irq) __latent_entropy;
++extern void process_interrupt_randomness(void);
+
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern int wait_for_random_bytes(void);
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1281,6 +1281,9 @@ static int irq_thread(void *data)
+ if (action_ret == IRQ_WAKE_THREAD)
+ irq_wake_secondary(desc, action);
+
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ process_interrupt_randomness();
++
+ wake_threads_waitq(desc);
+ }
+
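
Putting the series together, here is a self-contained userspace model of
the deferral -- an illustrative sketch, not the kernel code:
local_irq_disable()/local_irq_enable() are modeled as no-ops, the per-CPU
variable becomes a plain global, and the actual mixing is elided:

#include <stdbool.h>
#include <string.h>

struct fast_pool {
	unsigned int pool[4];
	unsigned int count;
	unsigned long last;
};

static struct fast_pool irq_randomness;	/* stands in for the per-CPU var */

static void local_irq_disable(void) { }	/* model only */
static void local_irq_enable(void)  { }	/* model only */

static bool process_pool(struct fast_pool *f)
{
	return f->count >= 64;	/* mixing elided; see patch 0004 */
}

/* Runs in the preemptible threaded-IRQ handler on PREEMPT_RT. */
static void process_interrupt_randomness(void)
{
	struct fast_pool snapshot;

	local_irq_disable();			/* stable copy of live state */
	memcpy(&snapshot, &irq_randomness, sizeof(snapshot));
	local_irq_enable();

	if (process_pool(&snapshot)) {		/* sleeping locks OK here */
		local_irq_disable();
		irq_randomness.last = 0;	/* jiffies in the kernel */
		irq_randomness.count = 0;
		local_irq_enable();
	}
	memset(&snapshot, 0, sizeof(snapshot));	/* memzero_explicit() upstream */
}

int main(void)
{
	irq_randomness.count = 64;
	process_interrupt_randomness();
	return (int)irq_randomness.count;	/* 0: consumed and reset */
}
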
diff --git a/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch b/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch
index 2fc9e09f60ab..723cb17a34ce 100644
--- a/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch
+++ b/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch
@@ -214,7 +214,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
return NULL;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4879,8 +4879,11 @@ static struct rq *finish_task_switch(str
+@@ -4881,8 +4881,11 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/0010-drm-i915-Drop-the-irqs_disabled-check.patch b/patches/0010-drm-i915-Drop-the-irqs_disabled-check.patch
index 941c5be424a5..b236f2ab65eb 100644
--- a/patches/0010-drm-i915-Drop-the-irqs_disabled-check.patch
+++ b/patches/0010-drm-i915-Drop-the-irqs_disabled-check.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
-@@ -559,7 +559,6 @@ bool __i915_request_submit(struct i915_r
+@@ -560,7 +560,6 @@ bool __i915_request_submit(struct i915_r
RQ_TRACE(request, "\n");
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
lockdep_assert_held(&engine->sched_engine->lock);
/*
-@@ -668,7 +667,6 @@ void __i915_request_unsubmit(struct i915
+@@ -669,7 +668,6 @@ void __i915_request_unsubmit(struct i915
*/
RQ_TRACE(request, "\n");
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index 8b36fd73f91a..6b1364508a7c 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt9
++-rt10
diff --git a/patches/0002-drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch b/patches/drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch
index 9fcdcb0b5b52..7d70f071b1db 100644
--- a/patches/0002-drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch
+++ b/patches/drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch
@@ -1,11 +1,12 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 7 Jul 2020 12:25:11 +0200
-Subject: [PATCH 02/10] drm/i915: Don't disable interrupts and pretend a lock
- as been acquired in __timeline_mark_lock().
+Date: Fri, 10 Dec 2021 21:44:17 +0100
+Subject: [PATCH] drm/i915: Don't disable interrupts and pretend a lock as been
+ acquired in __timeline_mark_lock().
This is a revert of commits
d67739268cf0e ("drm/i915/gt: Mark up the nested engine-pm timeline lock as irqsafe")
6c69a45445af9 ("drm/i915/gt: Mark context->active_count as protected by timeline->mutex")
+ 6dcb85a0ad990 ("drm/i915: Hold irq-off for the entire fake lock period")
The existing code leads to a different behaviour depending on whether
lockdep is enabled or not. Any following lock that is acquired without
@@ -20,12 +21,16 @@ check in intel_context_mark_active(). The other problem with disabling
interrupts is that on PREEMPT_RT interrupts are also disabled which
leads to problems for instance later during memory allocation.
-Add a CONTEXT_IS_PARKED bit to intel_engine_cs and set_bit/clear_bit it
+Add a CONTEXT_IS_PARKING bit to intel_engine_cs and set_bit/clear_bit it
instead of mutex_acquire/mutex_release. Use test_bit in the two
identified spots which relied on the lockdep annotation.
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/YbO8Ie1Nj7XcQPNQ@linutronix.de
---
drivers/gpu/drm/i915/gt/intel_context.h | 3 +-
drivers/gpu/drm/i915/gt/intel_context_types.h | 1
@@ -41,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
- lockdep_assert_held(&ce->timeline->mutex);
+ lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
-+ test_bit(CONTEXT_IS_PARKED, &ce->flags));
++ test_bit(CONTEXT_IS_PARKING, &ce->flags));
++ce->active_count;
}
@@ -51,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define CONTEXT_LRCA_DIRTY 9
#define CONTEXT_GUC_INIT 10
#define CONTEXT_PERMA_PIN 11
-+#define CONTEXT_IS_PARKED 12
++#define CONTEXT_IS_PARKING 12
struct {
u64 timeout_us;
@@ -110,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* it causing an underflow of the engine->wakeref.
*/
- flags = __timeline_mark_lock(ce);
-+ set_bit(CONTEXT_IS_PARKED, &ce->flags);
++ set_bit(CONTEXT_IS_PARKING, &ce->flags);
GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
rq = __i915_request_create(ce, GFP_NOWAIT);
@@ -119,7 +124,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
result = false;
out_unlock:
- __timeline_mark_unlock(ce, flags);
-+ clear_bit(CONTEXT_IS_PARKED, &ce->flags);
++ clear_bit(CONTEXT_IS_PARKING, &ce->flags);
return result;
}
@@ -131,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return rcu_dereference_protected(rq->timeline,
- lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
+ lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
-+ test_bit(CONTEXT_IS_PARKED, &rq->context->flags));
++ test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
}
static inline struct i915_gem_context *
diff --git a/patches/mm-memcontro--Disable-on-PREEMPT_RT.patch b/patches/mm-memcontro--Disable-on-PREEMPT_RT.patch
deleted file mode 100644
index a1b89d4f6770..000000000000
--- a/patches/mm-memcontro--Disable-on-PREEMPT_RT.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-Subject: mm/memcontrol: Disable on PREEMPT_RT
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sun, 25 Jul 2021 21:35:46 +0200
-
-559271146efc ("mm/memcg: optimize user context object stock access") is a
-classic example of optimizing for the cpu local BKL serialization without a
-clear protection scope.
-
-Disable MEMCG on RT for now.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- init/Kconfig | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -943,6 +943,7 @@ config PAGE_COUNTER
-
- config MEMCG
- bool "Memory controller"
-+ depends on !PREEMPT_RT
- select PAGE_COUNTER
- select EVENTFD
- help
diff --git a/patches/mm-memcontrol-Disable-on-PREEMPT_RT.patch b/patches/mm-memcontrol-Disable-on-PREEMPT_RT.patch
new file mode 100644
index 000000000000..149c9877cfc0
--- /dev/null
+++ b/patches/mm-memcontrol-Disable-on-PREEMPT_RT.patch
@@ -0,0 +1,42 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 7 Dec 2021 16:52:09 +0100
+Subject: [PATCH] mm/memcontrol: Disable on PREEMPT_RT
+
+MEMCG has a few constructs which are not compatible with PREEMPT_RT's
+requirements. This includes:
+- relying on disabled interrupts from spin_lock_irqsave() locking for
+ something not related to lock itself (like the per-CPU counter).
+
+- explicitly disabling interrupts and acquiring a spinlock_t based lock
+ like in memcg_check_events() -> eventfd_signal().
+
+- explicitly disabling interrupts and freeing memory like in
+ drain_obj_stock() -> obj_cgroup_put() -> obj_cgroup_release() ->
+ percpu_ref_exit().
+
+Commit 559271146efc ("mm/memcg: optimize user context object stock
+access") continued to optimize for the CPU local access which
+complicates the PREEMPT_RT locking requirements further.
+
+Disable MEMCG on PREEMPT_RT until the whole situation can be evaluated
+again.
+
+[ bigeasy: commit description. ]
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/20211207155208.eyre5svucpg7krxe@linutronix.de
+---
+ init/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -943,6 +943,7 @@ config PAGE_COUNTER
+
+ config MEMCG
+ bool "Memory controller"
++ depends on !PREEMPT_RT
+ select PAGE_COUNTER
+ select EVENTFD
+ help
diff --git a/patches/net-dev-Always-serialize-on-Qdisc-busylock-in-__dev_.patch b/patches/net-dev-Always-serialize-on-Qdisc-busylock-in-__dev_.patch
new file mode 100644
index 000000000000..1c1e12aa4fd8
--- /dev/null
+++ b/patches/net-dev-Always-serialize-on-Qdisc-busylock-in-__dev_.patch
@@ -0,0 +1,39 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 30 Mar 2016 13:36:29 +0200
+Subject: [PATCH] net: dev: Always serialize on Qdisc::busylock in
+ __dev_xmit_skb() on PREEMPT_RT.
+
+The root-lock is dropped before dev_hard_start_xmit() is invoked and after
+setting the __QDISC___STATE_RUNNING bit. If the Qdisc owner is preempted
+by another sender/task with a higher priority, then this new sender won't
+be able to submit packets to the NIC directly; instead they will be
+enqueued into the Qdisc. The NIC will remain idle until the Qdisc owner
+is scheduled again and finishes the job.
+
+By serializing every task on the ->busylock, a task can be preempted by
+another sender only once the Qdisc has no owner.
+
+Always serialize on the busylock on PREEMPT_RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/YbcmKeLngWW/pb1V@linutronix.de
+---
+ net/core/dev.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3836,8 +3836,12 @@ static inline int __dev_xmit_skb(struct
+ * separate lock before trying to get qdisc main lock.
+ * This permits qdisc->running owner to get the lock more
+ * often and dequeue packets faster.
++ * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
++ * and then other tasks will only enqueue packets. The packets will be
++ * sent after the qdisc owner is scheduled again. To prevent this
++ * scenario, tasks always serialize on the lock.
+ */
+- contended = qdisc_is_running(q);
++ contended = IS_ENABLED(CONFIG_PREEMPT_RT) || qdisc_is_running(q);
+ if (unlikely(contended))
+ spin_lock(&q->busylock);
+
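
The decision itself is a one-liner; the following self-contained C model
(illustrative only -- IS_ENABLED_CONFIG_PREEMPT_RT and struct Qdisc_model
are stand-ins, not kernel definitions) captures the before/after:

#include <stdbool.h>

#define IS_ENABLED_CONFIG_PREEMPT_RT 1	/* models IS_ENABLED(CONFIG_PREEMPT_RT) */

struct Qdisc_model {
	bool running;	/* __QDISC___STATE_RUNNING in the kernel */
};

static bool qdisc_is_running(const struct Qdisc_model *q)
{
	return q->running;
}

/* Before: only already-contended senders took the busylock.
 * After:  on PREEMPT_RT every sender takes it first. */
static bool must_take_busylock(const struct Qdisc_model *q)
{
	return IS_ENABLED_CONFIG_PREEMPT_RT || qdisc_is_running(q);
}

int main(void)
{
	struct Qdisc_model q = { .running = false };

	return must_take_busylock(&q) ? 0 : 1;	/* 0 in the RT model */
}

Since spinlock_t is an rtmutex-based, priority-inheriting lock on
PREEMPT_RT, funnelling every sender through the busylock lets a blocked
high-priority sender boost a preempted Qdisc owner instead of leaving the
NIC idle.
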
diff --git a/patches/net__Dequeue_in_dev_cpu_dead_without_the_lock.patch b/patches/net__Dequeue_in_dev_cpu_dead_without_the_lock.patch
deleted file mode 100644
index 132320db62cd..000000000000
--- a/patches/net__Dequeue_in_dev_cpu_dead_without_the_lock.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-Subject: net: Dequeue in dev_cpu_dead() without the lock
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed Sep 16 16:15:39 2020 +0200
-
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
-Upstream uses skb_dequeue() to acquire lock of `input_pkt_queue'. The reason is
-to synchronize against a remote CPU which still thinks that the CPU is online
-enqueues packets to this CPU.
-There are no guarantees that the packet is enqueued before the callback is run,
-it just hope.
-RT however complains about an not initialized lock because it uses another lock
-for `input_pkt_queue' due to the IRQ-off nature of the context.
-
-Use the unlocked dequeue version for `input_pkt_queue'.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- net/core/dev.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
----
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -11344,7 +11344,7 @@ static int dev_cpu_dead(unsigned int old
- netif_rx_ni(skb);
- input_queue_head_incr(oldsd);
- }
-- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
-+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
- netif_rx_ni(skb);
- input_queue_head_incr(oldsd);
- }
diff --git a/patches/net__Remove_preemption_disabling_in_netif_rx.patch b/patches/net__Remove_preemption_disabling_in_netif_rx.patch
index 291cf25e4a99..51fabc0faa4b 100644
--- a/patches/net__Remove_preemption_disabling_in_netif_rx.patch
+++ b/patches/net__Remove_preemption_disabling_in_netif_rx.patch
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4908,7 +4908,7 @@ static int netif_rx_internal(struct sk_b
+@@ -4915,7 +4915,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4918,14 +4918,14 @@ static int netif_rx_internal(struct sk_b
+@@ -4925,14 +4925,14 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/net__Use_skbufhead_with_raw_lock.patch b/patches/net__Use_skbufhead_with_raw_lock.patch
index 19dd0dca4ac7..cd7de80d0103 100644
--- a/patches/net__Use_skbufhead_with_raw_lock.patch
+++ b/patches/net__Use_skbufhead_with_raw_lock.patch
@@ -13,8 +13,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/skbuff.h | 7 +++++++
- net/core/dev.c | 6 +++---
- 2 files changed, 10 insertions(+), 3 deletions(-)
+ net/core/dev.c | 8 ++++----
+ 2 files changed, 11 insertions(+), 4 deletions(-)
---
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -58,7 +58,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -11660,7 +11660,7 @@ static int __init net_dev_init(void)
+@@ -11351,7 +11351,7 @@ static int dev_cpu_dead(unsigned int old
+ netif_rx_ni(skb);
+ input_queue_head_incr(oldsd);
+ }
+- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
++ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+ netif_rx_ni(skb);
+ input_queue_head_incr(oldsd);
+ }
+@@ -11667,7 +11667,7 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/net__dev__always_take_qdiscs_busylock_in___dev_xmit_skb.patch b/patches/net__dev__always_take_qdiscs_busylock_in___dev_xmit_skb.patch
deleted file mode 100644
index aee6836315b0..000000000000
--- a/patches/net__dev__always_take_qdiscs_busylock_in___dev_xmit_skb.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-Subject: net: dev: always take qdisc's busylock in __dev_xmit_skb()
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed Mar 30 13:36:29 2016 +0200
-
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
-The root-lock is dropped before dev_hard_start_xmit() is invoked and after
-setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away
-by a task with a higher priority then the task with the higher priority
-won't be able to submit packets to the NIC directly instead they will be
-enqueued into the Qdisc. The NIC will remain idle until the task(s) with
-higher priority leave the CPU and the task with lower priority gets back
-and finishes the job.
-
-If we take always the busylock we ensure that the RT task can boost the
-low-prio task and submit the packet.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- net/core/dev.c | 4 ++++
- 1 file changed, 4 insertions(+)
----
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -3839,7 +3839,11 @@ static inline int __dev_xmit_skb(struct
- * This permits qdisc->running owner to get the lock more
- * often and dequeue packets faster.
- */
-+#ifdef CONFIG_PREEMPT_RT
-+ contended = true;
-+#else
- contended = qdisc_is_running(q);
-+#endif
- if (unlikely(contended))
- spin_lock(&q->busylock);
-
diff --git a/patches/net_core__use_local_bh_disable_in_netif_rx_ni.patch b/patches/net_core__use_local_bh_disable_in_netif_rx_ni.patch
index 21a37af595d4..07818d6cf5a6 100644
--- a/patches/net_core__use_local_bh_disable_in_netif_rx_ni.patch
+++ b/patches/net_core__use_local_bh_disable_in_netif_rx_ni.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4967,11 +4967,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4974,11 +4974,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/patches/panic__skip_get_random_bytes_for_RT_FULL_in_init_oops_id.patch b/patches/panic__skip_get_random_bytes_for_RT_FULL_in_init_oops_id.patch
deleted file mode 100644
index 74f1f2508330..000000000000
--- a/patches/panic__skip_get_random_bytes_for_RT_FULL_in_init_oops_id.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue Jul 14 14:26:34 2015 +0200
-
-From: Thomas Gleixner <tglx@linutronix.de>
-
-Disable on -RT. If this is invoked from irq-context we will have problems
-to acquire the sleeping lock.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- kernel/panic.c | 2 ++
- 1 file changed, 2 insertions(+)
----
---- a/kernel/panic.c
-+++ b/kernel/panic.c
-@@ -545,9 +545,11 @@ static u64 oops_id;
-
- static int init_oops_id(void)
- {
-+#ifndef CONFIG_PREEMPT_RT
- if (!oops_id)
- get_random_bytes(&oops_id, sizeof(oops_id));
- else
-+#endif
- oops_id++;
-
- return 0;
diff --git a/patches/panic_remove_oops_id.patch b/patches/panic_remove_oops_id.patch
new file mode 100644
index 000000000000..65ff798ce0c7
--- /dev/null
+++ b/patches/panic_remove_oops_id.patch
@@ -0,0 +1,55 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: panic: Remove oops_id.
+Date: Thu, 02 Dec 2021 15:27:13 +0100
+
+The oops id has been added as part of the end of trace marker for the
+kerneloops.org project. The id is used to automatically identify duplicate
+submissions of the same report. Identical-looking reports with a
+different id can be considered as the same oops occurring again.
+
+The early initialisation of the oops_id can create a warning if the
+random core is not yet fully initialized. On PREEMPT_RT it is
+problematic if the id is initialized on demand from non-preemptible
+context.
+
+The kerneloops.org project has not been available since 2017.
+Remove the oops_id and use 0 in the output in case parsers rely on it.
+
+Link: https://bugs.debian.org/953172
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211202142713.ofadr43tawengfw4@linutronix.de
+---
+ kernel/panic.c | 19 +------------------
+ 1 file changed, 1 insertion(+), 18 deletions(-)
+
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -538,26 +538,9 @@ void oops_enter(void)
+ trigger_all_cpu_backtrace();
+ }
+
+-/*
+- * 64-bit random ID for oopses:
+- */
+-static u64 oops_id;
+-
+-static int init_oops_id(void)
+-{
+- if (!oops_id)
+- get_random_bytes(&oops_id, sizeof(oops_id));
+- else
+- oops_id++;
+-
+- return 0;
+-}
+-late_initcall(init_oops_id);
+-
+ static void print_oops_end_marker(void)
+ {
+- init_oops_id();
+- pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
++ pr_warn("---[ end trace %016llx ]---\n", 0ULL);
+ pr_flush(1000, true);
+ }
+
diff --git a/patches/random__Make_it_work_on_rt.patch b/patches/random__Make_it_work_on_rt.patch
deleted file mode 100644
index f0ddaaa61cbe..000000000000
--- a/patches/random__Make_it_work_on_rt.patch
+++ /dev/null
@@ -1,173 +0,0 @@
-Subject: random: Make it work on rt
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue Aug 21 20:38:50 2012 +0200
-
-From: Thomas Gleixner <tglx@linutronix.de>
-
-Delegate the random insertion to the forced threaded interrupt
-handler. Store the return IP of the hard interrupt handler in the irq
-descriptor and feed it into the random generator as a source of
-entropy.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
-
----
- arch/x86/kernel/cpu/mshyperv.c | 3 ++-
- drivers/char/random.c | 11 +++++------
- drivers/hv/hyperv_vmbus.h | 1 +
- drivers/hv/vmbus_drv.c | 5 ++++-
- include/linux/irqdesc.h | 1 +
- include/linux/random.h | 2 +-
- kernel/irq/handle.c | 10 ++++++++--
- kernel/irq/manage.c | 6 ++++++
- 8 files changed, 28 insertions(+), 11 deletions(-)
----
---- a/arch/x86/kernel/cpu/mshyperv.c
-+++ b/arch/x86/kernel/cpu/mshyperv.c
-@@ -75,11 +75,12 @@ void hv_remove_vmbus_handler(void)
- DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
- {
- struct pt_regs *old_regs = set_irq_regs(regs);
-+ u64 ip = regs ? instruction_pointer(regs) : 0;
-
- inc_irq_stat(hyperv_stimer0_count);
- if (hv_stimer0_handler)
- hv_stimer0_handler();
-- add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
-+ add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0, ip);
- ack_APIC_irq();
-
- set_irq_regs(old_regs);
---- a/drivers/char/random.c
-+++ b/drivers/char/random.c
-@@ -1242,26 +1242,25 @@ static __u32 get_reg(struct fast_pool *f
- return *ptr;
- }
-
--void add_interrupt_randomness(int irq, int irq_flags)
-+void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
- {
- struct entropy_store *r;
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
-- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- __u32 c_high, j_high;
-- __u64 ip;
-
- if (cycles == 0)
-- cycles = get_reg(fast_pool, regs);
-+ cycles = get_reg(fast_pool, NULL);
- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
- fast_pool->pool[1] ^= now ^ c_high;
-- ip = regs ? instruction_pointer(regs) : _RET_IP_;
-+ if (!ip)
-+ ip = _RET_IP_;
- fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
-- get_reg(fast_pool, regs);
-+ get_reg(fast_pool, NULL);
-
- fast_mix(fast_pool);
- add_interrupt_bench(cycles);
---- a/drivers/hv/hyperv_vmbus.h
-+++ b/drivers/hv/hyperv_vmbus.h
-@@ -19,6 +19,7 @@
- #include <linux/atomic.h>
- #include <linux/hyperv.h>
- #include <linux/interrupt.h>
-+#include <linux/irq.h>
-
- #include "hv_trace.h"
-
---- a/drivers/hv/vmbus_drv.c
-+++ b/drivers/hv/vmbus_drv.c
-@@ -22,6 +22,7 @@
- #include <linux/clockchips.h>
- #include <linux/cpu.h>
- #include <linux/sched/task_stack.h>
-+#include <linux/irq.h>
-
- #include <linux/delay.h>
- #include <linux/notifier.h>
-@@ -1337,6 +1338,8 @@ static void vmbus_isr(void)
- void *page_addr = hv_cpu->synic_event_page;
- struct hv_message *msg;
- union hv_synic_event_flags *event;
-+ struct pt_regs *regs = get_irq_regs();
-+ u64 ip = regs ? instruction_pointer(regs) : 0;
- bool handled = false;
-
- if (unlikely(page_addr == NULL))
-@@ -1381,7 +1384,7 @@ static void vmbus_isr(void)
- tasklet_schedule(&hv_cpu->msg_dpc);
- }
-
-- add_interrupt_randomness(vmbus_interrupt, 0);
-+ add_interrupt_randomness(vmbus_interrupt, 0, ip);
- }
-
- static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
---- a/include/linux/irqdesc.h
-+++ b/include/linux/irqdesc.h
-@@ -68,6 +68,7 @@ struct irq_desc {
- unsigned int irqs_unhandled;
- atomic_t threads_handled;
- int threads_handled_last;
-+ u64 random_ip;
- raw_spinlock_t lock;
- struct cpumask *percpu_enabled;
- const struct cpumask *percpu_affinity;
---- a/include/linux/random.h
-+++ b/include/linux/random.h
-@@ -35,7 +35,7 @@ static inline void add_latent_entropy(vo
-
- extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value) __latent_entropy;
--extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
-+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy;
-
- extern void get_random_bytes(void *buf, int nbytes);
- extern int wait_for_random_bytes(void);
---- a/kernel/irq/handle.c
-+++ b/kernel/irq/handle.c
-@@ -192,12 +192,18 @@ irqreturn_t __handle_irq_event_percpu(st
-
- irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
- {
-- irqreturn_t retval;
-+ struct pt_regs *regs = get_irq_regs();
-+ u64 ip = regs ? instruction_pointer(regs) : 0;
- unsigned int flags = 0;
-+ irqreturn_t retval;
-
- retval = __handle_irq_event_percpu(desc, &flags);
-
-- add_interrupt_randomness(desc->irq_data.irq, flags);
-+#ifdef CONFIG_PREEMPT_RT
-+ desc->random_ip = ip;
-+#else
-+ add_interrupt_randomness(desc->irq_data.irq, flags, ip);
-+#endif
-
- if (!irq_settings_no_debug(desc))
- note_interrupt(desc, retval);
---- a/kernel/irq/manage.c
-+++ b/kernel/irq/manage.c
-@@ -1281,6 +1281,12 @@ static int irq_thread(void *data)
- if (action_ret == IRQ_WAKE_THREAD)
- irq_wake_secondary(desc, action);
-
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
-+ migrate_disable();
-+ add_interrupt_randomness(action->irq, 0,
-+ desc->random_ip ^ (unsigned long) action);
-+ migrate_enable();
-+ }
- wake_threads_waitq(desc);
- }
-
diff --git a/patches/serial__8250__implement_write_atomic.patch b/patches/serial__8250__implement_write_atomic.patch
index c7bf2744e82b..b08a974b1ef9 100644
--- a/patches/serial__8250__implement_write_atomic.patch
+++ b/patches/serial__8250__implement_write_atomic.patch
@@ -276,7 +276,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
serial8250_rpm_put(up);
}
-@@ -2132,14 +2132,7 @@ static void serial8250_put_poll_char(str
+@@ -2125,14 +2125,7 @@ static void serial8250_put_poll_char(str
struct uart_8250_port *up = up_to_u8250p(port);
serial8250_rpm_get(up);
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
wait_for_xmitr(up, BOTH_EMPTY);
/*
-@@ -2152,7 +2145,7 @@ static void serial8250_put_poll_char(str
+@@ -2145,7 +2138,7 @@ static void serial8250_put_poll_char(str
* and restore the IER
*/
wait_for_xmitr(up, BOTH_EMPTY);
@@ -301,7 +301,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
serial8250_rpm_put(up);
}
-@@ -2455,7 +2448,7 @@ void serial8250_do_shutdown(struct uart_
+@@ -2448,7 +2441,7 @@ void serial8250_do_shutdown(struct uart_
*/
spin_lock_irqsave(&port->lock, flags);
up->ier = 0;
@@ -310,7 +310,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_unlock_irqrestore(&port->lock, flags);
synchronize_irq(port->irq);
-@@ -2837,7 +2830,7 @@ serial8250_do_set_termios(struct uart_po
+@@ -2830,7 +2823,7 @@ serial8250_do_set_termios(struct uart_po
if (up->capabilities & UART_CAP_RTOIE)
up->ier |= UART_IER_RTOIE;
@@ -319,7 +319,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (up->capabilities & UART_CAP_EFR) {
unsigned char efr = 0;
-@@ -3303,7 +3296,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default
+@@ -3296,7 +3289,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -328,7 +328,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
struct uart_8250_port *up = up_to_u8250p(port);
-@@ -3311,6 +3304,18 @@ static void serial8250_console_putchar(s
+@@ -3304,6 +3297,18 @@ static void serial8250_console_putchar(s
serial_port_out(port, UART_TX, ch);
}
@@ -347,7 +347,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Restore serial console when h/w power-off detected
*/
-@@ -3332,6 +3337,32 @@ static void serial8250_console_restore(s
+@@ -3325,6 +3330,32 @@ static void serial8250_console_restore(s
serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
}
@@ -380,7 +380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
-@@ -3348,24 +3379,12 @@ void serial8250_console_write(struct uar
+@@ -3341,24 +3372,12 @@ void serial8250_console_write(struct uar
struct uart_port *port = &up->port;
unsigned long flags;
unsigned int ier;
@@ -407,7 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3379,7 +3398,9 @@ void serial8250_console_write(struct uar
+@@ -3372,7 +3391,9 @@ void serial8250_console_write(struct uar
mdelay(port->rs485.delay_rts_before_send);
}
@@ -417,7 +417,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Finally, wait for transmitter to become empty
-@@ -3392,8 +3413,7 @@ void serial8250_console_write(struct uar
+@@ -3385,8 +3406,7 @@ void serial8250_console_write(struct uar
if (em485->tx_stopped)
up->rs485_stop_tx(up);
}
@@ -427,7 +427,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The receive handling will happen properly because the
-@@ -3405,8 +3425,7 @@ void serial8250_console_write(struct uar
+@@ -3398,8 +3418,7 @@ void serial8250_console_write(struct uar
if (up->msr_saved_flags)
serial8250_modem_status(up);
@@ -437,7 +437,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static unsigned int probe_baud(struct uart_port *port)
-@@ -3426,6 +3445,7 @@ static unsigned int probe_baud(struct ua
+@@ -3419,6 +3438,7 @@ static unsigned int probe_baud(struct ua
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
@@ -445,7 +445,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int baud = 9600;
int bits = 8;
int parity = 'n';
-@@ -3435,6 +3455,8 @@ int serial8250_console_setup(struct uart
+@@ -3428,6 +3448,8 @@ int serial8250_console_setup(struct uart
if (!port->iobase && !port->membase)
return -ENODEV;
diff --git a/patches/series b/patches/series
index 3a51bfbf1013..0c3315c3efdc 100644
--- a/patches/series
+++ b/patches/series
@@ -32,6 +32,21 @@ net-Write-lock-dev_base_lock-without-disabling-botto.patch
fs-namespace-Boost-the-mount_lock.lock-owner-instead.patch
0001-blk-mq-Add-blk_mq_complete_request_direct.patch
0002-mmc-core-Use-blk_mq_complete_request_direct.patch
+drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch
+kernel-locking-Use-a-pointer-in-ww_mutex_trylock.patch
+
+# lockdep
+0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
+0002-locking-Remove-rt_rwlock_is_contended.patch
+0003-locking-rtmutex-Squash-self-deadlock-check-for-ww_rt.patch
+0004-locking-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_m.patch
+0005-lockdep-Remove-softirq-accounting-on-PREEMPT_RT.patch
+0006-lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch
+0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch
+0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch
+0009-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch
+0010-x86-mm-Include-spinlock_t-definition-in-pgtable.patch
+0011-locking-Allow-to-include-asm-spinlock_types.h-from-l.patch
###########################################################################
# Posted
@@ -39,8 +54,11 @@ fs-namespace-Boost-the-mount_lock.lock-owner-instead.patch
irq_poll-Use-raise_softirq_irqoff-in-cpu_dead-notifi.patch
smp_wake_ksoftirqd_on_preempt_rt_instead_do_softirq.patch
fscache-Use-only-one-fscache_object_cong_wait.patch
-kernel-locking-Use-a-pointer-in-ww_mutex_trylock.patch
tcp-Don-t-acquire-inet_listen_hashbucket-lock-with-d.patch
+panic_remove_oops_id.patch
+mm-memcontrol-Disable-on-PREEMPT_RT.patch
+net-dev-Always-serialize-on-Qdisc-busylock-in-__dev_.patch
+u64_stats-Disable-preemption-on-32bit-UP-SMP-PREEMPT.patch
# sched
0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch
@@ -52,18 +70,12 @@ tcp-Don-t-acquire-inet_listen_hashbucket-lock-with-d.patch
0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch
0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch
-# lockdep
-0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
-0002-locking-Remove-rt_rwlock_is_contended.patch
-0003-locking-rtmutex-Squash-self-deadlock-check-for-ww_rt.patch
-0004-locking-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_m.patch
-0005-lockdep-Remove-softirq-accounting-on-PREEMPT_RT.patch
-0006-lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch
-0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch
-0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch
-0009-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch
-0010-x86-mm-Include-spinlock_t-definition-in-pgtable.patch
-0011-locking-Allow-to-include-asm-spinlock_types.h-from-l.patch
+# random
+0001_random_remove_unused_irq_flags_argument_from_add_interrupt_randomness.patch
+0002_irq_remove_unsued_flags_argument_from___handle_irq_event_percpu.patch
+0003_random_split_add_interrupt_randomness.patch
+0004_random_move_the_fast_pool_reset_into_the_caller.patch
+0005_random_defer_processing_of_randomness_on_preempt_rt.patch
###########################################################################
# Post
@@ -101,20 +113,6 @@ crypto__cryptd_-_add_a_lock_instead_preempt_disable_local_bh_disable.patch
softirq__Check_preemption_after_reenabling_interrupts.patch
###########################################################################
-# mm: Assorted RT bits. Need care
-###########################################################################
-u64_stats__Disable_preemption_on_32bit-UP_SMP_with_RT_during_updates.patch
-
-###########################################################################
-# Disable memcontrol for now. The protection scopes are FUBARed
-###########################################################################
-mm-memcontro--Disable-on-PREEMPT_RT.patch
-#mm_memcontrol__Disable_preemption_in___mod_memcg_lruvec_state.patch
-#mm__memcontrol__Replace_disable-IRQ_locking_with_a_local_lock.patch
-#mm_memcontrol__Dont_call_schedule_work_on_in_preemption_disabled_context.patch
-#mm_memcontrol__Replace_local_irq_disable_with_local_locks.patch
-
-###########################################################################
# ptrace: Revisit
###########################################################################
signal__Revert_ptrace_preempt_magic.patch
@@ -136,19 +134,10 @@ rcu__Delay_RCU-selftests.patch
###########################################################################
net_core__use_local_bh_disable_in_netif_rx_ni.patch
net__Use_skbufhead_with_raw_lock.patch
-net__Dequeue_in_dev_cpu_dead_without_the_lock.patch
-net__dev__always_take_qdiscs_busylock_in___dev_xmit_skb.patch
-
-###########################################################################
-# randomness:
-###########################################################################
-panic__skip_get_random_bytes_for_RT_FULL_in_init_oops_id.patch
-random__Make_it_work_on_rt.patch
###########################################################################
# DRM:
###########################################################################
-0002-drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch
0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
diff --git a/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch b/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
index 92d63a22cd6e..12590bf0c60d 100644
--- a/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
+++ b/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline struct task_struct *this_cpu_ksoftirqd(void)
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -623,6 +623,22 @@ static inline void tick_irq_exit(void)
+@@ -624,6 +624,22 @@ static inline void tick_irq_exit(void)
#endif
}
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
-@@ -634,6 +650,8 @@ static inline void __irq_exit_rcu(void)
+@@ -635,6 +651,8 @@ static inline void __irq_exit_rcu(void)
preempt_count_sub(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
tick_irq_exit();
}
-@@ -962,11 +980,69 @@ static struct smp_hotplug_thread softirq
+@@ -963,11 +981,69 @@ static struct smp_hotplug_thread softirq
.thread_comm = "ksoftirqd/%u",
};
diff --git a/patches/softirq__Check_preemption_after_reenabling_interrupts.patch b/patches/softirq__Check_preemption_after_reenabling_interrupts.patch
index 79c594270826..3251dc4fd512 100644
--- a/patches/softirq__Check_preemption_after_reenabling_interrupts.patch
+++ b/patches/softirq__Check_preemption_after_reenabling_interrupts.patch
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -4668,6 +4670,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -4675,6 +4677,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -6412,12 +6415,14 @@ static void net_rps_action_and_irq_enabl
+@@ -6419,12 +6422,14 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -6495,6 +6500,7 @@ void __napi_schedule(struct napi_struct
+@@ -6502,6 +6507,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -11326,6 +11332,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -11333,6 +11339,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/tcp-Don-t-acquire-inet_listen_hashbucket-lock-with-d.patch b/patches/tcp-Don-t-acquire-inet_listen_hashbucket-lock-with-d.patch
index bf4d4ed3ec69..a0065abf3e7d 100644
--- a/patches/tcp-Don-t-acquire-inet_listen_hashbucket-lock-with-d.patch
+++ b/patches/tcp-Don-t-acquire-inet_listen_hashbucket-lock-with-d.patch
@@ -27,11 +27,11 @@ local_bh_disable() + spin_lock(&ilb->lock):
spin_lock(&ilb->lock);
acquire(&ilb->lock);
-Reverse order: spin_lock(&ilb->lock) + local_bh_disable():
+Reverse order: spin_lock(&ilb2->lock) + local_bh_disable():
tcp_seq_next()
listening_get_next()
- spin_lock(&ilb->lock);
- acquire(&ilb->lock);
+ spin_lock(&ilb2->lock);
+ acquire(&ilb2->lock);
tcp4_seq_show()
get_tcp4_sock()
diff --git a/patches/u64_stats__Disable_preemption_on_32bit-UP_SMP_with_RT_during_updates.patch b/patches/u64_stats-Disable-preemption-on-32bit-UP-SMP-PREEMPT.patch
index 417c3a241021..c89279153052 100644
--- a/patches/u64_stats__Disable_preemption_on_32bit-UP_SMP_with_RT_during_updates.patch
+++ b/patches/u64_stats-Disable-preemption-on-32bit-UP-SMP-PREEMPT.patch
@@ -1,25 +1,28 @@
-Subject: u64_stats: Disable preemption on 32bit-UP/SMP with RT during updates
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon Aug 17 12:28:10 2020 +0200
+Date: Mon, 17 Aug 2020 12:28:10 +0200
+Subject: [PATCH] u64_stats: Disable preemption on 32bit UP+SMP PREEMPT_RT
+ during updates.
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
-On RT the seqcount_t is required even on UP because the softirq can be
-preempted. The IRQ handler is threaded so it is also preemptible.
+On PREEMPT_RT the seqcount_t for synchronisation is required on 32bit
+architectures even on UP because the softirq (and the threaded IRQ handler) can
+be preempted.
-Disable preemption on 32bit-RT during value updates. There is no need to
-disable interrupts on RT because the handler is run threaded. Therefore
-disabling preemption is enough to guarantee that the update is not
-interruped.
+With the seqcount_t for synchronisation, a reader with higher priority can
+preempt the writer and then spin endlessly in read_seqcount_begin() while the
+writer can't make progress.
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+To avoid such a lock-up on PREEMPT_RT the writer must disable preemption during
+the update. There is no need to disable interrupts because no writer is using
+this API in hard-IRQ context on PREEMPT_RT.
+Disable preemption on 32bit-RT within the u64_stats write section.
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/YbO4x7vRoDGUWxrv@linutronix.de
---
include/linux/u64_stats_sync.h | 42 +++++++++++++++++++++++++++--------------
1 file changed, 28 insertions(+), 14 deletions(-)
----
+
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -66,7 +66,7 @@
@@ -27,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct u64_stats_sync {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG==32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
++#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
seqcount_t seq;
#endif
};
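
Why disabling preemption fixes the lock-up is easiest to see in a toy
seqcount: the self-contained C11 model below is an illustrative sketch,
not the kernel's u64_stats implementation (the split 64-bit counter and
the retry loop are simplified):

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint seq;
static uint32_t stat_half[2];	/* a 64-bit counter split on 32-bit */

static void u64_stats_update(uint64_t val)
{
	/* preempt_disable() goes here on 32-bit PREEMPT_RT (this patch).
	 * If the writer were preempted below, seq would stay odd and a
	 * higher-priority reader would spin in its retry loop forever. */
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* odd */
	stat_half[0] = (uint32_t)val;
	stat_half[1] = (uint32_t)(val >> 32);
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even */
	/* preempt_enable() */
}

static uint64_t u64_stats_read(void)
{
	unsigned int s1, s2;
	uint64_t v;

	do {
		s1 = atomic_load_explicit(&seq, memory_order_acquire);
		v = (uint64_t)stat_half[0] | ((uint64_t)stat_half[1] << 32);
		s2 = atomic_load_explicit(&seq, memory_order_acquire);
	} while (s1 != s2 || (s1 & 1));	/* retries while a write is live */

	return v;
}

int main(void)
{
	u64_stats_update(0x1122334455667788ull);
	return u64_stats_read() == 0x1122334455667788ull ? 0 : 1;
}
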
diff --git a/patches/x86__kvm_Require_const_tsc_for_RT.patch b/patches/x86__kvm_Require_const_tsc_for_RT.patch
index 4d855fa5c859..1d473b6265e4 100644
--- a/patches/x86__kvm_Require_const_tsc_for_RT.patch
+++ b/patches/x86__kvm_Require_const_tsc_for_RT.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -8605,6 +8605,12 @@ int kvm_arch_init(void *opaque)
+@@ -8655,6 +8655,12 @@ int kvm_arch_init(void *opaque)
goto out;
}