summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2022-06-24 11:45:48 +0200
committerSebastian Andrzej Siewior <bigeasy@linutronix.de>2022-06-24 11:45:48 +0200
commit21555d2988163051727bbdab0bc2781071820506 (patch)
treea20b749d6c3d1f520a666c4b84587fc193b6e778
parentd377ffe55817ea13c3cf17e7eea073eec93f8f07 (diff)
downloadlinux-rt-21555d2988163051727bbdab0bc2781071820506.tar.gz
[ANNOUNCE] v5.19-rc3-rt5v5.19-rc3-rt5-patches
Dear RT folks! I'm pleased to announce the v5.19-rc3-rt5 patch set. Changes since v5.19-rc3-rt4: - Merge the "disable softirqs stack" patch that was applied upstream. - Merge the "don't disable preemption" patch that was applied upstream. - Slightly update the signal and ptrace patch. There is no visible change code wise. - Backport a patch from upstream to address a kfence warning. Reported by Mike Galbraith, patch by Jason A. Donenfeld. Known issues - Valentin Schneider reported a few splats on ARM64, see https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com The delta patch against v5.19-rc3-rt4 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.19/incr/patch-5.19-rc3-rt4-rt5.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.19-rc3-rt5 The RT patch against v5.19-rc3 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.19/older/patch-5.19-rc3-rt5.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.19/older/patches-5.19-rc3-rt5.tar.xz Sebastian Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--patches/0002-arm-Disable-softirq-stacks-on-PREEMPT_RT.patch35
-rw-r--r--patches/0016-printk-add-infrastucture-for-atomic-consoles.patch44
-rw-r--r--patches/0017-serial-8250-implement-write_atomic.patch46
-rw-r--r--patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch10
-rw-r--r--patches/Add_localversion_for_-RT_release.patch2
-rw-r--r--patches/Disable-softirq-stacks-on-PREEMPT_RT.patch84
-rw-r--r--patches/arch-Disable-softirq-stacks-on-PREEMPT_RT.patch150
-rw-r--r--patches/blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch26
-rw-r--r--patches/generic-softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch30
-rw-r--r--patches/mm-kfence-select-random-number-before-taking-raw-loc.patch76
-rw-r--r--patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch45
-rw-r--r--patches/sched__Add_support_for_lazy_preemption.patch14
-rw-r--r--patches/series11
-rw-r--r--patches/signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch58
-rw-r--r--patches/signal__Revert_ptrace_preempt_magic.patch36
15 files changed, 385 insertions, 282 deletions
diff --git a/patches/0002-arm-Disable-softirq-stacks-on-PREEMPT_RT.patch b/patches/0002-arm-Disable-softirq-stacks-on-PREEMPT_RT.patch
deleted file mode 100644
index ca49c5c70be6..000000000000
--- a/patches/0002-arm-Disable-softirq-stacks-on-PREEMPT_RT.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 13 Jun 2022 19:04:55 +0200
-Subject: [PATCH 2/2] arm: Disable softirq stacks on PREEMPT_RT.
-
-PREEMPT_RT preempts softirqs and the current implementation avoids
-do_softirq_own_stack() and only uses __do_softirq().
-
-Disable the unused softirqs stacks on PREEMPT_RT to safe some memory and
-ensure that do_softirq_own_stack() is not used which is not expected.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lkml.kernel.org/r/20220613182447.112191-3-bigeasy@linutronix.de
----
- arch/arm/kernel/irq.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/arch/arm/kernel/irq.c
-+++ b/arch/arm/kernel/irq.c
-@@ -70,6 +70,7 @@ static void __init init_irq_stacks(void)
- }
- }
-
-+#ifndef CONFIG_PREEMPT_RT
- static void ____do_softirq(void *arg)
- {
- __do_softirq();
-@@ -80,7 +81,7 @@ void do_softirq_own_stack(void)
- call_with_stack(____do_softirq, NULL,
- __this_cpu_read(irq_stack_ptr));
- }
--
-+#endif
- #endif
-
- int arch_show_interrupts(struct seq_file *p, int prec)
diff --git a/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch b/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch
index 3f8a3b11cf0c..9401d43337bb 100644
--- a/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch
+++ b/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
-@@ -2060,19 +2061,28 @@ static int console_trylock_spinning(void
+@@ -2063,19 +2064,28 @@ static int console_trylock_spinning(void
* dropped, a dropped message will be written out first.
*/
static void call_console_driver(struct console *con, const char *text, size_t len,
@@ -153,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2426,6 +2436,76 @@ asmlinkage __visible int _printk(const c
+@@ -2429,6 +2439,76 @@ asmlinkage __visible int _printk(const c
}
EXPORT_SYMBOL(_printk);
@@ -230,7 +230,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
static void printk_start_kthread(struct console *con);
-@@ -2440,6 +2520,8 @@ static void printk_start_kthread(struct
+@@ -2443,6 +2523,8 @@ static void printk_start_kthread(struct
#define prb_first_valid_seq(rb) 0
#define prb_next_seq(rb) 0
@@ -239,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static u64 syslog_seq;
static size_t record_print_text(const struct printk_record *r,
-@@ -2458,7 +2540,7 @@ static ssize_t msg_print_ext_body(char *
+@@ -2461,7 +2543,7 @@ static ssize_t msg_print_ext_body(char *
static void console_lock_spinning_enable(void) { }
static int console_lock_spinning_disable_and_check(void) { return 0; }
static void call_console_driver(struct console *con, const char *text, size_t len,
@@ -248,7 +248,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
}
static bool suppress_message_printing(int level) { return false; }
-@@ -2800,10 +2882,20 @@ static inline bool __console_is_usable(s
+@@ -2803,10 +2885,20 @@ static inline bool __console_is_usable(s
*
* Requires holding the console_lock.
*/
@@ -271,7 +271,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return __console_is_usable(con->flags);
}
-@@ -2828,6 +2920,66 @@ static void __console_unlock(void)
+@@ -2831,6 +2923,66 @@ static void __console_unlock(void)
up_console_sem();
}
@@ -338,7 +338,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Print one record for the given console. The record printed is whatever
* record is the next available record for the given console.
-@@ -2840,6 +2992,8 @@ static void __console_unlock(void)
+@@ -2843,6 +2995,8 @@ static void __console_unlock(void)
* If dropped messages should be printed, @dropped_text is a buffer of size
* DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
*
@@ -347,7 +347,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @handover will be set to true if a printk waiter has taken over the
* console_lock, in which case the caller is no longer holding the
* console_lock. Otherwise it is set to false. A NULL pointer may be provided
-@@ -2852,7 +3006,8 @@ static void __console_unlock(void)
+@@ -2855,7 +3009,8 @@ static void __console_unlock(void)
* Requires con->lock otherwise.
*/
static bool __console_emit_next_record(struct console *con, char *text, char *ext_text,
@@ -357,7 +357,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
static atomic_t panic_console_dropped = ATOMIC_INIT(0);
struct printk_info info;
-@@ -2860,18 +3015,22 @@ static bool __console_emit_next_record(s
+@@ -2863,18 +3018,22 @@ static bool __console_emit_next_record(s
unsigned long flags;
char *write_text;
size_t len;
@@ -384,7 +384,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (panic_in_progress() &&
atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) {
suppress_panic_printk = 1;
-@@ -2881,7 +3040,7 @@ static bool __console_emit_next_record(s
+@@ -2884,7 +3043,7 @@ static bool __console_emit_next_record(s
/* Skip record that has level above the console loglevel. */
if (suppress_message_printing(r.info->level)) {
@@ -393,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto skip;
}
-@@ -2913,9 +3072,9 @@ static bool __console_emit_next_record(s
+@@ -2916,9 +3075,9 @@ static bool __console_emit_next_record(s
stop_critical_timings();
}
@@ -405,7 +405,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (handover) {
start_critical_timings();
-@@ -2947,7 +3106,7 @@ static bool console_emit_next_record_tra
+@@ -2950,7 +3109,7 @@ static bool console_emit_next_record_tra
handover = NULL;
}
@@ -414,7 +414,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2995,7 +3154,7 @@ static bool console_flush_all(bool do_co
+@@ -2998,7 +3157,7 @@ static bool console_flush_all(bool do_co
for_each_console(con) {
bool progress;
@@ -423,7 +423,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
any_usable = true;
-@@ -3030,6 +3189,68 @@ static bool console_flush_all(bool do_co
+@@ -3033,6 +3192,68 @@ static bool console_flush_all(bool do_co
return any_usable;
}
@@ -492,7 +492,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* console_unlock - unlock the console system
*
-@@ -3145,6 +3366,11 @@ void console_unblank(void)
+@@ -3148,6 +3369,11 @@ void console_unblank(void)
*/
void console_flush_on_panic(enum con_flush_mode mode)
{
@@ -504,7 +504,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If someone else is holding the console lock, trylock will fail
* and may_schedule may be set. Ignore and proceed to unlock so
-@@ -3161,7 +3387,7 @@ void console_flush_on_panic(enum con_flu
+@@ -3164,7 +3390,7 @@ void console_flush_on_panic(enum con_flu
seq = prb_first_valid_seq(prb);
for_each_console(c)
@@ -513,7 +513,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
console_unlock();
}
-@@ -3404,19 +3630,22 @@ void register_console(struct console *ne
+@@ -3407,19 +3633,22 @@ void register_console(struct console *ne
if (newcon->flags & CON_EXTENDED)
nr_ext_console_drivers++;
@@ -539,7 +539,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (printk_kthreads_available)
-@@ -3505,6 +3734,10 @@ int unregister_console(struct console *c
+@@ -3508,6 +3737,10 @@ int unregister_console(struct console *c
console_sysfs_notify();
@@ -550,7 +550,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (console->exit)
res = console->exit(console);
-@@ -3634,7 +3867,7 @@ static bool __pr_flush(struct console *c
+@@ -3637,7 +3870,7 @@ static bool __pr_flush(struct console *c
for_each_console(c) {
if (con && con != c)
continue;
@@ -559,7 +559,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
printk_seq = c->seq;
if (printk_seq < seq)
-@@ -3716,9 +3949,10 @@ static void printk_fallback_preferred_di
+@@ -3719,9 +3952,10 @@ static void printk_fallback_preferred_di
* See __console_emit_next_record() for argument and return details.
*/
static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
@@ -572,7 +572,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static bool printer_should_wake(struct console *con, u64 seq)
-@@ -3756,6 +3990,11 @@ static int printk_kthread_func(void *dat
+@@ -3762,6 +3996,11 @@ static int printk_kthread_func(void *dat
char *text;
int error;
@@ -584,7 +584,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
if (!text) {
con_printk(KERN_ERR, con, "failed to allocate text buffer\n");
-@@ -3834,7 +4073,7 @@ static int printk_kthread_func(void *dat
+@@ -3840,7 +4079,7 @@ static int printk_kthread_func(void *dat
* which can conditionally invoke cond_resched().
*/
console_may_schedule = 0;
diff --git a/patches/0017-serial-8250-implement-write_atomic.patch b/patches/0017-serial-8250-implement-write_atomic.patch
index d26ba2b2d498..1ab4acc3cdfb 100644
--- a/patches/0017-serial-8250-implement-write_atomic.patch
+++ b/patches/0017-serial-8250-implement-write_atomic.patch
@@ -610,7 +610,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
-@@ -1707,7 +1734,7 @@ static void serial8250_disable_ms(struct
+@@ -1709,7 +1736,7 @@ static void serial8250_disable_ms(struct
mctrl_gpio_disable_ms(up->gpios);
up->ier &= ~UART_IER_MSI;
@@ -619,7 +619,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void serial8250_enable_ms(struct uart_port *port)
-@@ -1723,7 +1750,7 @@ static void serial8250_enable_ms(struct
+@@ -1725,7 +1752,7 @@ static void serial8250_enable_ms(struct
up->ier |= UART_IER_MSI;
serial8250_rpm_get(up);
@@ -628,7 +628,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial8250_rpm_put(up);
}
-@@ -2152,14 +2179,7 @@ static void serial8250_put_poll_char(str
+@@ -2154,14 +2181,7 @@ static void serial8250_put_poll_char(str
struct uart_8250_port *up = up_to_u8250p(port);
serial8250_rpm_get(up);
@@ -644,7 +644,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
wait_for_xmitr(up, BOTH_EMPTY);
/*
-@@ -2172,7 +2192,7 @@ static void serial8250_put_poll_char(str
+@@ -2174,7 +2194,7 @@ static void serial8250_put_poll_char(str
* and restore the IER
*/
wait_for_xmitr(up, BOTH_EMPTY);
@@ -653,7 +653,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial8250_rpm_put(up);
}
-@@ -2181,8 +2201,10 @@ static void serial8250_put_poll_char(str
+@@ -2183,8 +2203,10 @@ static void serial8250_put_poll_char(str
int serial8250_do_startup(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -664,7 +664,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int retval;
if (!port->fifosize)
-@@ -2202,7 +2224,7 @@ int serial8250_do_startup(struct uart_po
+@@ -2204,7 +2226,7 @@ int serial8250_do_startup(struct uart_po
up->acr = 0;
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, UART_EFR, UART_EFR_ECB);
@@ -673,7 +673,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial_port_out(port, UART_LCR, 0);
serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
-@@ -2212,7 +2234,7 @@ int serial8250_do_startup(struct uart_po
+@@ -2214,7 +2236,7 @@ int serial8250_do_startup(struct uart_po
if (port->type == PORT_DA830) {
/* Reset the port */
@@ -682,7 +682,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
mdelay(10);
-@@ -2307,6 +2329,8 @@ int serial8250_do_startup(struct uart_po
+@@ -2309,6 +2331,8 @@ int serial8250_do_startup(struct uart_po
if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
up->port.irqflags |= IRQF_SHARED;
@@ -691,7 +691,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
-@@ -2323,6 +2347,9 @@ int serial8250_do_startup(struct uart_po
+@@ -2325,6 +2349,9 @@ int serial8250_do_startup(struct uart_po
*/
spin_lock_irqsave(&port->lock, flags);
@@ -701,7 +701,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
udelay(1); /* allow THRE to set */
-@@ -2333,6 +2360,9 @@ int serial8250_do_startup(struct uart_po
+@@ -2335,6 +2362,9 @@ int serial8250_do_startup(struct uart_po
iir = serial_port_in(port, UART_IIR);
serial_port_out(port, UART_IER, 0);
@@ -711,7 +711,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irqrestore(&port->lock, flags);
if (port->irqflags & IRQF_SHARED)
-@@ -2389,10 +2419,14 @@ int serial8250_do_startup(struct uart_po
+@@ -2391,10 +2421,14 @@ int serial8250_do_startup(struct uart_po
* Do a quick test to see if we receive an interrupt when we enable
* the TX irq.
*/
@@ -726,7 +726,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
if (!(up->bugs & UART_BUG_TXEN)) {
-@@ -2424,7 +2458,7 @@ int serial8250_do_startup(struct uart_po
+@@ -2426,7 +2460,7 @@ int serial8250_do_startup(struct uart_po
if (up->dma) {
const char *msg = NULL;
@@ -735,7 +735,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
msg = "forbid DMA for kernel console";
else if (serial8250_request_dma(up))
msg = "failed to request DMA";
-@@ -2475,7 +2509,7 @@ void serial8250_do_shutdown(struct uart_
+@@ -2477,7 +2511,7 @@ void serial8250_do_shutdown(struct uart_
*/
spin_lock_irqsave(&port->lock, flags);
up->ier = 0;
@@ -744,7 +744,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irqrestore(&port->lock, flags);
synchronize_irq(port->irq);
-@@ -2841,7 +2875,7 @@ serial8250_do_set_termios(struct uart_po
+@@ -2843,7 +2877,7 @@ serial8250_do_set_termios(struct uart_po
if (up->capabilities & UART_CAP_RTOIE)
up->ier |= UART_IER_RTOIE;
@@ -753,7 +753,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (up->capabilities & UART_CAP_EFR) {
unsigned char efr = 0;
-@@ -3307,7 +3341,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default
+@@ -3309,7 +3343,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -762,7 +762,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct uart_8250_port *up = up_to_u8250p(port);
-@@ -3315,6 +3349,18 @@ static void serial8250_console_putchar(s
+@@ -3317,6 +3351,18 @@ static void serial8250_console_putchar(s
serial_port_out(port, UART_TX, ch);
}
@@ -781,7 +781,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Restore serial console when h/w power-off detected
*/
-@@ -3336,6 +3382,32 @@ static void serial8250_console_restore(s
+@@ -3338,6 +3384,32 @@ static void serial8250_console_restore(s
serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
@@ -814,7 +814,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Print a string to the serial port using the device FIFO
*
-@@ -3381,24 +3453,12 @@ void serial8250_console_write(struct uar
+@@ -3383,24 +3455,12 @@ void serial8250_console_write(struct uar
struct uart_port *port = &up->port;
unsigned long flags;
unsigned int ier, use_fifo;
@@ -841,7 +841,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3432,10 +3492,12 @@ void serial8250_console_write(struct uar
+@@ -3434,10 +3494,12 @@ void serial8250_console_write(struct uar
*/
!(up->port.flags & UPF_CONS_FLOW);
@@ -854,7 +854,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Finally, wait for transmitter to become empty
-@@ -3448,8 +3510,7 @@ void serial8250_console_write(struct uar
+@@ -3450,8 +3512,7 @@ void serial8250_console_write(struct uar
if (em485->tx_stopped)
up->rs485_stop_tx(up);
}
@@ -864,7 +864,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The receive handling will happen properly because the
-@@ -3461,8 +3522,7 @@ void serial8250_console_write(struct uar
+@@ -3463,8 +3524,7 @@ void serial8250_console_write(struct uar
if (up->msr_saved_flags)
serial8250_modem_status(up);
@@ -874,7 +874,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static unsigned int probe_baud(struct uart_port *port)
-@@ -3482,6 +3542,7 @@ static unsigned int probe_baud(struct ua
+@@ -3484,6 +3544,7 @@ static unsigned int probe_baud(struct ua
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
@@ -882,7 +882,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int baud = 9600;
int bits = 8;
int parity = 'n';
-@@ -3491,6 +3552,8 @@ int serial8250_console_setup(struct uart
+@@ -3493,6 +3554,8 @@ int serial8250_console_setup(struct uart
if (!port->iobase && !port->membase)
return -ENODEV;
diff --git a/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch b/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
index 662ff965dfb4..645def427f10 100644
--- a/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
+++ b/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1981,6 +1981,7 @@ static int console_lock_spinning_disable
+@@ -1984,6 +1984,7 @@ static int console_lock_spinning_disable
return 1;
}
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* console_trylock_spinning - try to get console_lock by busy waiting
*
-@@ -2054,6 +2055,7 @@ static int console_trylock_spinning(void
+@@ -2057,6 +2058,7 @@ static int console_trylock_spinning(void
return 1;
}
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Call the specified console driver, asking it to write out the specified
-@@ -2393,6 +2395,18 @@ asmlinkage int vprintk_emit(int facility
+@@ -2396,6 +2398,18 @@ asmlinkage int vprintk_emit(int facility
/* If called from the scheduler, we can not call up(). */
if (!in_sched && allow_direct_printing()) {
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The caller may be holding system-critical or
* timing-sensitive locks. Disable preemption during direct
-@@ -2410,6 +2424,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -2413,6 +2427,7 @@ asmlinkage int vprintk_emit(int facility
if (console_trylock_spinning())
console_unlock();
preempt_enable();
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
wake_up_klogd();
-@@ -3100,8 +3115,12 @@ static bool console_emit_next_record_tra
+@@ -3103,8 +3118,12 @@ static bool console_emit_next_record_tra
/*
* Handovers are only supported if threaded printers are atomically
* blocked. The context taking over the console_lock may be atomic.
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index 41fc0b58e69e..f2d35e0c0528 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt4
++-rt5
diff --git a/patches/Disable-softirq-stacks-on-PREEMPT_RT.patch b/patches/Disable-softirq-stacks-on-PREEMPT_RT.patch
deleted file mode 100644
index 753d2dfd6413..000000000000
--- a/patches/Disable-softirq-stacks-on-PREEMPT_RT.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 13 Jun 2022 19:04:55 +0200
-Subject: [PATCH] *: Disable softirq stacks on PREEMPT_RT.
-
-PREEMPT_RT preempts softirqs and the current implementation avoids
-do_softirq_own_stack() and only uses __do_softirq().
-
-Disable the unused softirqs stacks on PREEMPT_RT to safe some memory and
-ensure that do_softirq_own_stack() is not used which is not expected.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/powerpc/kernel/irq.c | 4 ++++
- arch/sh/kernel/irq.c | 2 ++
- arch/sparc/kernel/irq_64.c | 2 ++
- 3 files changed, 8 insertions(+)
-
---- a/arch/powerpc/kernel/irq.c
-+++ b/arch/powerpc/kernel/irq.c
-@@ -611,6 +611,7 @@ static inline void check_stack_overflow(
- }
- }
-
-+#ifndef CONFIG_PREEMPT_RT
- static __always_inline void call_do_softirq(const void *sp)
- {
- /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
-@@ -629,6 +630,7 @@ static __always_inline void call_do_soft
- "r11", "r12"
- );
- }
-+#endif
-
- static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
- {
-@@ -747,10 +749,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most
- void *softirq_ctx[NR_CPUS] __read_mostly;
- void *hardirq_ctx[NR_CPUS] __read_mostly;
-
-+#ifndef CONFIG_PREEMPT_RT
- void do_softirq_own_stack(void)
- {
- call_do_softirq(softirq_ctx[smp_processor_id()]);
- }
-+#endif
-
- irq_hw_number_t virq_to_hw(unsigned int virq)
- {
---- a/arch/sh/kernel/irq.c
-+++ b/arch/sh/kernel/irq.c
-@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
- hardirq_ctx[cpu] = NULL;
- }
-
-+#ifndef CONFIG_PREEMPT_RT
- void do_softirq_own_stack(void)
- {
- struct thread_info *curctx;
-@@ -176,6 +177,7 @@ void do_softirq_own_stack(void)
- "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
- );
- }
-+#endif
- #else
- static inline void handle_one_irq(unsigned int irq)
- {
---- a/arch/sparc/kernel/irq_64.c
-+++ b/arch/sparc/kernel/irq_64.c
-@@ -855,6 +855,7 @@ void __irq_entry handler_irq(int pil, st
- set_irq_regs(old_regs);
- }
-
-+#ifndef CONFIG_PREEMPT_RT
- void do_softirq_own_stack(void)
- {
- void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-@@ -869,6 +870,7 @@ void do_softirq_own_stack(void)
- __asm__ __volatile__("mov %0, %%sp"
- : : "r" (orig_sp));
- }
-+#endif
-
- #ifdef CONFIG_HOTPLUG_CPU
- void fixup_irqs(void)
diff --git a/patches/arch-Disable-softirq-stacks-on-PREEMPT_RT.patch b/patches/arch-Disable-softirq-stacks-on-PREEMPT_RT.patch
new file mode 100644
index 000000000000..7ecedd5e3a39
--- /dev/null
+++ b/patches/arch-Disable-softirq-stacks-on-PREEMPT_RT.patch
@@ -0,0 +1,150 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 14 Jun 2022 20:18:14 +0200
+Subject: [PATCH] arch/*: Disable softirq stacks on PREEMPT_RT.
+
+PREEMPT_RT preempts softirqs and the current implementation avoids
+do_softirq_own_stack() and only uses __do_softirq().
+
+Disable the unused softirqs stacks on PREEMPT_RT to save some memory and
+ensure that do_softirq_own_stack() is not used because it is not expected.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/CAK8P3a1QmeAscV-Ory-Dae4RoLvDSPEjEgFGQHR9U8jUervGuA@mail.gmail.com
+---
+ arch/arm/kernel/irq.c | 3 ++-
+ arch/parisc/kernel/irq.c | 2 ++
+ arch/powerpc/kernel/irq.c | 4 ++++
+ arch/s390/include/asm/softirq_stack.h | 3 ++-
+ arch/sh/kernel/irq.c | 2 ++
+ arch/sparc/kernel/irq_64.c | 2 ++
+ include/asm-generic/softirq_stack.h | 2 +-
+ 7 files changed, 15 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/kernel/irq.c
++++ b/arch/arm/kernel/irq.c
+@@ -70,6 +70,7 @@ static void __init init_irq_stacks(void)
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ static void ____do_softirq(void *arg)
+ {
+ __do_softirq();
+@@ -80,7 +81,7 @@ void do_softirq_own_stack(void)
+ call_with_stack(____do_softirq, NULL,
+ __this_cpu_read(irq_stack_ptr));
+ }
+-
++#endif
+ #endif
+
+ int arch_show_interrupts(struct seq_file *p, int prec)
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -480,10 +480,12 @@ static void execute_on_irq_stack(void *f
+ *irq_stack_in_use = 1;
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ void do_softirq_own_stack(void)
+ {
+ execute_on_irq_stack(__do_softirq, 0);
+ }
++#endif
+ #endif /* CONFIG_IRQSTACKS */
+
+ /* ONLY called from entry.S:intr_extint() */
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -611,6 +611,7 @@ static inline void check_stack_overflow(
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ static __always_inline void call_do_softirq(const void *sp)
+ {
+ /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
+@@ -629,6 +630,7 @@ static __always_inline void call_do_soft
+ "r11", "r12"
+ );
+ }
++#endif
+
+ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
+ {
+@@ -747,10 +749,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most
+ void *softirq_ctx[NR_CPUS] __read_mostly;
+ void *hardirq_ctx[NR_CPUS] __read_mostly;
+
++#ifndef CONFIG_PREEMPT_RT
+ void do_softirq_own_stack(void)
+ {
+ call_do_softirq(softirq_ctx[smp_processor_id()]);
+ }
++#endif
+
+ irq_hw_number_t virq_to_hw(unsigned int virq)
+ {
+--- a/arch/s390/include/asm/softirq_stack.h
++++ b/arch/s390/include/asm/softirq_stack.h
+@@ -5,9 +5,10 @@
+ #include <asm/lowcore.h>
+ #include <asm/stacktrace.h>
+
++#ifndef CONFIG_PREEMPT_RT
+ static inline void do_softirq_own_stack(void)
+ {
+ call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
+ }
+-
++#endif
+ #endif /* __ASM_S390_SOFTIRQ_STACK_H */
+--- a/arch/sh/kernel/irq.c
++++ b/arch/sh/kernel/irq.c
+@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
+ hardirq_ctx[cpu] = NULL;
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curctx;
+@@ -176,6 +177,7 @@ void do_softirq_own_stack(void)
+ "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+ );
+ }
++#endif
+ #else
+ static inline void handle_one_irq(unsigned int irq)
+ {
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -855,6 +855,7 @@ void __irq_entry handler_irq(int pil, st
+ set_irq_regs(old_regs);
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ void do_softirq_own_stack(void)
+ {
+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+@@ -869,6 +870,7 @@ void do_softirq_own_stack(void)
+ __asm__ __volatile__("mov %0, %%sp"
+ : : "r" (orig_sp));
+ }
++#endif
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ void fixup_irqs(void)
+--- a/include/asm-generic/softirq_stack.h
++++ b/include/asm-generic/softirq_stack.h
+@@ -2,7 +2,7 @@
+ #ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
+ #define __ASM_GENERIC_SOFTIRQ_STACK_H
+
+-#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
++#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
+ void do_softirq_own_stack(void);
+ #else
+ static inline void do_softirq_own_stack(void)
diff --git a/patches/blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch b/patches/blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch
index 64ff59fb1536..c7739220766f 100644
--- a/patches/blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch
+++ b/patches/blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch
@@ -1,5 +1,5 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 5 May 2022 19:21:47 +0200
+Date: Wed, 22 Jun 2022 09:42:37 +0200
Subject: [PATCH] blk-mq: Don't disable preemption around
__blk_mq_run_hw_queue().
@@ -10,39 +10,35 @@ number is part the mask.
__blk_mq_run_hw_queue() acquires a spin_lock_t which is a sleeping lock
on PREEMPT_RT and can't be acquired with disabled preemption.
-If it is important that the current CPU matches the requested CPU mask
-and that the context does not migrate to another CPU while
-__blk_mq_run_hw_queue() is invoked then it possible to achieve this by
-disabling migration and keeping the context preemptible.
+It is not required for correctness to invoke __blk_mq_run_hw_queue() on
+a CPU matching hctx->cpumask. Both (async and direct requests) can run
+on a CPU not matching hctx->cpumask.
-Disable only migration while testing the CPU mask and invoking
+The CPU mask without disabling preemption and invoking
__blk_mq_run_hw_queue().
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/YnQHqx/5+54jd+U+@linutronix.de
-Link: https://lore.kernel.org/r/YqISXf6GAQeWqcR+@linutronix.de
+Link: https://lkml.kernel.org/r/YrLSEiNvagKJaDs5@linutronix.de
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
---
- block/blk-mq.c | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
+ block/blk-mq.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -2083,14 +2083,14 @@ static void __blk_mq_delay_run_hw_queue(
+@@ -2085,14 +2085,10 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
- int cpu = get_cpu();
- if (cpumask_test_cpu(cpu, hctx->cpumask)) {
-+ migrate_disable();
+ if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
- put_cpu();
-+ migrate_enable();
return;
}
-
+-
- put_cpu();
-+ migrate_enable();
}
kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
diff --git a/patches/generic-softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch b/patches/generic-softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch
deleted file mode 100644
index 49638dc6a90f..000000000000
--- a/patches/generic-softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 24 Sep 2021 17:05:48 +0200
-Subject: [PATCH] generic/softirq: Disable softirq stacks on PREEMPT_RT
-
-PREEMPT_RT preempts softirqs and the current implementation avoids
-do_softirq_own_stack() and only uses __do_softirq().
-
-Disable the unused softirqs stacks on PREEMPT_RT to safe some memory and
-ensure that do_softirq_own_stack() is not used which is not expected.
-
-[bigeasy: commit description.]
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lkml.kernel.org/r/20220613182746.114115-1-bigeasy@linutronix.de
----
- include/asm-generic/softirq_stack.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/include/asm-generic/softirq_stack.h
-+++ b/include/asm-generic/softirq_stack.h
-@@ -2,7 +2,7 @@
- #ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
- #define __ASM_GENERIC_SOFTIRQ_STACK_H
-
--#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
-+#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
- void do_softirq_own_stack(void);
- #else
- static inline void do_softirq_own_stack(void)
diff --git a/patches/mm-kfence-select-random-number-before-taking-raw-loc.patch b/patches/mm-kfence-select-random-number-before-taking-raw-loc.patch
new file mode 100644
index 000000000000..3293b8ebfc0f
--- /dev/null
+++ b/patches/mm-kfence-select-random-number-before-taking-raw-loc.patch
@@ -0,0 +1,76 @@
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 9 Jun 2022 14:33:19 +0200
+Subject: [PATCH] mm/kfence: select random number before taking raw lock
+
+The RNG uses vanilla spinlocks, not raw spinlocks, so kfence should pick
+its random numbers before taking its raw spinlocks. This also has the
+nice effect of doing less work inside the lock. It should fix a splat
+that Geert saw with CONFIG_PROVE_RAW_LOCK_NESTING:
+
+ dump_backtrace.part.0+0x98/0xc0
+ show_stack+0x14/0x28
+ dump_stack_lvl+0xac/0xec
+ dump_stack+0x14/0x2c
+ __lock_acquire+0x388/0x10a0
+ lock_acquire+0x190/0x2c0
+ _raw_spin_lock_irqsave+0x6c/0x94
+ crng_make_state+0x148/0x1e4
+ _get_random_bytes.part.0+0x4c/0xe8
+ get_random_u32+0x4c/0x140
+ __kfence_alloc+0x460/0x5c4
+ kmem_cache_alloc_trace+0x194/0x1dc
+ __kthread_create_on_node+0x5c/0x1a8
+ kthread_create_on_node+0x58/0x7c
+ printk_start_kthread.part.0+0x34/0xa8
+ printk_activate_kthreads+0x4c/0x54
+ do_one_initcall+0xec/0x278
+ kernel_init_freeable+0x11c/0x214
+ kernel_init+0x24/0x124
+ ret_from_fork+0x10/0x20
+
+Link: https://lkml.kernel.org/r/20220609123319.17576-1-Jason@zx2c4.com
+Fixes: d4150779e60f ("random32: use real rng for non-deterministic randomness")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Reviewed-by: Marco Elver <elver@google.com>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Cc: John Ogness <john.ogness@linutronix.de>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/kfence/core.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct
+ unsigned long flags;
+ struct slab *slab;
+ void *addr;
++ const bool random_right_allocate = prandom_u32_max(2);
++ const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
++ !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
+
+ /* Try to obtain a free object. */
+ raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
+@@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct
+ * is that the out-of-bounds accesses detected are deterministic for
+ * such allocations.
+ */
+- if (prandom_u32_max(2)) {
++ if (random_right_allocate) {
+ /* Allocate on the "right" side, re-calculate address. */
+ meta->addr += PAGE_SIZE - size;
+ meta->addr = ALIGN_DOWN(meta->addr, cache->align);
+@@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct
+ if (cache->ctor)
+ cache->ctor(addr);
+
+- if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
++ if (random_fault)
+ kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
+
+ atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
diff --git a/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch b/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch
index 6a6ba6386881..f127901d2138 100644
--- a/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch
+++ b/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch
@@ -1,19 +1,26 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 2 May 2022 13:58:03 +0200
+Date: Wed, 22 Jun 2022 12:27:05 +0200
Subject: [PATCH] sched: Consider task_struct::saved_state in
wait_task_inactive().
Ptrace is using wait_task_inactive() to wait for the tracee to reach a
certain task state. On PREEMPT_RT that state may be stored in
-task_struct::saved_state while the tracee blocks on a sleeping lock.
+task_struct::saved_state while the tracee blocks on a sleeping lock and
+task_struct::__state is set to TASK_RTLOCK_WAIT.
+It is not possible to check only for TASK_RTLOCK_WAIT to be sure that the task
+is blocked on a sleeping lock because during wake up (after the sleeping lock
+has been acquired) the task state is set TASK_RUNNING. After the task is on CPU
+and acquired the pi_lock it will reset the state accordingly but until then
+TASK_RUNNING will be observed (with the desired state saved in saved_state).
-In that case wait_task_inactive() should wait until the requested state
-is in task_struct::__state and the task idle.
+Check also for task_struct::saved_state if the desired match was not found in
+task_struct::__state on PREEMPT_RT. If the state was found in saved_state, wait
+until the task is idle and state is visible in task_struct::__state.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/sched/core.c | 38 ++++++++++++++++++++++++++++++++++----
- 1 file changed, 34 insertions(+), 4 deletions(-)
+ kernel/sched/core.c | 40 ++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 36 insertions(+), 4 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -26,32 +33,34 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (;;) {
/*
-@@ -3301,8 +3303,22 @@ unsigned long wait_task_inactive(struct
+@@ -3301,8 +3303,24 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
- if (match_state && unlikely(READ_ONCE(p->__state) != match_state))
- return 0;
++
+ if (match_state) {
++ bool mismatch = false;
++#ifndef CONFIG_PREEMPT_RT
++		if (READ_ONCE(p->__state) != match_state)
++ mismatch = true;
++#else
+ unsigned long flags;
-+ bool missmatch = false;
+
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+#ifdef CONFIG_PREEMPT_RT
-+ if ((READ_ONCE(p->__state) != match_state) &&
-+ (READ_ONCE(p->saved_state) != match_state))
-+#else
-+ if (READ_ONCE(p->__state) != match_state)
-+#endif
-+ missmatch = true;
++ if (READ_ONCE(p->__state) != match_state &&
++ READ_ONCE(p->saved_state) != match_state)
++ mismatch = true;
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+ if (missmatch)
++#endif
++ if (mismatch)
+ return 0;
+ }
cpu_relax();
}
-@@ -3316,7 +3332,21 @@ unsigned long wait_task_inactive(struct
+@@ -3316,7 +3334,21 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
@@ -74,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &rf);
-@@ -3346,7 +3376,7 @@ unsigned long wait_task_inactive(struct
+@@ -3346,7 +3378,7 @@ unsigned long wait_task_inactive(struct
* running right now), it's preempted, and we should
* yield - it could be a while.
*/
diff --git a/patches/sched__Add_support_for_lazy_preemption.patch b/patches/sched__Add_support_for_lazy_preemption.patch
index bb74b61e61df..48a416bba703 100644
--- a/patches/sched__Add_support_for_lazy_preemption.patch
+++ b/patches/sched__Add_support_for_lazy_preemption.patch
@@ -357,7 +357,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
-@@ -4584,6 +4626,9 @@ int sched_fork(unsigned long clone_flags
+@@ -4586,6 +4628,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -367,7 +367,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -6421,6 +6466,7 @@ static void __sched notrace __schedule(u
+@@ -6453,6 +6498,7 @@ static void __sched notrace __schedule(u
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -375,7 +375,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
-@@ -6631,6 +6677,30 @@ static void __sched notrace preempt_sche
+@@ -6663,6 +6709,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -406,7 +406,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -6644,6 +6714,8 @@ asmlinkage __visible void __sched notrac
+@@ -6676,6 +6746,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -415,7 +415,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -6691,6 +6763,9 @@ asmlinkage __visible void __sched notrac
+@@ -6723,6 +6795,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -425,7 +425,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -8919,7 +8994,9 @@ void __init init_idle(struct task_struct
+@@ -8951,7 +9026,9 @@ void __init init_idle(struct task_struct
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -524,7 +524,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -2310,6 +2310,15 @@ extern void reweight_task(struct task_st
+@@ -2315,6 +2315,15 @@ extern void reweight_task(struct task_st
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
diff --git a/patches/series b/patches/series
index 3e96d7dd1428..8f16ed9ef4b2 100644
--- a/patches/series
+++ b/patches/series
@@ -1,4 +1,5 @@
# Applied upstream
+mm-kfence-select-random-number-before-taking-raw-loc.patch
###########################################################################
# John's printk queue
@@ -10,18 +11,19 @@
###########################################################################
# Posted and applied
###########################################################################
+arch-Disable-softirq-stacks-on-PREEMPT_RT.patch
+blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch
+mm-slub-Move-the-stackdepot-related-allocation-out-o.patch
# signal_x86__Delay_calling_signals_in_atomic.patch
###########################################################################
# Posted
###########################################################################
-blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch
genirq-Provide-generic_handle_domain_irq_safe.patch
-mm-slub-Move-the-stackdepot-related-allocation-out-o.patch
# Hacks to get ptrace to work.
-signal__Revert_ptrace_preempt_magic.patch
+signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
sched-Consider-task_struct-saved_state-in-wait_task_.patch
###########################################################################
@@ -50,8 +52,6 @@ rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
tick-Fix-timer-storm-since-introduction-of-timersd.patch
tpm_tis__fix_stall_after_iowrites.patch
drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch
-generic-softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch
-Disable-softirq-stacks-on-PREEMPT_RT.patch
iio-adc-stm32-adc-Use-generic_handle_domain_irq.patch
locking-lockdep-Remove-lockdep_init_map_crosslock.patch
@@ -83,7 +83,6 @@ arch_arm64__Add_lazy_preempt_support.patch
# ARM/ARM64
###########################################################################
0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
-0002-arm-Disable-softirq-stacks-on-PREEMPT_RT.patch
ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
# arm64-signal-Use-ARCH_RT_DELAYS_SIGNAL_SEND.patch
tty_serial_omap__Make_the_locking_RT_aware.patch
diff --git a/patches/signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch b/patches/signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
new file mode 100644
index 000000000000..9653a1036aba
--- /dev/null
+++ b/patches/signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
@@ -0,0 +1,58 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 22 Jun 2022 11:36:17 +0200
+Subject: [PATCH] signal: Don't disable preemption in ptrace_stop() on
+ PREEMPT_RT.
+
+Commit
+ 53da1d9456fe7 ("fix ptrace slowness")
+
+is just band aid around the problem.
+The invocation of do_notify_parent_cldstop() wakes the parent and makes
+it runnable. The scheduler then wants to replace this still running task
+with the parent. With the read_lock() acquired this is not possible
+because preemption is disabled and so this is deferred until
+read_unlock(). This scheduling point is undesired and is avoided by
+disabling preemption around the unlock operation; preemption is enabled
+again before the schedule() invocation, which has no preemption point.
+This is only undesired because the parent sleeps a cycle in
+wait_task_inactive() until the traced task leaves the run-queue in
+schedule(). It is not a correctness issue, it is just band aid to avoid the
+visible delay which sums up over multiple invocations.
+The task can still be preempted if an interrupt occurs between
+preempt_enable_no_resched() and freezable_schedule() because on the IRQ-exit
+path of the interrupt scheduling _will_ happen. This is ignored since it does
+not happen very often.
+
+On PREEMPT_RT keeping preemption disabled during the invocation of
+cgroup_enter_frozen() becomes a problem because the function acquires
+css_set_lock which is a sleeping lock on PREEMPT_RT and must not be
+acquired with disabled preemption.
+
+Don't disable preemption on PREEMPT_RT. Remove the TODO regarding adding
+read_unlock_no_resched() as there is no need for it and will cause harm.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/signal.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2297,13 +2297,13 @@ static int ptrace_stop(int exit_code, in
+ /*
+ * Don't want to allow preemption here, because
+ * sys_ptrace() needs this task to be inactive.
+- *
+- * XXX: implement read_unlock_no_resched().
+ */
+- preempt_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_disable();
+ read_unlock(&tasklist_lock);
+ cgroup_enter_frozen();
+- preempt_enable_no_resched();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_enable_no_resched();
+ freezable_schedule();
+ cgroup_leave_frozen(true);
+
diff --git a/patches/signal__Revert_ptrace_preempt_magic.patch b/patches/signal__Revert_ptrace_preempt_magic.patch
deleted file mode 100644
index dbac2c73d5c7..000000000000
--- a/patches/signal__Revert_ptrace_preempt_magic.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-Subject: signal: Revert ptrace preempt magic
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed Sep 21 19:57:12 2011 +0200
-
-From: Thomas Gleixner <tglx@linutronix.de>
-
-Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more
-than a bandaid around the ptrace design trainwreck. It's not a
-correctness issue, it's merily a cosmetic bandaid.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- kernel/signal.c | 8 --------
- 1 file changed, 8 deletions(-)
----
---- a/kernel/signal.c
-+++ b/kernel/signal.c
-@@ -2294,16 +2294,8 @@ static int ptrace_stop(int exit_code, in
- if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
- do_notify_parent_cldstop(current, false, why);
-
-- /*
-- * Don't want to allow preemption here, because
-- * sys_ptrace() needs this task to be inactive.
-- *
-- * XXX: implement read_unlock_no_resched().
-- */
-- preempt_disable();
- read_unlock(&tasklist_lock);
- cgroup_enter_frozen();
-- preempt_enable_no_resched();
- freezable_schedule();
- cgroup_leave_frozen(true);
-