author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-11-18 17:19:58 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-11-18 17:19:58 +0100
commit     9e4601b90c271ca372a407ad6bad19e3118fe0c3 (patch)
tree       cd391403a5f31d7f8089cddf10b52ffcebb2a2fd
parent     2ffa973cfd9df3a9057f5522bdd4efafeacff9a8 (diff)
download   linux-rt-9e4601b90c271ca372a407ad6bad19e3118fe0c3.tar.gz
tags       v5.16-rc1-rt2, v5.16-rc1-rt2-patches

[ANNOUNCE] v5.16-rc1-rt2
Dear RT folks!

I'm pleased to announce the v5.16-rc1-rt2 patch set.

Changes since v5.16-rc1-rt1:

  - Redo the delayed deallocation of the task-stack.

Known issues
  - netconsole triggers WARN.
  - The "Memory controller" (CONFIG_MEMCG) has been disabled.
  - Valentin Schneider reported a few splats on ARM64, see
    https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com

The delta patch against v5.16-rc1-rt1 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.16/incr/patch-5.16-rc1-rt1-rt2.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.16-rc1-rt2

The RT patch against v5.16-rc1 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.16/older/patch-5.16-rc1-rt2.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.16/older/patches-5.16-rc1-rt2.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch |   2
-rw-r--r--  patches/0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch | 161
-rw-r--r--  patches/0002_kernel_fork_duplicate_task_struct_before_stack_allocation.patch |  49
-rw-r--r--  patches/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch |   4
-rw-r--r--  patches/0003-rtmutex-Add-a-special-case-for-ww-mutex-handling.patch |   2
-rw-r--r--  patches/0003_kernel_fork_ia64_provide_a_alloc_thread_stack_node_for_ia64.patch |  66
-rw-r--r--  patches/0004_kernel_fork_don_t_assign_the_stack_pointer_in_dup_task_struct.patch | 148
-rw-r--r--  patches/0004_sched_delay_task_stack_freeing_on_rt.patch |  65
-rw-r--r--  patches/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch |   2
-rw-r--r--  patches/0005-lockdep-Make-it-RT-aware.patch |   4
-rw-r--r--  patches/0005_kernel_fork_move_memcg_charge_kernel_stack_into_config_vmap_stack.patch | 121
-rw-r--r--  patches/0006_kernel_fork_move_task_stack_account_to_do_exit.patch | 163
-rw-r--r--  patches/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch |   2
-rw-r--r--  patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch | 230
-rw-r--r--  patches/0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch |  39
-rw-r--r--  patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch |  12
-rw-r--r--  patches/ARM64__Allow_to_enable_RT.patch |   4
-rw-r--r--  patches/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch |   4
-rw-r--r--  patches/Add_localversion_for_-RT_release.patch |   2
-rw-r--r--  patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch |   6
-rw-r--r--  patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch |   4
-rw-r--r--  patches/arm64_mm_make_arch_faults_on_old_pte_check_for_migratability.patch |   2
-rw-r--r--  patches/block_mq__do_not_invoke_preempt_disable.patch |   2
-rw-r--r--  patches/console__add_write_atomic_interface.patch |   8
-rw-r--r--  patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch |   2
-rw-r--r--  patches/entry--Fix-the-preempt-lazy-fallout.patch |   2
-rw-r--r--  patches/fs_dcache__use_swait_queue_instead_of_waitqueue.patch |  12
-rw-r--r--  patches/kernel_sched__add_putget_cpu_light.patch |   2
-rw-r--r--  patches/mm-Disable-zsmalloc-on-PREEMPT_RT.patch |   4
-rw-r--r--  patches/mm-memcontro--Disable-on-PREEMPT_RT.patch |   2
-rw-r--r--  patches/mm-zsmalloc-Replace-bit-spinlock-and-get_cpu_var-usa.patch |   4
-rw-r--r--  patches/mm_vmalloc__Another_preempt_disable_region_which_sucks.patch |   6
-rw-r--r--  patches/net__Dequeue_in_dev_cpu_dead_without_the_lock.patch |   2
-rw-r--r--  patches/net__Remove_preemption_disabling_in_netif_rx.patch |   4
-rw-r--r--  patches/net__Use_skbufhead_with_raw_lock.patch |   4
-rw-r--r--  patches/net__dev__always_take_qdiscs_busylock_in___dev_xmit_skb.patch |   2
-rw-r--r--  patches/net_core__use_local_bh_disable_in_netif_rx_ni.patch |   2
-rw-r--r--  patches/printk__Enhance_the_condition_check_of_msleep_in_pr_flush.patch |   2
-rw-r--r--  patches/printk__add_console_handover.patch |   2
-rw-r--r--  patches/printk__add_pr_flush.patch |   4
-rw-r--r--  patches/printk__call_boot_delay_msec_in_printk_delay.patch |   2
-rw-r--r--  patches/printk__use_seqcount_latch_for_console_seq.patch |  18
-rw-r--r--  patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch |   6
-rw-r--r--  patches/random__Make_it_work_on_rt.patch |   2
-rw-r--r--  patches/rcu-tree-Protect-rcu_rdp_is_offloaded-invocations-on.patch |   6
-rw-r--r--  patches/rcu__Delay_RCU-selftests.patch |  10
-rw-r--r--  patches/sched-Make-preempt_enable_no_resched-behave-like-pre.patch |   2
-rw-r--r--  patches/sched__Add_support_for_lazy_preemption.patch |  10
-rw-r--r--  patches/sched_introduce_migratable.patch |   2
-rw-r--r--  patches/serial__8250__implement_write_atomic.patch |  22
-rw-r--r--  patches/series |   9
-rw-r--r--  patches/signal__Revert_ptrace_preempt_magic.patch |   2
-rw-r--r--  patches/signal_x86__Delay_calling_signals_in_atomic.patch |   2
-rw-r--r--  patches/softirq__Check_preemption_after_reenabling_interrupts.patch |  16
-rw-r--r--  patches/x86__Enable_RT_also_on_32bit.patch |   2
55 files changed, 1094 insertions, 175 deletions
diff --git a/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch b/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
index aa761042d167..83c8ac36379e 100644
--- a/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
+++ b/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2152,6 +2152,8 @@ void migrate_enable(void)
+@@ -2171,6 +2171,8 @@ void migrate_enable(void)
if (p->migration_disabled > 1) {
p->migration_disabled--;
return;
diff --git a/patches/0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch b/patches/0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch
new file mode 100644
index 000000000000..bde4a304b21c
--- /dev/null
+++ b/patches/0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch
@@ -0,0 +1,161 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kernel/fork: Redo ifdefs around task's handling.
+Date: Thu, 18 Nov 2021 15:34:45 +0100
+
+The use of ifdef CONFIG_VMAP_STACK is confusing in terms of what is
+actually happening and what can happen.
+For instance, from reading free_thread_stack() it appears that in the
+CONFIG_VMAP_STACK case we may receive a non-NULL vm pointer, but it may
+also be NULL, in which case __free_pages() is used to free the stack.
+This is, however, not the case because in the VMAP case a non-NULL
+pointer is always returned here.
+Since it looks like this might happen, the compiler generates the
+corresponding dead code with the invocation of __free_pages() and
+everything around it. Twice.
+
+Add spaces between the ifdef and the identifier to make the ifdef
+level that we are currently in recognizable.
+Add the current identifier as a comment behind #else and #endif.
+Move the code within free_thread_stack() and alloc_thread_stack_node()
+into the relevant ifdef block.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211118143452.136421-2-bigeasy@linutronix.de
+---
+ kernel/fork.c | 74 ++++++++++++++++++++++++++++++----------------------------
+ 1 file changed, 39 insertions(+), 35 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -184,7 +184,7 @@ static inline void free_task_struct(stru
+ */
+ # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
+
+-#ifdef CONFIG_VMAP_STACK
++# ifdef CONFIG_VMAP_STACK
+ /*
+ * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
+ * flush. Try to minimize the number of calls by caching stacks.
+@@ -209,11 +209,9 @@ static int free_vm_stack_cache(unsigned
+
+ return 0;
+ }
+-#endif
+
+ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
+ {
+-#ifdef CONFIG_VMAP_STACK
+ void *stack;
+ int i;
+
+@@ -257,45 +255,53 @@ static unsigned long *alloc_thread_stack
+ tsk->stack = stack;
+ }
+ return stack;
+-#else
+- struct page *page = alloc_pages_node(node, THREADINFO_GFP,
+- THREAD_SIZE_ORDER);
+-
+- if (likely(page)) {
+- tsk->stack = kasan_reset_tag(page_address(page));
+- return tsk->stack;
+- }
+- return NULL;
+-#endif
+ }
+
+-static inline void free_thread_stack(struct task_struct *tsk)
++static void free_thread_stack(struct task_struct *tsk)
+ {
+-#ifdef CONFIG_VMAP_STACK
+ struct vm_struct *vm = task_stack_vm_area(tsk);
++ int i;
+
+- if (vm) {
+- int i;
++ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
++ memcg_kmem_uncharge_page(vm->pages[i], 0);
+
+- for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
+- memcg_kmem_uncharge_page(vm->pages[i], 0);
++ for (i = 0; i < NR_CACHED_STACKS; i++) {
++ if (this_cpu_cmpxchg(cached_stacks[i], NULL,
++ tsk->stack_vm_area) != NULL)
++ continue;
++
++ tsk->stack = NULL;
++ tsk->stack_vm_area = NULL;
++ return;
++ }
++ vfree_atomic(tsk->stack);
++ tsk->stack = NULL;
++ tsk->stack_vm_area = NULL;
++}
+
+- for (i = 0; i < NR_CACHED_STACKS; i++) {
+- if (this_cpu_cmpxchg(cached_stacks[i],
+- NULL, tsk->stack_vm_area) != NULL)
+- continue;
++# else /* !CONFIG_VMAP_STACK */
+
+- return;
+- }
++static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
++{
++ struct page *page = alloc_pages_node(node, THREADINFO_GFP,
++ THREAD_SIZE_ORDER);
+
+- vfree_atomic(tsk->stack);
+- return;
++ if (likely(page)) {
++ tsk->stack = kasan_reset_tag(page_address(page));
++ return tsk->stack;
+ }
+-#endif
++ return NULL;
++}
+
++static void free_thread_stack(struct task_struct *tsk)
++{
+ __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
++ tsk->stack = NULL;
+ }
+-# else
++
++# endif /* CONFIG_VMAP_STACK */
++# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */
++
+ static struct kmem_cache *thread_stack_cache;
+
+ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
+@@ -311,6 +317,7 @@ static unsigned long *alloc_thread_stack
+ static void free_thread_stack(struct task_struct *tsk)
+ {
+ kmem_cache_free(thread_stack_cache, tsk->stack);
++ tsk->stack = NULL;
+ }
+
+ void thread_stack_cache_init(void)
+@@ -320,8 +327,9 @@ void thread_stack_cache_init(void)
+ THREAD_SIZE, NULL);
+ BUG_ON(thread_stack_cache == NULL);
+ }
+-# endif
+-#endif
++
++# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
++#endif /* !CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
+
+ /* SLAB cache for signal_struct structures (tsk->signal) */
+ static struct kmem_cache *signal_cachep;
+@@ -429,10 +437,6 @@ static void release_task_stack(struct ta
+
+ account_kernel_stack(tsk, -1);
+ free_thread_stack(tsk);
+- tsk->stack = NULL;
+-#ifdef CONFIG_VMAP_STACK
+- tsk->stack_vm_area = NULL;
+-#endif
+ }
+
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
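
A side note for readers following the restructuring: the caching scheme that
free_thread_stack() relies on parks freed VMAP stacks in a small per-CPU
array via this_cpu_cmpxchg(). A minimal user-space sketch of the same idea,
with C11 atomics standing in for the per-CPU primitives (sizes and names are
illustrative, not the kernel's exact code):

#include <stdatomic.h>
#include <stdlib.h>

#define NR_CACHED_STACKS 2
#define STACK_SIZE 16384

static _Atomic(void *) cached_stacks[NR_CACHED_STACKS];

/* Park a freed stack in an empty slot; if all slots are taken, really
 * free it. This mirrors the this_cpu_cmpxchg() loop above. */
static void cache_or_free(void *stack)
{
	for (int i = 0; i < NR_CACHED_STACKS; i++) {
		void *expected = NULL;

		if (atomic_compare_exchange_strong(&cached_stacks[i],
						   &expected, stack))
			return;	/* slot was empty; stack is now cached */
	}
	free(stack);		/* cache full; release the memory */
}

/* Hand out a cached stack if one is available, otherwise allocate. */
static void *get_stack(void)
{
	for (int i = 0; i < NR_CACHED_STACKS; i++) {
		void *s = atomic_exchange(&cached_stacks[i], NULL);

		if (s)
			return s;	/* recycled without an allocation */
	}
	return malloc(STACK_SIZE);
}

int main(void)
{
	void *a = get_stack();	/* cache empty: falls through to malloc() */

	cache_or_free(a);	/* parked in slot 0 instead of freed */
	free(get_stack());	/* hands back the cached stack */
	return 0;
}
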
diff --git a/patches/0002_kernel_fork_duplicate_task_struct_before_stack_allocation.patch b/patches/0002_kernel_fork_duplicate_task_struct_before_stack_allocation.patch
new file mode 100644
index 000000000000..9b5bd4dcd325
--- /dev/null
+++ b/patches/0002_kernel_fork_duplicate_task_struct_before_stack_allocation.patch
@@ -0,0 +1,49 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kernel/fork: Duplicate task_struct before stack allocation.
+Date: Thu, 18 Nov 2021 15:34:46 +0100
+
+alloc_thread_stack_node() already populates the task_struct::stack
+member except on IA64. The stack pointer is saved and populated again
+because IA64 needs it and arch_dup_task_struct() overwrites it.
+
+As a preparation, allocate the thread's stack after the task_struct
+has been duplicated.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211118143452.136421-3-bigeasy@linutronix.de
+---
+ kernel/fork.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -887,6 +887,10 @@ static struct task_struct *dup_task_stru
+ if (!tsk)
+ return NULL;
+
++ err = arch_dup_task_struct(tsk, orig);
++ if (err)
++ goto free_tsk;
++
+ stack = alloc_thread_stack_node(tsk, node);
+ if (!stack)
+ goto free_tsk;
+@@ -896,8 +900,6 @@ static struct task_struct *dup_task_stru
+
+ stack_vm_area = task_stack_vm_area(tsk);
+
+- err = arch_dup_task_struct(tsk, orig);
+-
+ /*
+ * arch_dup_task_struct() clobbers the stack-related fields. Make
+ * sure they're properly initialized before using any stack-related
+@@ -911,9 +913,6 @@ static struct task_struct *dup_task_stru
+ refcount_set(&tsk->stack_refcount, 1);
+ #endif
+
+- if (err)
+- goto free_stack;
+-
+ err = scs_prepare(tsk, node);
+ if (err)
+ goto free_stack;
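
The ordering constraint behind this patch is easy to demonstrate in
miniature: a whole-struct copy clobbers every field, including the stack
pointer, so the stack must be allocated after the copy. A self-contained
sketch under that assumption (stand-in types and names, not kernel code):

#include <stdlib.h>

struct task {
	void *stack;
};

/* Like arch_dup_task_struct(): a whole-struct copy that overwrites
 * every field of dst, including dst->stack. */
static int arch_dup(struct task *dst, const struct task *src)
{
	*dst = *src;
	return 0;
}

static int alloc_stack(struct task *tsk)
{
	tsk->stack = malloc(16384);
	return tsk->stack ? 0 : -1;
}

static struct task *dup_task(const struct task *orig)
{
	struct task *tsk = malloc(sizeof(*tsk));

	if (!tsk)
		return NULL;

	/* Duplicate first: doing the copy after alloc_stack() would
	 * overwrite the freshly assigned ->stack with orig's value. */
	if (arch_dup(tsk, orig))
		goto free_tsk;

	if (alloc_stack(tsk))
		goto free_tsk;

	return tsk;

free_tsk:
	free(tsk);
	return NULL;
}

int main(void)
{
	struct task init = { .stack = NULL };
	struct task *t = dup_task(&init);

	if (t) {
		free(t->stack);
		free(t);
	}
	return 0;
}
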
diff --git a/patches/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch b/patches/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
index fa8699c3b14a..1d2fecd82e55 100644
--- a/patches/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
+++ b/patches/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -886,7 +886,8 @@ static bool i915_get_crtc_scanoutpos(str
+@@ -916,7 +916,8 @@ static bool i915_get_crtc_scanoutpos(str
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -950,7 +951,8 @@ static bool i915_get_crtc_scanoutpos(str
+@@ -980,7 +981,8 @@ static bool i915_get_crtc_scanoutpos(str
if (etime)
*etime = ktime_get();
diff --git a/patches/0003-rtmutex-Add-a-special-case-for-ww-mutex-handling.patch b/patches/0003-rtmutex-Add-a-special-case-for-ww-mutex-handling.patch
index 43c1ffd5a2fd..07323cbb3bd9 100644
--- a/patches/0003-rtmutex-Add-a-special-case-for-ww-mutex-handling.patch
+++ b/patches/0003-rtmutex-Add-a-special-case-for-ww-mutex-handling.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1097,8 +1097,26 @@ static int __sched task_blocks_on_rt_mut
+@@ -1104,8 +1104,26 @@ static int __sched task_blocks_on_rt_mut
* which is wrong, as the other waiter is not in a deadlock
* situation.
*/
diff --git a/patches/0003_kernel_fork_ia64_provide_a_alloc_thread_stack_node_for_ia64.patch b/patches/0003_kernel_fork_ia64_provide_a_alloc_thread_stack_node_for_ia64.patch
new file mode 100644
index 000000000000..fe5eaa4110bc
--- /dev/null
+++ b/patches/0003_kernel_fork_ia64_provide_a_alloc_thread_stack_node_for_ia64.patch
@@ -0,0 +1,66 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kernel/fork, IA64: Provide an alloc_thread_stack_node() for IA64.
+Date: Thu, 18 Nov 2021 15:34:47 +0100
+
+Provide a generic alloc_thread_stack_node() for IA64/
+CONFIG_ARCH_THREAD_STACK_ALLOCATOR which returns the stack pointer and
+sets task_struct::stack so it behaves exactly like the other implementations.
+
+Rename IA64's alloc_thread_stack_node() and add the generic version to
+the fork code so it is in one place _and_ to drastically lower the
+chances of fat-fingering the IA64 code.
+Do the same for free_thread_stack().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211118143452.136421-4-bigeasy@linutronix.de
+---
+ arch/ia64/include/asm/thread_info.h | 6 +++---
+ kernel/fork.c | 16 ++++++++++++++++
+ 2 files changed, 19 insertions(+), 3 deletions(-)
+
+--- a/arch/ia64/include/asm/thread_info.h
++++ b/arch/ia64/include/asm/thread_info.h
+@@ -55,15 +55,15 @@ struct thread_info {
+ #ifndef ASM_OFFSETS_C
+ /* how to get the thread information struct from C */
+ #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
+-#define alloc_thread_stack_node(tsk, node) \
++#define arch_alloc_thread_stack_node(tsk, node) \
+ ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
+ #define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+ #else
+ #define current_thread_info() ((struct thread_info *) 0)
+-#define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0)
++#define arch_alloc_thread_stack_node(tsk, node) ((unsigned long *) 0)
+ #define task_thread_info(tsk) ((struct thread_info *) 0)
+ #endif
+-#define free_thread_stack(tsk) /* nothing */
++#define arch_free_thread_stack(tsk) /* nothing */
+ #define task_stack_page(tsk) ((void *)(tsk))
+
+ #define __HAVE_THREAD_FUNCTIONS
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -329,6 +329,22 @@ void thread_stack_cache_init(void)
+ }
+
+ # endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
++#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
++
++static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
++{
++ unsigned long *stack;
++
++ stack = arch_alloc_thread_stack_node(tsk, node);
++ tsk->stack = stack;
++ return stack;
++}
++
++static void free_thread_stack(struct task_struct *tsk, bool cache_only)
++{
++ arch_free_thread_stack(tsk);
++}
++
+ #endif /* !CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
+
+ /* SLAB cache for signal_struct structures (tsk->signal) */
diff --git a/patches/0004_kernel_fork_don_t_assign_the_stack_pointer_in_dup_task_struct.patch b/patches/0004_kernel_fork_don_t_assign_the_stack_pointer_in_dup_task_struct.patch
new file mode 100644
index 000000000000..680760cc4753
--- /dev/null
+++ b/patches/0004_kernel_fork_don_t_assign_the_stack_pointer_in_dup_task_struct.patch
@@ -0,0 +1,148 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kernel/fork: Don't assign the stack pointer in dup_task_struct().
+Date: Thu, 18 Nov 2021 15:34:48 +0100
+
+All four versions of alloc_thread_stack_node() now assign
+task_struct::stack if the allocation was successful.
+
+Let alloc_thread_stack_node() return an error code instead of the stack
+pointer and remove the stack assignment in dup_task_struct().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211118143452.136421-5-bigeasy@linutronix.de
+---
+ kernel/fork.c | 47 ++++++++++++++++-------------------------------
+ 1 file changed, 16 insertions(+), 31 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -210,7 +210,7 @@ static int free_vm_stack_cache(unsigned
+ return 0;
+ }
+
+-static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
++static int alloc_thread_stack_node(struct task_struct *tsk, int node)
+ {
+ void *stack;
+ int i;
+@@ -231,7 +231,7 @@ static unsigned long *alloc_thread_stack
+
+ tsk->stack_vm_area = s;
+ tsk->stack = s->addr;
+- return s->addr;
++ return 0;
+ }
+
+ /*
+@@ -244,17 +244,16 @@ static unsigned long *alloc_thread_stack
+ THREADINFO_GFP & ~__GFP_ACCOUNT,
+ PAGE_KERNEL,
+ 0, node, __builtin_return_address(0));
+-
++ if (!stack)
++ return -ENOMEM;
+ /*
+ * We can't call find_vm_area() in interrupt context, and
+ * free_thread_stack() can be called in interrupt context,
+ * so cache the vm_struct.
+ */
+- if (stack) {
+- tsk->stack_vm_area = find_vm_area(stack);
+- tsk->stack = stack;
+- }
+- return stack;
++ tsk->stack_vm_area = find_vm_area(stack);
++ tsk->stack = stack;
++ return 0;
+ }
+
+ static void free_thread_stack(struct task_struct *tsk)
+@@ -281,16 +280,16 @@ static void free_thread_stack(struct tas
+
+ # else /* !CONFIG_VMAP_STACK */
+
+-static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
++static int alloc_thread_stack_node(struct task_struct *tsk, int node)
+ {
+ struct page *page = alloc_pages_node(node, THREADINFO_GFP,
+ THREAD_SIZE_ORDER);
+
+ if (likely(page)) {
+ tsk->stack = kasan_reset_tag(page_address(page));
+- return tsk->stack;
++ return 0;
+ }
+- return NULL;
++ return -ENOMEM;
+ }
+
+ static void free_thread_stack(struct task_struct *tsk)
+@@ -304,14 +303,13 @@ static void free_thread_stack(struct tas
+
+ static struct kmem_cache *thread_stack_cache;
+
+-static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
+- int node)
++static int alloc_thread_stack_node(struct task_struct *tsk, int node)
+ {
+ unsigned long *stack;
+ stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
+ stack = kasan_reset_tag(stack);
+ tsk->stack = stack;
+- return stack;
++ return stack ? 0 : -ENOMEM;
+ }
+
+ static void free_thread_stack(struct task_struct *tsk)
+@@ -331,13 +329,13 @@ void thread_stack_cache_init(void)
+ # endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
+ #else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
+
+-static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
++static int alloc_thread_stack_node(struct task_struct *tsk, int node)
+ {
+ unsigned long *stack;
+
+ stack = arch_alloc_thread_stack_node(tsk, node);
+ tsk->stack = stack;
+- return stack;
++ return stack ? 0 : -ENOMEM;
+ }
+
+ static void free_thread_stack(struct task_struct *tsk, bool cache_only)
+@@ -893,8 +891,6 @@ void set_task_stack_end_magic(struct tas
+ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+ {
+ struct task_struct *tsk;
+- unsigned long *stack;
+- struct vm_struct *stack_vm_area __maybe_unused;
+ int err;
+
+ if (node == NUMA_NO_NODE)
+@@ -907,24 +903,13 @@ static struct task_struct *dup_task_stru
+ if (err)
+ goto free_tsk;
+
+- stack = alloc_thread_stack_node(tsk, node);
+- if (!stack)
++ err = alloc_thread_stack_node(tsk, node);
++ if (err)
+ goto free_tsk;
+
+ if (memcg_charge_kernel_stack(tsk))
+ goto free_stack;
+
+- stack_vm_area = task_stack_vm_area(tsk);
+-
+- /*
+- * arch_dup_task_struct() clobbers the stack-related fields. Make
+- * sure they're properly initialized before using any stack-related
+- * functions again.
+- */
+- tsk->stack = stack;
+-#ifdef CONFIG_VMAP_STACK
+- tsk->stack_vm_area = stack_vm_area;
+-#endif
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ refcount_set(&tsk->stack_refcount, 1);
+ #endif
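
The resulting allocator contract is easiest to see in isolation. A
simplified, hypothetical rendering (the kernel's variants differ in how
they obtain the memory, but all now behave like this):

#include <errno.h>
#include <stdlib.h>

struct task {
	void *stack;
};

/* Every variant sets tsk->stack itself on success and reports the
 * outcome as 0 or -ENOMEM; callers never juggle the raw pointer. */
static int alloc_thread_stack(struct task *tsk)
{
	tsk->stack = malloc(16384);
	return tsk->stack ? 0 : -ENOMEM;
}

int main(void)
{
	struct task tsk = { 0 };
	int err = alloc_thread_stack(&tsk);

	if (err)
		return 1;	/* no separate stack variable to clean up */
	free(tsk.stack);
	return 0;
}
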
diff --git a/patches/0004_sched_delay_task_stack_freeing_on_rt.patch b/patches/0004_sched_delay_task_stack_freeing_on_rt.patch
deleted file mode 100644
index 89588aa0da4d..000000000000
--- a/patches/0004_sched_delay_task_stack_freeing_on_rt.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Subject: sched: Delay task stack freeing on RT
-Date: Tue, 28 Sep 2021 14:24:30 +0200
-
-Anything which is done on behalf of a dead task at the end of
-finish_task_switch() is preventing the incoming task from doing useful
-work. While it is benefitial for fork heavy workloads to recycle the task
-stack quickly, this is a latency source for real-time tasks.
-
-Therefore delay the stack cleanup on RT enabled kernels.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Link: https://lore.kernel.org/r/20210928122411.593486363@linutronix.de
----
- kernel/exit.c | 5 +++++
- kernel/fork.c | 5 ++++-
- kernel/sched/core.c | 8 ++++++--
- 3 files changed, 15 insertions(+), 3 deletions(-)
-
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -172,6 +172,11 @@ static void delayed_put_task_struct(stru
- kprobe_flush_task(tsk);
- perf_event_delayed_put(tsk);
- trace_sched_process_free(tsk);
-+
-+ /* RT enabled kernels delay freeing the VMAP'ed task stack */
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+ put_task_stack(tsk);
-+
- put_task_struct(tsk);
- }
-
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -289,7 +289,10 @@ static inline void free_thread_stack(str
- return;
- }
-
-- vfree_atomic(tsk->stack);
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-+ vfree_atomic(tsk->stack);
-+ else
-+ vfree(tsk->stack);
- return;
- }
- #endif
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -4845,8 +4845,12 @@ static struct rq *finish_task_switch(str
- if (prev->sched_class->task_dead)
- prev->sched_class->task_dead(prev);
-
-- /* Task is done with its stack. */
-- put_task_stack(prev);
-+ /*
-+ * Release VMAP'ed task stack immediate for reuse. On RT
-+ * enabled kernels this is delayed for latency reasons.
-+ */
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-+ put_task_stack(prev);
-
- put_task_struct_rcu_user(prev);
- }
diff --git a/patches/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch b/patches/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
index 88fd283330f1..723c24bb124b 100644
--- a/patches/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
+++ b/patches/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
-@@ -343,7 +343,7 @@ wait_remaining_ms_from_jiffies(unsigned
+@@ -344,7 +344,7 @@ wait_remaining_ms_from_jiffies(unsigned
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
diff --git a/patches/0005-lockdep-Make-it-RT-aware.patch b/patches/0005-lockdep-Make-it-RT-aware.patch
index 833ded1f6536..0e220d5a1223 100644
--- a/patches/0005-lockdep-Make-it-RT-aware.patch
+++ b/patches/0005-lockdep-Make-it-RT-aware.patch
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void stop_critical_timings(void);
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -5473,6 +5473,7 @@ static noinstr void check_flags(unsigned
+@@ -5485,6 +5485,7 @@ static noinstr void check_flags(unsigned
}
}
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -5487,6 +5488,7 @@ static noinstr void check_flags(unsigned
+@@ -5499,6 +5500,7 @@ static noinstr void check_flags(unsigned
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/patches/0005_kernel_fork_move_memcg_charge_kernel_stack_into_config_vmap_stack.patch b/patches/0005_kernel_fork_move_memcg_charge_kernel_stack_into_config_vmap_stack.patch
new file mode 100644
index 000000000000..771eb276f9d1
--- /dev/null
+++ b/patches/0005_kernel_fork_move_memcg_charge_kernel_stack_into_config_vmap_stack.patch
@@ -0,0 +1,121 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kernel/fork: Move memcg_charge_kernel_stack() into CONFIG_VMAP_STACK.
+Date: Thu, 18 Nov 2021 15:34:49 +0100
+
+memcg_charge_kernel_stack() is only used in the CONFIG_VMAP_STACK case.
+
+Move memcg_charge_kernel_stack() into the CONFIG_VMAP_STACK block and
+invoke it from within alloc_thread_stack_node().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211118143452.136421-6-bigeasy@linutronix.de
+---
+ kernel/fork.c | 69 ++++++++++++++++++++++++++++++----------------------------
+ 1 file changed, 36 insertions(+), 33 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -210,6 +210,32 @@ static int free_vm_stack_cache(unsigned
+ return 0;
+ }
+
++static int memcg_charge_kernel_stack(struct task_struct *tsk)
++{
++ struct vm_struct *vm = task_stack_vm_area(tsk);
++ int i;
++ int ret;
++
++ BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
++ BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
++
++ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
++ ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
++ if (ret)
++ goto err;
++ }
++ return 0;
++err:
++ /*
++ * If memcg_kmem_charge_page() fails, page's memory cgroup pointer is
++ * NULL, and memcg_kmem_uncharge_page() in free_thread_stack() will
++ * ignore this page.
++ */
++ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
++ memcg_kmem_uncharge_page(vm->pages[i], 0);
++ return ret;
++}
++
+ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
+ {
+ void *stack;
+@@ -229,6 +255,11 @@ static int alloc_thread_stack_node(struc
+ /* Clear stale pointers from reused stack. */
+ memset(s->addr, 0, THREAD_SIZE);
+
++ if (memcg_charge_kernel_stack(tsk)) {
++ vfree(s->addr);
++ return -ENOMEM;
++ }
++
+ tsk->stack_vm_area = s;
+ tsk->stack = s->addr;
+ return 0;
+@@ -246,6 +277,11 @@ static int alloc_thread_stack_node(struc
+ 0, node, __builtin_return_address(0));
+ if (!stack)
+ return -ENOMEM;
++
++ if (memcg_charge_kernel_stack(tsk)) {
++ vfree(stack);
++ return -ENOMEM;
++ }
+ /*
+ * We can't call find_vm_area() in interrupt context, and
+ * free_thread_stack() can be called in interrupt context,
+@@ -414,36 +450,6 @@ static void account_kernel_stack(struct
+ }
+ }
+
+-static int memcg_charge_kernel_stack(struct task_struct *tsk)
+-{
+-#ifdef CONFIG_VMAP_STACK
+- struct vm_struct *vm = task_stack_vm_area(tsk);
+- int ret;
+-
+- BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
+-
+- if (vm) {
+- int i;
+-
+- BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
+-
+- for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+- /*
+- * If memcg_kmem_charge_page() fails, page's
+- * memory cgroup pointer is NULL, and
+- * memcg_kmem_uncharge_page() in free_thread_stack()
+- * will ignore this page.
+- */
+- ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL,
+- 0);
+- if (ret)
+- return ret;
+- }
+- }
+-#endif
+- return 0;
+-}
+-
+ static void release_task_stack(struct task_struct *tsk)
+ {
+ if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
+@@ -907,9 +913,6 @@ static struct task_struct *dup_task_stru
+ if (err)
+ goto free_tsk;
+
+- if (memcg_charge_kernel_stack(tsk))
+- goto free_stack;
+-
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ refcount_set(&tsk->stack_refcount, 1);
+ #endif
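
The charge loop above is a classic all-or-nothing pattern: charge each
page and, on the first failure, sweep the whole range to undo. A runnable
user-space sketch of that pattern (the forced failure on page 2 and all
names are made up for illustration):

#include <stdio.h>

#define NR_PAGES 4

static int charged[NR_PAGES];

/* Stand-in for memcg_kmem_charge_page(); fails on page 2 for the demo. */
static int charge_page(int i)
{
	if (i == 2)
		return -1;
	charged[i] = 1;
	return 0;
}

/* Stand-in for memcg_kmem_uncharge_page(); like the real one, it is a
 * no-op for a page whose charge never succeeded. */
static void uncharge_page(int i)
{
	if (charged[i]) {
		charged[i] = 0;
		printf("uncharged page %d\n", i);
	}
}

/* Charge every page; on the first failure, sweep the full range to
 * undo. Sweeping from 0 is safe because uncharging an uncharged page
 * is a no-op. */
static int charge_all(void)
{
	int i, ret = 0;

	for (i = 0; i < NR_PAGES; i++) {
		ret = charge_page(i);
		if (ret)
			goto err;
	}
	return 0;
err:
	for (i = 0; i < NR_PAGES; i++)
		uncharge_page(i);
	return ret;
}

int main(void)
{
	return charge_all() ? 1 : 0;
}
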
diff --git a/patches/0006_kernel_fork_move_task_stack_account_to_do_exit.patch b/patches/0006_kernel_fork_move_task_stack_account_to_do_exit.patch
new file mode 100644
index 000000000000..9f50ac6d70a5
--- /dev/null
+++ b/patches/0006_kernel_fork_move_task_stack_account_to_do_exit.patch
@@ -0,0 +1,163 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kernel/fork: Move task stack account to do_exit().
+Date: Thu, 18 Nov 2021 15:34:50 +0100
+
+There is no need to perform the stack accounting of the outgoing task
+in its final schedule() invocation, which happens with preemption
+disabled. The task is leaving, the resources will be freed and the
+accounting can happen in do_exit() before the actual schedule()
+invocation which frees the stack memory.
+
+Move the accounting of the stack memory from release_task_stack() to
+exit_task_stack_account() which then can be invoked from do_exit().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
+---
+ include/linux/sched/task_stack.h | 2 ++
+ kernel/exit.c | 1 +
+ kernel/fork.c | 35 +++++++++++++++++++++++------------
+ 3 files changed, 26 insertions(+), 12 deletions(-)
+
+--- a/include/linux/sched/task_stack.h
++++ b/include/linux/sched/task_stack.h
+@@ -79,6 +79,8 @@ static inline void *try_get_task_stack(s
+ static inline void put_task_stack(struct task_struct *tsk) {}
+ #endif
+
++void exit_task_stack_account(struct task_struct *tsk);
++
+ #define task_stack_end_corrupted(task) \
+ (*(end_of_stack(task)) != STACK_END_MAGIC)
+
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -871,6 +871,7 @@ void __noreturn do_exit(long code)
+ put_page(tsk->task_frag.page);
+
+ validate_creds_for_do_exit(tsk);
++ exit_task_stack_account(tsk);
+
+ check_stack_usage();
+ preempt_disable();
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -210,9 +210,8 @@ static int free_vm_stack_cache(unsigned
+ return 0;
+ }
+
+-static int memcg_charge_kernel_stack(struct task_struct *tsk)
++static int memcg_charge_kernel_stack(struct vm_struct *vm)
+ {
+- struct vm_struct *vm = task_stack_vm_area(tsk);
+ int i;
+ int ret;
+
+@@ -238,6 +237,7 @@ static int memcg_charge_kernel_stack(str
+
+ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
+ {
++ struct vm_struct *vm;
+ void *stack;
+ int i;
+
+@@ -255,7 +255,7 @@ static int alloc_thread_stack_node(struc
+ /* Clear stale pointers from reused stack. */
+ memset(s->addr, 0, THREAD_SIZE);
+
+- if (memcg_charge_kernel_stack(tsk)) {
++ if (memcg_charge_kernel_stack(s)) {
+ vfree(s->addr);
+ return -ENOMEM;
+ }
+@@ -278,7 +278,8 @@ static int alloc_thread_stack_node(struc
+ if (!stack)
+ return -ENOMEM;
+
+- if (memcg_charge_kernel_stack(tsk)) {
++ vm = find_vm_area(stack);
++ if (memcg_charge_kernel_stack(vm)) {
+ vfree(stack);
+ return -ENOMEM;
+ }
+@@ -287,19 +288,15 @@ static int alloc_thread_stack_node(struc
+ * free_thread_stack() can be called in interrupt context,
+ * so cache the vm_struct.
+ */
+- tsk->stack_vm_area = find_vm_area(stack);
++ tsk->stack_vm_area = vm;
+ tsk->stack = stack;
+ return 0;
+ }
+
+ static void free_thread_stack(struct task_struct *tsk)
+ {
+- struct vm_struct *vm = task_stack_vm_area(tsk);
+ int i;
+
+- for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
+- memcg_kmem_uncharge_page(vm->pages[i], 0);
+-
+ for (i = 0; i < NR_CACHED_STACKS; i++) {
+ if (this_cpu_cmpxchg(cached_stacks[i], NULL,
+ tsk->stack_vm_area) != NULL)
+@@ -450,12 +447,25 @@ static void account_kernel_stack(struct
+ }
+ }
+
++void exit_task_stack_account(struct task_struct *tsk)
++{
++ account_kernel_stack(tsk, -1);
++
++ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
++ struct vm_struct *vm;
++ int i;
++
++ vm = task_stack_vm_area(tsk);
++ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
++ memcg_kmem_uncharge_page(vm->pages[i], 0);
++ }
++}
++
+ static void release_task_stack(struct task_struct *tsk)
+ {
+ if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
+ return; /* Better to leak the stack than to free prematurely */
+
+- account_kernel_stack(tsk, -1);
+ free_thread_stack(tsk);
+ }
+
+@@ -916,6 +926,7 @@ static struct task_struct *dup_task_stru
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ refcount_set(&tsk->stack_refcount, 1);
+ #endif
++ account_kernel_stack(tsk, 1);
+
+ err = scs_prepare(tsk, node);
+ if (err)
+@@ -959,8 +970,6 @@ static struct task_struct *dup_task_stru
+ tsk->wake_q.next = NULL;
+ tsk->pf_io_worker = NULL;
+
+- account_kernel_stack(tsk, 1);
+-
+ kcov_task_init(tsk);
+ kmap_local_fork(tsk);
+
+@@ -979,6 +988,7 @@ static struct task_struct *dup_task_stru
+ return tsk;
+
+ free_stack:
++ exit_task_stack_account(tsk);
+ free_thread_stack(tsk);
+ free_tsk:
+ free_task_struct(tsk);
+@@ -2475,6 +2485,7 @@ static __latent_entropy struct task_stru
+ exit_creds(p);
+ bad_fork_free:
+ WRITE_ONCE(p->__state, TASK_DEAD);
++ exit_task_stack_account(p);
+ put_task_stack(p);
+ delayed_free_task(p);
+ fork_out:
diff --git a/patches/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/patches/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
index f04c7d1224c9..1118dd443bfb 100644
--- a/patches/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
+++ b/patches/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
-@@ -826,7 +826,7 @@ DEFINE_EVENT(i915_request, i915_request_
+@@ -823,7 +823,7 @@ DEFINE_EVENT(i915_request, i915_request_
TP_ARGS(rq)
);
diff --git a/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch b/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch
new file mode 100644
index 000000000000..2fc9e09f60ab
--- /dev/null
+++ b/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch
@@ -0,0 +1,230 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kernel/fork: Only cache the VMAP stack in finish_task_switch().
+Date: Thu, 18 Nov 2021 15:34:51 +0100
+
+The task stack could be deallocated later in delayed_put_task_struct().
+For fork()/exec() kind of workloads (say a shell script executing
+several commands) it is important that the stack is released in
+finish_task_switch() so that in the VMAP_STACK case it can be cached and
+reused in the new task.
+If the free/caching is RCU-delayed then a new stack has to be allocated
+because the frees arrive in batches and only two stacks out of many
+are recycled into the cache.
+
+For PREEMPT_RT it would be good if the wake-up in vfree_atomic() could
+be avoided in the scheduling path. Far worse are the other
+free_thread_stack() implementations which invoke __free_pages()/
+kmem_cache_free() with preemption disabled.
+
+Introduce put_task_stack_sched() which is invoked from
+finish_task_switch() and only caches the VMAP stack. If the cache is
+full or !CONFIG_VMAP_STACK is used then the stack is freed from
+delayed_put_task_struct(). In the VMAP case this is another opportunity
+to fill the cache.
+
+The stack is finally released in delayed_put_task_struct() which means
+that a valid stack reference can be held during its invocation. As
+such, no assumption can be made about whether the task_struct::stack
+pointer can be freed if it is non-NULL.
+Set the lowest bit of task_struct::stack if the stack was released via
+put_task_stack_sched() and needs a final free in
+delayed_put_task_struct(). If the bit is missing then a reference is
+held and put_task_stack() will release it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
+---
+ include/linux/sched/task_stack.h | 8 +++++
+ kernel/exit.c | 1
+ kernel/fork.c | 60 ++++++++++++++++++++++++++++++++-------
+ kernel/sched/core.c | 7 +++-
+ 4 files changed, 64 insertions(+), 12 deletions(-)
+
+--- a/include/linux/sched/task_stack.h
++++ b/include/linux/sched/task_stack.h
+@@ -70,6 +70,7 @@ static inline void *try_get_task_stack(s
+ }
+
+ extern void put_task_stack(struct task_struct *tsk);
++extern void put_task_stack_sched(struct task_struct *tsk);
+ #else
+ static inline void *try_get_task_stack(struct task_struct *tsk)
+ {
+@@ -77,6 +78,13 @@ static inline void *try_get_task_stack(s
+ }
+
+ static inline void put_task_stack(struct task_struct *tsk) {}
++static inline void put_task_stack_sched(struct task_struct *tsk) {}
++#endif
++
++#ifdef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
++static inline void task_stack_cleanup(struct task_struct *tsk) {}
++#else
++extern void task_stack_cleanup(struct task_struct *tsk);
+ #endif
+
+ void exit_task_stack_account(struct task_struct *tsk);
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -171,6 +171,7 @@ static void delayed_put_task_struct(stru
+ kprobe_flush_task(tsk);
+ perf_event_delayed_put(tsk);
+ trace_sched_process_free(tsk);
++ task_stack_cleanup(tsk);
+ put_task_struct(tsk);
+ }
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -178,6 +178,16 @@ static inline void free_task_struct(stru
+
+ #ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
+
++#define THREAD_STACK_DELAYED_FREE 1UL
++
++static void thread_stack_mark_delayed_free(struct task_struct *tsk)
++{
++ unsigned long val = (unsigned long)tsk->stack;
++
++ val |= THREAD_STACK_DELAYED_FREE;
++ WRITE_ONCE(tsk->stack, (void *)val);
++}
++
+ /*
+ * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
+ * kmemcache based allocator.
+@@ -293,7 +303,7 @@ static int alloc_thread_stack_node(struc
+ return 0;
+ }
+
+-static void free_thread_stack(struct task_struct *tsk)
++static void free_thread_stack(struct task_struct *tsk, bool cache_only)
+ {
+ int i;
+
+@@ -306,7 +316,12 @@ static void free_thread_stack(struct tas
+ tsk->stack_vm_area = NULL;
+ return;
+ }
+- vfree_atomic(tsk->stack);
++ if (cache_only) {
++ thread_stack_mark_delayed_free(tsk);
++ return;
++ }
++
++ vfree(tsk->stack);
+ tsk->stack = NULL;
+ tsk->stack_vm_area = NULL;
+ }
+@@ -325,8 +340,12 @@ static int alloc_thread_stack_node(struc
+ return -ENOMEM;
+ }
+
+-static void free_thread_stack(struct task_struct *tsk)
++static void free_thread_stack(struct task_struct *tsk, bool cache_only)
+ {
++ if (cache_only) {
++ thread_stack_mark_delayed_free(tsk);
++ return;
++ }
+ __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
+ tsk->stack = NULL;
+ }
+@@ -345,8 +364,12 @@ static int alloc_thread_stack_node(struc
+ return stack ? 0 : -ENOMEM;
+ }
+
+-static void free_thread_stack(struct task_struct *tsk)
++static void free_thread_stack(struct task_struct *tsk, bool cache_only)
+ {
++ if (cache_only) {
++ thread_stack_mark_delayed_free(tsk);
++ return;
++ }
+ kmem_cache_free(thread_stack_cache, tsk->stack);
+ tsk->stack = NULL;
+ }
+@@ -360,8 +383,19 @@ void thread_stack_cache_init(void)
+ }
+
+ # endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
+-#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
+
++void task_stack_cleanup(struct task_struct *tsk)
++{
++ unsigned long val = (unsigned long)tsk->stack;
++
++ if (!(val & THREAD_STACK_DELAYED_FREE))
++ return;
++
++ WRITE_ONCE(tsk->stack, (void *)(val & ~THREAD_STACK_DELAYED_FREE));
++ free_thread_stack(tsk, false);
++}
++
++#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
+ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
+ {
+ unsigned long *stack;
+@@ -461,19 +495,25 @@ void exit_task_stack_account(struct task
+ }
+ }
+
+-static void release_task_stack(struct task_struct *tsk)
++static void release_task_stack(struct task_struct *tsk, bool cache_only)
+ {
+ if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
+ return; /* Better to leak the stack than to free prematurely */
+
+- free_thread_stack(tsk);
++ free_thread_stack(tsk, cache_only);
+ }
+
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ void put_task_stack(struct task_struct *tsk)
+ {
+ if (refcount_dec_and_test(&tsk->stack_refcount))
+- release_task_stack(tsk);
++ release_task_stack(tsk, false);
++}
++
++void put_task_stack_sched(struct task_struct *tsk)
++{
++ if (refcount_dec_and_test(&tsk->stack_refcount))
++ release_task_stack(tsk, true);
+ }
+ #endif
+
+@@ -487,7 +527,7 @@ void free_task(struct task_struct *tsk)
+ * The task is finally done with both the stack and thread_info,
+ * so free both.
+ */
+- release_task_stack(tsk);
++ release_task_stack(tsk, false);
+ #else
+ /*
+ * If the task had a separate stack allocation, it should be gone
+@@ -989,7 +1029,7 @@ static struct task_struct *dup_task_stru
+
+ free_stack:
+ exit_task_stack_account(tsk);
+- free_thread_stack(tsk);
++ free_thread_stack(tsk, false);
+ free_tsk:
+ free_task_struct(tsk);
+ return NULL;
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4879,8 +4879,11 @@ static struct rq *finish_task_switch(str
+ if (prev->sched_class->task_dead)
+ prev->sched_class->task_dead(prev);
+
+- /* Task is done with its stack. */
+- put_task_stack(prev);
++ /*
++ * Cache only the VMAP stack. The final deallocation is in
++ * delayed_put_task_struct.
++ */
++ put_task_stack_sched(prev);
+
+ put_task_struct_rcu_user(prev);
+ }
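
The THREAD_STACK_DELAYED_FREE trick stores a flag in bit 0 of
task_struct::stack, which is available because stacks are at least
word-aligned. A minimal sketch of such pointer tagging (user-space
stand-ins, not the kernel's exact helpers):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define DELAYED_FREE 1UL	/* stand-in for THREAD_STACK_DELAYED_FREE */

/* Tagging works because malloc() returns at-least-word-aligned memory,
 * so bit 0 of a valid stack pointer is always clear. */
static void *mark_delayed_free(void *stack)
{
	return (void *)((uintptr_t)stack | DELAYED_FREE);
}

/* Analogue of task_stack_cleanup(): free only if the scheduler-side
 * path deferred the release by setting the tag bit. */
static void *stack_cleanup(void *stack)
{
	uintptr_t val = (uintptr_t)stack;

	if (!(val & DELAYED_FREE))
		return stack;	/* a reference is still held elsewhere */

	free((void *)(val & ~DELAYED_FREE));
	return NULL;
}

int main(void)
{
	void *stack = malloc(16384);

	assert(((uintptr_t)stack & DELAYED_FREE) == 0);
	stack = mark_delayed_free(stack);	/* deferred, not yet freed */
	stack = stack_cleanup(stack);		/* actually released here */
	return stack ? 1 : 0;
}
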
diff --git a/patches/0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch b/patches/0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch
new file mode 100644
index 000000000000..09633ced1ff0
--- /dev/null
+++ b/patches/0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch
@@ -0,0 +1,39 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kernel/fork: Use IS_ENABLED() in account_kernel_stack().
+Date: Thu, 18 Nov 2021 15:34:52 +0100
+
+Not strictly needed, but checking CONFIG_VMAP_STACK instead of
+task_stack_vm_area()'s result allows the compiler to remove the else
+path in the CONFIG_VMAP_STACK case where the pointer can't be NULL.
+
+Check for CONFIG_VMAP_STACK in order to use the proper path.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211118143452.136421-9-bigeasy@linutronix.de
+---
+ kernel/fork.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -465,16 +465,16 @@ void vm_area_free(struct vm_area_struct
+
+ static void account_kernel_stack(struct task_struct *tsk, int account)
+ {
+- void *stack = task_stack_page(tsk);
+- struct vm_struct *vm = task_stack_vm_area(tsk);
+-
+- if (vm) {
++ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
++ struct vm_struct *vm = task_stack_vm_area(tsk);
+ int i;
+
+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
+ mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
+ account * (PAGE_SIZE / 1024));
+ } else {
++ void *stack = task_stack_page(tsk);
++
+ /* All stack pages are in the same node. */
+ mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
+ account * (THREAD_SIZE / 1024));
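
IS_ENABLED() turns a config option into a compile-time 0/1 constant, so a
plain if () can replace an #ifdef: both branches are still parsed and
type-checked, but the compiler folds the constant condition and emits only
one of them. A small sketch of the effect, with the config option modelled
as an ordinary macro (not the kernel's real IS_ENABLED() machinery):

#include <stdio.h>

#define VMAP_STACK 1	/* compile-time constant, like IS_ENABLED() */

/* Both arms compile; only the taken one survives constant folding, so
 * no runtime branch and no dead else path remain in the object code. */
static void account_stack(int pages)
{
	if (VMAP_STACK)
		printf("per-page accounting for %d pages\n", pages);
	else
		printf("one contiguous stack allocation\n");
}

int main(void)
{
	account_stack(4);
	return 0;
}
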
diff --git a/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch b/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
index 59dcdda0f7e4..08b6e92f1e63 100644
--- a/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
+++ b/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
@@ -27,7 +27,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
-@@ -1283,7 +1283,7 @@ static void execlists_dequeue(struct int
+@@ -1284,7 +1284,7 @@ static void execlists_dequeue(struct int
* and context switches) submission.
*/
@@ -36,7 +36,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
/*
* If the queue is higher priority than the last
-@@ -1383,7 +1383,7 @@ static void execlists_dequeue(struct int
+@@ -1384,7 +1384,7 @@ static void execlists_dequeue(struct int
* Even if ELSP[1] is occupied and not worthy
* of timeslices, our queue might be.
*/
@@ -45,7 +45,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
return;
}
}
-@@ -1409,7 +1409,7 @@ static void execlists_dequeue(struct int
+@@ -1410,7 +1410,7 @@ static void execlists_dequeue(struct int
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.sched_engine->lock);
@@ -54,7 +54,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
return; /* leave this for another sibling */
}
-@@ -1571,7 +1571,7 @@ static void execlists_dequeue(struct int
+@@ -1572,7 +1572,7 @@ static void execlists_dequeue(struct int
*/
sched_engine->queue_priority_hint = queue_prio(sched_engine);
i915_sched_engine_reset_on_empty(sched_engine);
@@ -63,7 +63,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
/*
* We can skip poking the HW if we ended up with exactly the same set
-@@ -1597,13 +1597,6 @@ static void execlists_dequeue(struct int
+@@ -1598,13 +1598,6 @@ static void execlists_dequeue(struct int
}
}
@@ -77,7 +77,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
static void clear_ports(struct i915_request **ports, int count)
{
memset_p((void **)ports, NULL, count);
-@@ -2427,7 +2420,7 @@ static void execlists_submission_tasklet
+@@ -2424,7 +2417,7 @@ static void execlists_submission_tasklet
}
if (!engine->execlists.pending[0]) {
diff --git a/patches/ARM64__Allow_to_enable_RT.patch b/patches/ARM64__Allow_to_enable_RT.patch
index b14f2716b088..d60d459a8015 100644
--- a/patches/ARM64__Allow_to_enable_RT.patch
+++ b/patches/ARM64__Allow_to_enable_RT.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -88,6 +88,7 @@ config ARM64
+@@ -89,6 +89,7 @@ config ARM64
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_NUMA_BALANCING
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
-@@ -214,6 +215,7 @@ config ARM64
+@@ -215,6 +216,7 @@ config ARM64
select PCI_DOMAINS_GENERIC if PCI
select PCI_ECAM if (ACPI && PCI)
select PCI_SYSCALL if PCI
diff --git a/patches/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch b/patches/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
index 019eb46c0434..3920e31ded38 100644
--- a/patches/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
+++ b/patches/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
-@@ -400,6 +400,9 @@ do_translation_fault(unsigned long addr,
+@@ -407,6 +407,9 @@ do_translation_fault(unsigned long addr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (user_mode(regs))
goto bad_area;
-@@ -470,6 +473,9 @@ do_translation_fault(unsigned long addr,
+@@ -477,6 +480,9 @@ do_translation_fault(unsigned long addr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index 0e6dbb0e3c10..d960d516454d 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt1
++-rt2
diff --git a/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch b/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch
index ce0d6a1c5b63..b3f09f2a67d5 100644
--- a/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch
+++ b/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
-@@ -811,7 +811,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -813,7 +813,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -835,7 +835,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -837,7 +837,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_timer_sync_user(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
continue;
}
-@@ -907,7 +907,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -909,7 +909,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, ret);
diff --git a/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch b/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch
index 53405c1ea57d..89da78d382d3 100644
--- a/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch
+++ b/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
-@@ -179,10 +179,19 @@ static void __get_cpu_fpsimd_context(voi
+@@ -201,10 +201,19 @@ static void __get_cpu_fpsimd_context(voi
*
* The double-underscore version must only be called if you know the task
* can't be preempted.
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__get_cpu_fpsimd_context();
}
-@@ -203,7 +212,10 @@ static void __put_cpu_fpsimd_context(voi
+@@ -225,7 +234,10 @@ static void __put_cpu_fpsimd_context(voi
static void put_cpu_fpsimd_context(void)
{
__put_cpu_fpsimd_context();
diff --git a/patches/arm64_mm_make_arch_faults_on_old_pte_check_for_migratability.patch b/patches/arm64_mm_make_arch_faults_on_old_pte_check_for_migratability.patch
index 0c882e494c7e..01b100a09d25 100644
--- a/patches/arm64_mm_make_arch_faults_on_old_pte_check_for_migratability.patch
+++ b/patches/arm64_mm_make_arch_faults_on_old_pte_check_for_migratability.patch
@@ -22,7 +22,7 @@ Link: https://lore.kernel.org/r/20210811201354.1976839-5-valentin.schneider@arm.
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
-@@ -995,7 +995,7 @@ static inline void update_mmu_cache(stru
+@@ -1001,7 +1001,7 @@ static inline void update_mmu_cache(stru
*/
static inline bool arch_faults_on_old_pte(void)
{
diff --git a/patches/block_mq__do_not_invoke_preempt_disable.patch b/patches/block_mq__do_not_invoke_preempt_disable.patch
index 3b27284f0f86..0b42b033c655 100644
--- a/patches/block_mq__do_not_invoke_preempt_disable.patch
+++ b/patches/block_mq__do_not_invoke_preempt_disable.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -1559,14 +1559,14 @@ static void __blk_mq_delay_run_hw_queue(
+@@ -1856,14 +1856,14 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
diff --git a/patches/console__add_write_atomic_interface.patch b/patches/console__add_write_atomic_interface.patch
index 1ae6fb74501b..ee5b697b9199 100644
--- a/patches/console__add_write_atomic_interface.patch
+++ b/patches/console__add_write_atomic_interface.patch
@@ -259,7 +259,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
-@@ -3582,6 +3583,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+@@ -3583,6 +3584,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
#ifdef CONFIG_SMP
static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
@@ -267,7 +267,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
-@@ -3661,6 +3663,9 @@ EXPORT_SYMBOL(__printk_cpu_trylock);
+@@ -3662,6 +3664,9 @@ EXPORT_SYMBOL(__printk_cpu_trylock);
*/
void __printk_cpu_unlock(void)
{
@@ -277,7 +277,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (atomic_read(&printk_cpulock_nested)) {
atomic_dec(&printk_cpulock_nested);
return;
-@@ -3671,6 +3676,12 @@ void __printk_cpu_unlock(void)
+@@ -3672,6 +3677,12 @@ void __printk_cpu_unlock(void)
* LMM(__printk_cpu_unlock:A)
*/
@@ -290,7 +290,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Guarantee loads and stores from this CPU when it was the
* lock owner are visible to the next lock owner. This pairs
-@@ -3691,6 +3702,21 @@ void __printk_cpu_unlock(void)
+@@ -3692,6 +3703,21 @@ void __printk_cpu_unlock(void)
*/
atomic_set_release(&printk_cpulock_owner,
-1); /* LMM(__printk_cpu_unlock:B) */
diff --git a/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch b/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch
index e4b5ce4c1d14..2dd9a5d4284a 100644
--- a/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch
+++ b/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline bool init_done(struct zram *zram)
{
-@@ -1169,6 +1204,7 @@ static bool zram_meta_alloc(struct zram
+@@ -1199,6 +1234,7 @@ static bool zram_meta_alloc(struct zram
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
diff --git a/patches/entry--Fix-the-preempt-lazy-fallout.patch b/patches/entry--Fix-the-preempt-lazy-fallout.patch
index 5cda3af73d5f..6b35c197ea3a 100644
--- a/patches/entry--Fix-the-preempt-lazy-fallout.patch
+++ b/patches/entry--Fix-the-preempt-lazy-fallout.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
-@@ -150,8 +150,6 @@ struct thread_info {
+@@ -153,8 +153,6 @@ struct thread_info {
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/patches/fs_dcache__use_swait_queue_instead_of_waitqueue.patch b/patches/fs_dcache__use_swait_queue_instead_of_waitqueue.patch
index 32d362bd2305..02bc269c806b 100644
--- a/patches/fs_dcache__use_swait_queue_instead_of_waitqueue.patch
+++ b/patches/fs_dcache__use_swait_queue_instead_of_waitqueue.patch
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Don't go there if it's already dead */
if (unlikely(IS_DEADDIR(inode)))
-@@ -3194,7 +3194,7 @@ static struct dentry *lookup_open(struct
+@@ -3192,7 +3192,7 @@ static struct dentry *lookup_open(struct
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ERR_PTR(-ENOENT);
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
-@@ -636,7 +636,7 @@ void nfs_prime_dcache(struct dentry *par
+@@ -638,7 +638,7 @@ void nfs_prime_dcache(struct dentry *par
unsigned long dir_verifier)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
@@ -137,7 +137,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct dentry *dentry;
struct dentry *alias;
struct inode *inode;
-@@ -1876,7 +1876,7 @@ int nfs_atomic_open(struct inode *dir, s
+@@ -1860,7 +1860,7 @@ int nfs_atomic_open(struct inode *dir, s
struct file *file, unsigned open_flags,
umode_t mode)
{
@@ -168,7 +168,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_lock(&dentry->d_lock);
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
-@@ -95,6 +95,7 @@
+@@ -96,6 +96,7 @@
#include <linux/posix-timers.h>
#include <linux/time_namespace.h>
#include <linux/resctrl.h>
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/cn_proc.h>
#include <trace/events/oom.h>
#include "internal.h"
-@@ -2040,7 +2041,7 @@ bool proc_fill_cache(struct file *file,
+@@ -2045,7 +2046,7 @@ bool proc_fill_cache(struct file *file,
child = d_hash_and_lookup(dir, &qname);
if (!child) {
@@ -218,7 +218,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
-@@ -1692,7 +1692,7 @@ struct nfs_unlinkdata {
+@@ -1684,7 +1684,7 @@ struct nfs_unlinkdata {
struct nfs_removeargs args;
struct nfs_removeres res;
struct dentry *dentry;
diff --git a/patches/kernel_sched__add_putget_cpu_light.patch b/patches/kernel_sched__add_putget_cpu_light.patch
index 575aed9da145..32d3717dfdec 100644
--- a/patches/kernel_sched__add_putget_cpu_light.patch
+++ b/patches/kernel_sched__add_putget_cpu_light.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
-@@ -268,6 +268,9 @@ static inline int get_boot_cpu_id(void)
+@@ -267,6 +267,9 @@ static inline int get_boot_cpu_id(void)
#define get_cpu() ({ preempt_disable(); __smp_processor_id(); })
#define put_cpu() preempt_enable()
diff --git a/patches/mm-Disable-zsmalloc-on-PREEMPT_RT.patch b/patches/mm-Disable-zsmalloc-on-PREEMPT_RT.patch
index fe8fad120f87..3f281a460f4d 100644
--- a/patches/mm-Disable-zsmalloc-on-PREEMPT_RT.patch
+++ b/patches/mm-Disable-zsmalloc-on-PREEMPT_RT.patch
@@ -27,7 +27,7 @@ Link: https://lkml.kernel.org/r/20210923170121.1860133-1-bigeasy@linutronix.de
--- a/mm/Kconfig
+++ b/mm/Kconfig
-@@ -640,6 +640,7 @@ config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
+@@ -643,6 +643,7 @@ config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
bool "zsmalloc"
@@ -35,7 +35,7 @@ Link: https://lkml.kernel.org/r/20210923170121.1860133-1-bigeasy@linutronix.de
select ZSMALLOC
help
Use the zsmalloc allocator as the default allocator.
-@@ -690,7 +691,7 @@ config Z3FOLD
+@@ -693,7 +694,7 @@ config Z3FOLD
config ZSMALLOC
tristate "Memory allocator for compressed pages"
diff --git a/patches/mm-memcontro--Disable-on-PREEMPT_RT.patch b/patches/mm-memcontro--Disable-on-PREEMPT_RT.patch
index 23dc2e239c10..a1b89d4f6770 100644
--- a/patches/mm-memcontro--Disable-on-PREEMPT_RT.patch
+++ b/patches/mm-memcontro--Disable-on-PREEMPT_RT.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -938,6 +938,7 @@ config PAGE_COUNTER
+@@ -943,6 +943,7 @@ config PAGE_COUNTER
config MEMCG
bool "Memory controller"
diff --git a/patches/mm-zsmalloc-Replace-bit-spinlock-and-get_cpu_var-usa.patch b/patches/mm-zsmalloc-Replace-bit-spinlock-and-get_cpu_var-usa.patch
index 91b6617bc683..8a4c80446b7f 100644
--- a/patches/mm-zsmalloc-Replace-bit-spinlock-and-get_cpu_var-usa.patch
+++ b/patches/mm-zsmalloc-Replace-bit-spinlock-and-get_cpu_var-usa.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/Kconfig
+++ b/mm/Kconfig
-@@ -640,7 +640,6 @@ config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
+@@ -643,7 +643,6 @@ config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
bool "zsmalloc"
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
select ZSMALLOC
help
Use the zsmalloc allocator as the default allocator.
-@@ -691,7 +690,7 @@ config Z3FOLD
+@@ -694,7 +693,7 @@ config Z3FOLD
config ZSMALLOC
tristate "Memory allocator for compressed pages"
diff --git a/patches/mm_vmalloc__Another_preempt_disable_region_which_sucks.patch b/patches/mm_vmalloc__Another_preempt_disable_region_which_sucks.patch
index 6d6a0f370ba6..5bd3fc524a99 100644
--- a/patches/mm_vmalloc__Another_preempt_disable_region_which_sucks.patch
+++ b/patches/mm_vmalloc__Another_preempt_disable_region_which_sucks.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -1918,11 +1918,12 @@ static void *new_vmap_block(unsigned int
+@@ -1922,11 +1922,12 @@ static void *new_vmap_block(unsigned int
return ERR_PTR(err);
}
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -2001,7 +2002,8 @@ static void *vb_alloc(unsigned long size
+@@ -2005,7 +2006,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -2024,7 +2026,7 @@ static void *vb_alloc(unsigned long size
+@@ -2028,7 +2030,7 @@ static void *vb_alloc(unsigned long size
break;
}
diff --git a/patches/net__Dequeue_in_dev_cpu_dead_without_the_lock.patch b/patches/net__Dequeue_in_dev_cpu_dead_without_the_lock.patch
index 17a1b723d226..132320db62cd 100644
--- a/patches/net__Dequeue_in_dev_cpu_dead_without_the_lock.patch
+++ b/patches/net__Dequeue_in_dev_cpu_dead_without_the_lock.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -11310,7 +11310,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -11344,7 +11344,7 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net__Remove_preemption_disabling_in_netif_rx.patch b/patches/net__Remove_preemption_disabling_in_netif_rx.patch
index 648527035472..291cf25e4a99 100644
--- a/patches/net__Remove_preemption_disabling_in_netif_rx.patch
+++ b/patches/net__Remove_preemption_disabling_in_netif_rx.patch
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4884,7 +4884,7 @@ static int netif_rx_internal(struct sk_b
+@@ -4908,7 +4908,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4894,14 +4894,14 @@ static int netif_rx_internal(struct sk_b
+@@ -4918,14 +4918,14 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/net__Use_skbufhead_with_raw_lock.patch b/patches/net__Use_skbufhead_with_raw_lock.patch
index 71c809283351..19dd0dca4ac7 100644
--- a/patches/net__Use_skbufhead_with_raw_lock.patch
+++ b/patches/net__Use_skbufhead_with_raw_lock.patch
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
struct sk_buff;
-@@ -1916,6 +1917,12 @@ static inline void skb_queue_head_init(s
+@@ -1953,6 +1954,12 @@ static inline void skb_queue_head_init(s
__skb_queue_head_init(list);
}
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -11626,7 +11626,7 @@ static int __init net_dev_init(void)
+@@ -11660,7 +11660,7 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/net__dev__always_take_qdiscs_busylock_in___dev_xmit_skb.patch b/patches/net__dev__always_take_qdiscs_busylock_in___dev_xmit_skb.patch
index c74397a7af05..aee6836315b0 100644
--- a/patches/net__dev__always_take_qdiscs_busylock_in___dev_xmit_skb.patch
+++ b/patches/net__dev__always_take_qdiscs_busylock_in___dev_xmit_skb.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3825,7 +3825,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3839,7 +3839,11 @@ static inline int __dev_xmit_skb(struct
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
diff --git a/patches/net_core__use_local_bh_disable_in_netif_rx_ni.patch b/patches/net_core__use_local_bh_disable_in_netif_rx_ni.patch
index 992f9a13e726..21a37af595d4 100644
--- a/patches/net_core__use_local_bh_disable_in_netif_rx_ni.patch
+++ b/patches/net_core__use_local_bh_disable_in_netif_rx_ni.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4943,11 +4943,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4967,11 +4967,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/patches/printk__Enhance_the_condition_check_of_msleep_in_pr_flush.patch b/patches/printk__Enhance_the_condition_check_of_msleep_in_pr_flush.patch
index c37159cd7ba3..18a51f7130f9 100644
--- a/patches/printk__Enhance_the_condition_check_of_msleep_in_pr_flush.patch
+++ b/patches/printk__Enhance_the_condition_check_of_msleep_in_pr_flush.patch
@@ -27,7 +27,7 @@ Link: https://lore.kernel.org/lkml/20210719022649.3444072-1-chao.qin@intel.com
---
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3649,7 +3649,9 @@ bool pr_flush(int timeout_ms, bool reset
+@@ -3650,7 +3650,9 @@ bool pr_flush(int timeout_ms, bool reset
u64 diff;
u64 seq;
diff --git a/patches/printk__add_console_handover.patch b/patches/printk__add_console_handover.patch
index 19ab4cf204d8..10c1a5f566a7 100644
--- a/patches/printk__add_console_handover.patch
+++ b/patches/printk__add_console_handover.patch
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (console_trylock()) {
con->write(con, text, text_len);
console_unlock();
-@@ -2891,8 +2900,10 @@ void register_console(struct console *ne
+@@ -2892,8 +2901,10 @@ void register_console(struct console *ne
* the real console are the same physical device, it's annoying to
* see the beginning boot messages twice
*/
diff --git a/patches/printk__add_pr_flush.patch b/patches/printk__add_pr_flush.patch
index 9ebc8682fefe..d83ac06a2099 100644
--- a/patches/printk__add_pr_flush.patch
+++ b/patches/printk__add_pr_flush.patch
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3285,6 +3285,12 @@ void kmsg_dump(enum kmsg_dump_reason rea
+@@ -3286,6 +3286,12 @@ void kmsg_dump(enum kmsg_dump_reason rea
sync_mode = true;
pr_info("enabled sync mode\n");
}
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
rcu_read_lock();
-@@ -3606,3 +3612,78 @@ bool kgdb_roundup_delay(unsigned int cpu
+@@ -3607,3 +3613,78 @@ bool kgdb_roundup_delay(unsigned int cpu
}
EXPORT_SYMBOL(kgdb_roundup_delay);
#endif /* CONFIG_SMP */
diff --git a/patches/printk__call_boot_delay_msec_in_printk_delay.patch b/patches/printk__call_boot_delay_msec_in_printk_delay.patch
index 6eb1326f0c67..e7f15d686f16 100644
--- a/patches/printk__call_boot_delay_msec_in_printk_delay.patch
+++ b/patches/printk__call_boot_delay_msec_in_printk_delay.patch
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(printk_delay_msec)) {
int m = printk_delay_msec;
-@@ -2223,8 +2225,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -2224,8 +2226,7 @@ asmlinkage int vprintk_emit(int facility
in_sched = true;
}
diff --git a/patches/printk__use_seqcount_latch_for_console_seq.patch b/patches/printk__use_seqcount_latch_for_console_seq.patch
index 1bdbe68ca654..db5be2c61d85 100644
--- a/patches/printk__use_seqcount_latch_for_console_seq.patch
+++ b/patches/printk__use_seqcount_latch_for_console_seq.patch
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
raw_write_seqcount_latch(&ls->latch);
-@@ -2278,9 +2287,9 @@ EXPORT_SYMBOL(_printk);
+@@ -2279,9 +2288,9 @@ EXPORT_SYMBOL(_printk);
#define prb_read_valid(rb, seq, r) false
#define prb_first_valid_seq(rb) 0
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static u64 exclusive_console_stop_seq;
static unsigned long console_dropped;
-@@ -2608,7 +2617,7 @@ void console_unlock(void)
+@@ -2609,7 +2618,7 @@ void console_unlock(void)
bool do_cond_resched, retry;
struct printk_info info;
struct printk_record r;
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (console_suspended) {
up_console_sem();
-@@ -2652,12 +2661,14 @@ void console_unlock(void)
+@@ -2653,12 +2662,14 @@ void console_unlock(void)
size_t len;
skip:
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (suppress_message_printing(r.info->level)) {
-@@ -2666,13 +2677,13 @@ void console_unlock(void)
+@@ -2667,13 +2678,13 @@ void console_unlock(void)
* directly to the console when we received it, and
* record that has level above the console loglevel.
*/
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
exclusive_console = NULL;
}
-@@ -2693,7 +2704,7 @@ void console_unlock(void)
+@@ -2694,7 +2705,7 @@ void console_unlock(void)
len = record_print_text(&r,
console_msg_format & MSG_FORMAT_SYSLOG,
printk_time);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* While actively printing out messages, if another printk()
-@@ -2721,9 +2732,6 @@ void console_unlock(void)
+@@ -2722,9 +2733,6 @@ void console_unlock(void)
cond_resched();
}
@@ -129,7 +129,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
console_locked = 0;
up_console_sem();
-@@ -2733,7 +2741,7 @@ void console_unlock(void)
+@@ -2734,7 +2742,7 @@ void console_unlock(void)
* there's a new owner and the console_unlock() from them will do the
* flush, no worries.
*/
@@ -138,7 +138,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (retry && console_trylock())
goto again;
}
-@@ -2785,18 +2793,19 @@ void console_unblank(void)
+@@ -2786,18 +2794,19 @@ void console_unblank(void)
*/
void console_flush_on_panic(enum con_flush_mode mode)
{
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
console_unlock();
}
-@@ -3032,11 +3041,11 @@ void register_console(struct console *ne
+@@ -3033,11 +3042,11 @@ void register_console(struct console *ne
* ignores console_lock.
*/
exclusive_console = newcon;
diff --git a/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch b/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
index 93924c9805a8..2777572ac4dc 100644
--- a/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
+++ b/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Special states are those that do not use the normal wait-loop pattern. See
* the comment with set_special_state().
-@@ -2015,6 +2011,81 @@ static inline int test_tsk_need_resched(
+@@ -2009,6 +2005,81 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3207,7 +3207,7 @@ unsigned long wait_task_inactive(struct
+@@ -3226,7 +3226,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -204,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
cpu_relax();
}
-@@ -3222,7 +3222,7 @@ unsigned long wait_task_inactive(struct
+@@ -3241,7 +3241,7 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/random__Make_it_work_on_rt.patch b/patches/random__Make_it_work_on_rt.patch
index 2b921cb263fa..f0ddaaa61cbe 100644
--- a/patches/random__Make_it_work_on_rt.patch
+++ b/patches/random__Make_it_work_on_rt.patch
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern int wait_for_random_bytes(void);
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
-@@ -190,12 +190,18 @@ irqreturn_t __handle_irq_event_percpu(st
+@@ -192,12 +192,18 @@ irqreturn_t __handle_irq_event_percpu(st
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
diff --git a/patches/rcu-tree-Protect-rcu_rdp_is_offloaded-invocations-on.patch b/patches/rcu-tree-Protect-rcu_rdp_is_offloaded-invocations-on.patch
index d9da124064af..447f173fa18d 100644
--- a/patches/rcu-tree-Protect-rcu_rdp_is_offloaded-invocations-on.patch
+++ b/patches/rcu-tree-Protect-rcu_rdp_is_offloaded-invocations-on.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -2278,13 +2278,13 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
+@@ -2276,13 +2276,13 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
{
unsigned long flags;
unsigned long mask;
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
rdp->gpwrap) {
-@@ -2446,7 +2446,7 @@ static void rcu_do_batch(struct rcu_data
+@@ -2444,7 +2444,7 @@ static void rcu_do_batch(struct rcu_data
int div;
bool __maybe_unused empty;
unsigned long flags;
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rcu_head *rhp;
struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
long bl, count = 0;
-@@ -2472,6 +2472,7 @@ static void rcu_do_batch(struct rcu_data
+@@ -2470,6 +2470,7 @@ static void rcu_do_batch(struct rcu_data
rcu_nocb_lock(rdp);
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
pending = rcu_segcblist_n_cbs(&rdp->cblist);
diff --git a/patches/rcu__Delay_RCU-selftests.patch b/patches/rcu__Delay_RCU-selftests.patch
index 33b1de34eaa7..0ccc1b75ec4e 100644
--- a/patches/rcu__Delay_RCU-selftests.patch
+++ b/patches/rcu__Delay_RCU-selftests.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -94,6 +94,13 @@ void rcu_init_tasks_generic(void);
+@@ -95,6 +95,13 @@ void rcu_init_tasks_generic(void);
static inline void rcu_init_tasks_generic(void) { }
#endif
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_sysrq_end(void);
--- a/init/main.c
+++ b/init/main.c
-@@ -1602,6 +1602,7 @@ static noinline void __init kernel_init_
+@@ -1597,6 +1597,7 @@ static noinline void __init kernel_init_
rcu_init_tasks_generic();
do_pre_smp_initcalls();
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
smp_init();
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
-@@ -1348,7 +1348,7 @@ static void test_rcu_tasks_callback(stru
+@@ -1345,7 +1345,7 @@ static void test_rcu_tasks_callback(stru
rttd->notrun = true;
}
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
pr_info("Running RCU-tasks wait API self tests\n");
#ifdef CONFIG_TASKS_RCU
-@@ -1385,9 +1385,7 @@ static int rcu_tasks_verify_self_tests(v
+@@ -1382,9 +1382,7 @@ static int rcu_tasks_verify_self_tests(v
return ret;
}
late_initcall(rcu_tasks_verify_self_tests);
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __init rcu_init_tasks_generic(void)
{
-@@ -1402,9 +1400,6 @@ void __init rcu_init_tasks_generic(void)
+@@ -1399,9 +1397,6 @@ void __init rcu_init_tasks_generic(void)
#ifdef CONFIG_TASKS_TRACE_RCU
rcu_spawn_tasks_trace_kthread();
#endif
diff --git a/patches/sched-Make-preempt_enable_no_resched-behave-like-pre.patch b/patches/sched-Make-preempt_enable_no_resched-behave-like-pre.patch
index c9883943d9c3..1d4cd3751abb 100644
--- a/patches/sched-Make-preempt_enable_no_resched-behave-like-pre.patch
+++ b/patches/sched-Make-preempt_enable_no_resched-behave-like-pre.patch
@@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -189,7 +189,11 @@ do { \
+@@ -210,7 +210,11 @@ do { \
preempt_count_dec(); \
} while (0)
diff --git a/patches/sched__Add_support_for_lazy_preemption.patch b/patches/sched__Add_support_for_lazy_preemption.patch
index 119f8121b373..dbf76920b1a9 100644
--- a/patches/sched__Add_support_for_lazy_preemption.patch
+++ b/patches/sched__Add_support_for_lazy_preemption.patch
@@ -361,7 +361,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -6217,6 +6262,7 @@ static void __sched notrace __schedule(u
+@@ -6220,6 +6265,7 @@ static void __sched notrace __schedule(u
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -369,7 +369,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
-@@ -6428,6 +6474,30 @@ static void __sched notrace preempt_sche
+@@ -6431,6 +6477,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -400,7 +400,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -6441,7 +6511,8 @@ asmlinkage __visible void __sched notrac
+@@ -6444,7 +6514,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -410,7 +410,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -6474,6 +6545,9 @@ asmlinkage __visible void __sched notrac
+@@ -6477,6 +6548,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -420,7 +420,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -8657,7 +8731,9 @@ void __init init_idle(struct task_struct
+@@ -8660,7 +8734,9 @@ void __init init_idle(struct task_struct
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
diff --git a/patches/sched_introduce_migratable.patch b/patches/sched_introduce_migratable.patch
index f66b422e03ae..479cbefd7ab4 100644
--- a/patches/sched_introduce_migratable.patch
+++ b/patches/sched_introduce_migratable.patch
@@ -26,7 +26,7 @@ Link: https://lore.kernel.org/r/20210811201354.1976839-3-valentin.schneider@arm.
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1730,6 +1730,16 @@ static __always_inline bool is_percpu_th
+@@ -1727,6 +1727,16 @@ static __always_inline bool is_percpu_th
#endif
}
diff --git a/patches/serial__8250__implement_write_atomic.patch b/patches/serial__8250__implement_write_atomic.patch
index a3e6ac809d6e..c7bf2744e82b 100644
--- a/patches/serial__8250__implement_write_atomic.patch
+++ b/patches/serial__8250__implement_write_atomic.patch
@@ -144,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
.setup = univ8250_console_setup,
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
-@@ -60,9 +60,18 @@ int fsl8250_handle_irq(struct uart_port
+@@ -56,9 +56,18 @@ int fsl8250_handle_irq(struct uart_port
/* Stop processing interrupts on input overrun */
if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) {
@@ -310,7 +310,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_unlock_irqrestore(&port->lock, flags);
synchronize_irq(port->irq);
-@@ -2824,7 +2817,7 @@ serial8250_do_set_termios(struct uart_po
+@@ -2837,7 +2830,7 @@ serial8250_do_set_termios(struct uart_po
if (up->capabilities & UART_CAP_RTOIE)
up->ier |= UART_IER_RTOIE;
@@ -319,7 +319,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (up->capabilities & UART_CAP_EFR) {
unsigned char efr = 0;
-@@ -3290,7 +3283,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default
+@@ -3303,7 +3296,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -328,7 +328,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
struct uart_8250_port *up = up_to_u8250p(port);
-@@ -3298,6 +3291,18 @@ static void serial8250_console_putchar(s
+@@ -3311,6 +3304,18 @@ static void serial8250_console_putchar(s
serial_port_out(port, UART_TX, ch);
}
@@ -347,7 +347,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Restore serial console when h/w power-off detected
*/
-@@ -3319,6 +3324,32 @@ static void serial8250_console_restore(s
+@@ -3332,6 +3337,32 @@ static void serial8250_console_restore(s
serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
}
@@ -380,7 +380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
-@@ -3335,24 +3366,12 @@ void serial8250_console_write(struct uar
+@@ -3348,24 +3379,12 @@ void serial8250_console_write(struct uar
struct uart_port *port = &up->port;
unsigned long flags;
unsigned int ier;
@@ -407,7 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3366,7 +3385,9 @@ void serial8250_console_write(struct uar
+@@ -3379,7 +3398,9 @@ void serial8250_console_write(struct uar
mdelay(port->rs485.delay_rts_before_send);
}
@@ -417,7 +417,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Finally, wait for transmitter to become empty
-@@ -3379,8 +3400,7 @@ void serial8250_console_write(struct uar
+@@ -3392,8 +3413,7 @@ void serial8250_console_write(struct uar
if (em485->tx_stopped)
up->rs485_stop_tx(up);
}
@@ -427,7 +427,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The receive handling will happen properly because the
-@@ -3392,8 +3412,7 @@ void serial8250_console_write(struct uar
+@@ -3405,8 +3425,7 @@ void serial8250_console_write(struct uar
if (up->msr_saved_flags)
serial8250_modem_status(up);
@@ -437,7 +437,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static unsigned int probe_baud(struct uart_port *port)
-@@ -3413,6 +3432,7 @@ static unsigned int probe_baud(struct ua
+@@ -3426,6 +3445,7 @@ static unsigned int probe_baud(struct ua
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
@@ -445,7 +445,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int baud = 9600;
int bits = 8;
int parity = 'n';
-@@ -3422,6 +3442,8 @@ int serial8250_console_setup(struct uart
+@@ -3435,6 +3455,8 @@ int serial8250_console_setup(struct uart
if (!port->iobase && !port->membase)
return -ENODEV;
diff --git a/patches/series b/patches/series
index b85bb7ef7855..ff3c6f427b73 100644
--- a/patches/series
+++ b/patches/series
@@ -40,7 +40,14 @@ fscache-Use-only-one-fscache_object_cong_wait.patch
kernel-locking-Use-a-pointer-in-ww_mutex_trylock.patch
# sched
-0004_sched_delay_task_stack_freeing_on_rt.patch
+0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch
+0002_kernel_fork_duplicate_task_struct_before_stack_allocation.patch
+0003_kernel_fork_ia64_provide_a_alloc_thread_stack_node_for_ia64.patch
+0004_kernel_fork_don_t_assign_the_stack_pointer_in_dup_task_struct.patch
+0005_kernel_fork_move_memcg_charge_kernel_stack_into_config_vmap_stack.patch
+0006_kernel_fork_move_task_stack_account_to_do_exit.patch
+0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch
+0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch
###########################################################################
# Post
diff --git a/patches/signal__Revert_ptrace_preempt_magic.patch b/patches/signal__Revert_ptrace_preempt_magic.patch
index 2e08ecc7e403..266271bd329e 100644
--- a/patches/signal__Revert_ptrace_preempt_magic.patch
+++ b/patches/signal__Revert_ptrace_preempt_magic.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -2275,16 +2275,8 @@ static void ptrace_stop(int exit_code, i
+@@ -2249,16 +2249,8 @@ static void ptrace_stop(int exit_code, i
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
diff --git a/patches/signal_x86__Delay_calling_signals_in_atomic.patch b/patches/signal_x86__Delay_calling_signals_in_atomic.patch
index 3a00c187b7c1..4abe2b5aeaa6 100644
--- a/patches/signal_x86__Delay_calling_signals_in_atomic.patch
+++ b/patches/signal_x86__Delay_calling_signals_in_atomic.patch
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
typedef sigset_t compat_sigset_t;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1080,6 +1080,10 @@ struct task_struct {
+@@ -1078,6 +1078,10 @@ struct task_struct {
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
diff --git a/patches/softirq__Check_preemption_after_reenabling_interrupts.patch b/patches/softirq__Check_preemption_after_reenabling_interrupts.patch
index 86618b8cbf01..79c594270826 100644
--- a/patches/softirq__Check_preemption_after_reenabling_interrupts.patch
+++ b/patches/softirq__Check_preemption_after_reenabling_interrupts.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -191,8 +191,10 @@ do { \
+@@ -212,8 +212,10 @@ do { \
#ifndef CONFIG_PREEMPT_RT
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-@@ -263,6 +265,7 @@ do { \
+@@ -284,6 +286,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* CONFIG_PREEMPT_COUNT */
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3040,6 +3040,7 @@ static void __netif_reschedule(struct Qd
+@@ -3048,6 +3048,7 @@ static void __netif_reschedule(struct Qd
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __netif_schedule(struct Qdisc *q)
-@@ -3102,6 +3103,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -3110,6 +3111,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -4644,6 +4646,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -4668,6 +4670,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -6387,12 +6390,14 @@ static void net_rps_action_and_irq_enabl
+@@ -6412,12 +6415,14 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -6470,6 +6475,7 @@ void __napi_schedule(struct napi_struct
+@@ -6495,6 +6500,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -11292,6 +11298,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -11326,6 +11332,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/x86__Enable_RT_also_on_32bit.patch b/patches/x86__Enable_RT_also_on_32bit.patch
index babd4a4b3d46..cacc949ad4fa 100644
--- a/patches/x86__Enable_RT_also_on_32bit.patch
+++ b/patches/x86__Enable_RT_also_on_32bit.patch
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
-@@ -108,6 +107,7 @@ config X86
+@@ -109,6 +108,7 @@ config X86
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN