Diffstat (limited to 'patches')
-rw-r--r--  patches/0001-blk-mq-Add-blk_mq_complete_request_direct.patch  39
-rw-r--r--  patches/0001-mm-memcg-Disable-threshold-event-handlers-on-PREEMPT.patch  20
-rw-r--r--  patches/0001-net-dev-Remove-the-preempt_disable-in-netif_rx_inter.patch  4
-rw-r--r--  patches/0001-printk-rename-cpulock-functions.patch  8
-rw-r--r--  patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch  28
-rw-r--r--  patches/0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch  12
-rw-r--r--  patches/0001_random_remove_unused_irq_flags_argument_from_add_interrupt_randomness.patch  97
-rw-r--r--  patches/0001_zsmalloc_introduce_some_helper_functions.patch  187
-rw-r--r--  patches/0002-locking-Remove-rt_rwlock_is_contended.patch  34
-rw-r--r--  patches/0002-mm-memcg-Protect-per-CPU-counter-by-disabling-preemp.patch  12
-rw-r--r--  patches/0002-mmc-core-Use-blk_mq_complete_request_direct.patch  88
-rw-r--r--  patches/0002-net-dev-Remove-get_cpu-in-netif_rx_internal.patch  2
-rw-r--r--  patches/0002-printk-cpu-sync-always-disable-interrupts.patch  4
-rw-r--r--  patches/0002_irq_remove_unsued_flags_argument_from___handle_irq_event_percpu.patch  82
-rw-r--r--  patches/0002_kernel_fork_duplicate_task_struct_before_stack_allocation.patch  6
-rw-r--r--  patches/0002_zsmalloc_rename_zs_stat_type_to_class_stat_type.patch  105
-rw-r--r--  patches/0003-locking-rtmutex-Squash-self-deadlock-check-for-ww_rt.patch  40
-rw-r--r--  patches/0003-mm-memcg-Add-a-local_lock_t-for-IRQ-and-TASK-object.patch  40
-rw-r--r--  patches/0003-net-dev-Makes-sure-netif_rx-can-be-invoked-in-any-co.patch  6
-rw-r--r--  patches/0003_kernel_fork_ia64_provide_a_alloc_thread_stack_node_for_ia64.patch  2
-rw-r--r--  patches/0003_random_split_add_interrupt_randomness.patch  111
-rw-r--r--  patches/0003_zsmalloc_decouple_class_actions_from_zspage_works.patch  131
-rw-r--r--  patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch  16
-rw-r--r--  patches/0004-locking-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_m.patch  116
-rw-r--r--  patches/0004-mm-memcg-Allow-the-task_obj-optimization-only-on-non.patch  12
-rw-r--r--  patches/0004-net-dev-Make-rps_lock-disable-interrupts.patch  16
-rw-r--r--  patches/0004_kernel_fork_don_t_assign_the_stack_pointer_in_dup_task_struct.patch  16
-rw-r--r--  patches/0004_random_move_the_fast_pool_reset_into_the_caller.patch  34
-rw-r--r--  patches/0004_zsmalloc_introduce_obj_allocated.patch  102
-rw-r--r--  patches/0005-lockdep-Remove-softirq-accounting-on-PREEMPT_RT.patch  83
-rw-r--r--  patches/0005-printk-refactor-and-rework-printing-logic.patch  57
-rw-r--r--  patches/0005_kernel_fork_move_memcg_charge_kernel_stack_into_config_vmap_stack.patch  10
-rw-r--r--  patches/0005_random_defer_processing_of_randomness_on_preempt_rt.patch  23
-rw-r--r--  patches/0005_zsmalloc_move_huge_compressed_obj_from_page_to_zspage.patch  150
-rw-r--r--  patches/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch  2
-rw-r--r--  patches/0006-lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch  125
-rw-r--r--  patches/0006-printk-move-buffer-definitions-into-console_emit_nex.patch  18
-rw-r--r--  patches/0006_kernel_fork_move_task_stack_account_to_do_exit.patch  24
-rw-r--r--  patches/0006_zsmalloc_remove_zspage_isolation_for_migration.patch  309
-rw-r--r--  patches/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch  2
-rw-r--r--  patches/0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch  83
-rw-r--r--  patches/0007-printk-add-pr_flush.patch  14
-rw-r--r--  patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch  20
-rw-r--r--  patches/0007_locking_rwlocks_introduce_write_lock_nested.patch  144
-rw-r--r--  patches/0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch  218
-rw-r--r--  patches/0008-printk-add-kthread-console-printers.patch  31
-rw-r--r--  patches/0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch  2
-rw-r--r--  patches/0008_zsmalloc_replace_per_zpage_lock_with_pool_migrate_lock.patch  469
-rw-r--r--  patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch  2
-rw-r--r--  patches/0009-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch  254
-rw-r--r--  patches/0009-printk-reimplement-console_lock-for-proper-kthread-s.patch  36
-rw-r--r--  patches/0009_zsmalloc_replace_get_cpu_var_with_local_lock.patch  70
-rw-r--r--  patches/0010-drm-i915-Drop-the-irqs_disabled-check.patch  4
-rw-r--r--  patches/0010-printk-remove-console_locked.patch  10
-rw-r--r--  patches/0010-x86-mm-Include-spinlock_t-definition-in-pgtable.patch  36
-rw-r--r--  patches/0011-console-introduce-CON_MIGHT_SLEEP-for-vt.patch  2
-rw-r--r--  patches/0011-locking-Allow-to-include-asm-spinlock_types.h-from-l.patch  302
-rw-r--r--  patches/0012-printk-add-infrastucture-for-atomic-consoles.patch  48
-rw-r--r--  patches/0013-serial-8250-implement-write_atomic.patch  8
-rw-r--r--  patches/0014-printk-avoid-preempt_disable-for-PREEMPT_RT.patch  2
-rw-r--r--  patches/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch  4
-rw-r--r--  patches/Add_localversion_for_-RT_release.patch  2
-rw-r--r--  patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch  6
-rw-r--r--  patches/POWERPC__Allow_to_enable_RT.patch  4
-rw-r--r--  patches/Revert-tty-serial-Use-fifo-in-8250-console-driver.patch  122
-rw-r--r--  patches/Use-generic_handle_irq_safe-where-it-makes-sense.patch  4
-rw-r--r--  patches/arch_arm64__Add_lazy_preempt_support.patch  4
-rw-r--r--  patches/arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch  4
-rw-r--r--  patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch  4
-rw-r--r--  patches/block_mq__do_not_invoke_preempt_disable.patch  2
-rw-r--r--  patches/cgroup__use_irqsave_in_cgroup_rstat_flush_locked.patch  4
-rw-r--r--  patches/drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch  142
-rw-r--r--  patches/fs-namespace-Boost-the-mount_lock.lock-owner-instead.patch  2
-rw-r--r--  patches/fs_dcache__disable_preemption_on_i_dir_seqs_write_side.patch  4
-rw-r--r--  patches/fs_dcache__use_swait_queue_instead_of_waitqueue.patch  18
-rw-r--r--  patches/fscache-Use-only-one-fscache_object_cong_wait.patch  122
-rw-r--r--  patches/jump-label__disable_if_stop_machine_is_used.patch  2
-rw-r--r--  patches/kernel-locking-Use-a-pointer-in-ww_mutex_trylock.patch  24
-rw-r--r--  patches/locking-Enable-RT_MUTEXES-by-default-on-PREEMPT_RT.patch  26
-rw-r--r--  patches/md__raid5__Make_raid5_percpu_handling_RT_aware.patch  65
-rw-r--r--  patches/mm_vmalloc__Another_preempt_disable_region_which_sucks.patch  6
-rw-r--r--  patches/net-Write-lock-dev_base_lock-without-disabling-botto.patch  154
-rw-r--r--  patches/net-dev-Always-serialize-on-Qdisc-busylock-in-__dev_.patch  39
-rw-r--r--  patches/panic_remove_oops_id.patch  56
-rw-r--r--  patches/powerpc__Add_support_for_lazy_preemption.patch  8
-rw-r--r--  patches/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch  2
-rw-r--r--  patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch  6
-rw-r--r--  patches/rcu-tree-Protect-rcu_rdp_is_offloaded-invocations-on.patch  83
-rw-r--r--  patches/rcu__Delay_RCU-selftests.patch  8
-rw-r--r--  patches/sched__Add_support_for_lazy_preemption.patch  95
-rw-r--r--  patches/sched_introduce_migratable.patch  2
-rw-r--r--  patches/series  54
-rw-r--r--  patches/signal__Revert_ptrace_preempt_magic.patch  2
-rw-r--r--  patches/signal_x86__Delay_calling_signals_in_atomic.patch  4
-rw-r--r--  patches/softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch  2
-rw-r--r--  patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch  2
-rw-r--r--  patches/softirq__Check_preemption_after_reenabling_interrupts.patch  10
-rw-r--r--  patches/sunrpc__Make_svc_xprt_do_enqueue_use_get_cpu_light.patch  6
-rw-r--r--  patches/tcp-Add-a-stub-for-sk_defer_free_flush.patch  43
-rw-r--r--  patches/tcp-add-a-missing-sk_defer_free_flush-in-tcp_splice_.patch  29
-rw-r--r--  patches/tty_serial_pl011__Make_the_locking_work_on_RT.patch  4
-rw-r--r--  patches/u64_stats-Disable-preemption-on-32bit-UP-SMP-PREEMPT.patch  152
-rw-r--r--  patches/x86__Enable_RT_also_on_32bit.patch  2
-rw-r--r--  patches/x86__kvm_Require_const_tsc_for_RT.patch  2
104 files changed, 668 insertions, 4596 deletions
diff --git a/patches/0001-blk-mq-Add-blk_mq_complete_request_direct.patch b/patches/0001-blk-mq-Add-blk_mq_complete_request_direct.patch
deleted file mode 100644
index 6e186d413cd6..000000000000
--- a/patches/0001-blk-mq-Add-blk_mq_complete_request_direct.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 25 Oct 2021 09:06:57 +0200
-Subject: [PATCH 1/2] blk-mq: Add blk_mq_complete_request_direct()
-
-Add blk_mq_complete_request_direct() which completes the block request
-directly instead of deferring it to softirq for single queue devices.
-
-This is useful for devices which complete the requests in preemptible
-context, where raising the softirq would mean scheduling ksoftirqd.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Link: https://lore.kernel.org/r/20211025070658.1565848-2-bigeasy@linutronix.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/blk-mq.h | 11 +++++++++++
- 1 file changed, 11 insertions(+)
-
---- a/include/linux/blk-mq.h
-+++ b/include/linux/blk-mq.h
-@@ -752,6 +752,17 @@ static inline void blk_mq_set_request_co
- WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
- }
-
-+/*
-+ * Complete the request directly instead of deferring it to softirq or
-+ * completing it on another CPU. Useful in preemptible rather than interrupt context.
-+ */
-+static inline void blk_mq_complete_request_direct(struct request *rq,
-+ void (*complete)(struct request *rq))
-+{
-+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
-+ complete(rq);
-+}
-+
- void blk_mq_start_request(struct request *rq);
- void blk_mq_end_request(struct request *rq, blk_status_t error);
- void __blk_mq_end_request(struct request *rq, blk_status_t error);
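As a usage illustration (not part of the series): a driver that already runs
its completion work in preemptible context could call the new helper roughly
like this; the mydrv_* names are hypothetical:

    static void mydrv_complete(struct request *rq)
    {
            /* runs in the caller's (preemptible) context */
            blk_mq_end_request(rq, BLK_STS_OK);
    }

    static void mydrv_finish_io(struct request *rq)
    {
            /* complete in place instead of bouncing through softirq */
            blk_mq_complete_request_direct(rq, mydrv_complete);
    }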
diff --git a/patches/0001-mm-memcg-Disable-threshold-event-handlers-on-PREEMPT.patch b/patches/0001-mm-memcg-Disable-threshold-event-handlers-on-PREEMPT.patch
index d299bfa5b069..1ec5a27d048a 100644
--- a/patches/0001-mm-memcg-Disable-threshold-event-handlers-on-PREEMPT.patch
+++ b/patches/0001-mm-memcg-Disable-threshold-event-handlers-on-PREEMPT.patch
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
struct mem_cgroup_tree_per_node *mctz;
-@@ -821,50 +783,6 @@ static void mem_cgroup_charge_statistics
+@@ -827,50 +789,6 @@ static void mem_cgroup_charge_statistics
__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}
@@ -154,7 +154,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
/*
-@@ -3751,8 +3669,12 @@ static ssize_t mem_cgroup_write(struct k
+@@ -3763,8 +3681,12 @@ static ssize_t mem_cgroup_write(struct k
}
break;
case RES_SOFT_LIMIT:
@@ -167,7 +167,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
break;
}
return ret ?: nbytes;
-@@ -4057,6 +3979,343 @@ static int mem_cgroup_swappiness_write(s
+@@ -4069,6 +3991,343 @@ static int mem_cgroup_swappiness_write(s
return 0;
}
@@ -511,7 +511,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
struct mem_cgroup_threshold_ary *t;
-@@ -4119,6 +4378,25 @@ static void mem_cgroup_threshold(struct
+@@ -4131,6 +4390,25 @@ static void mem_cgroup_threshold(struct
}
}
@@ -537,7 +537,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int compare_thresholds(const void *a, const void *b)
{
const struct mem_cgroup_threshold *_a = a;
-@@ -4133,27 +4411,6 @@ static int compare_thresholds(const void
+@@ -4145,27 +4423,6 @@ static int compare_thresholds(const void
return 0;
}
@@ -565,7 +565,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{
-@@ -4382,259 +4639,6 @@ static void mem_cgroup_oom_unregister_ev
+@@ -4394,259 +4651,6 @@ static void mem_cgroup_oom_unregister_ev
spin_unlock(&memcg_oom_lock);
}
@@ -825,7 +825,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Unregister event and free resources.
*
-@@ -4845,6 +4849,18 @@ static ssize_t memcg_write_event_control
+@@ -4857,6 +4861,18 @@ static ssize_t memcg_write_event_control
return ret;
}
@@ -841,6 +841,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+#endif
+
- static struct cftype mem_cgroup_legacy_files[] = {
- {
- .name = "usage_in_bytes",
+ #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
+ static int mem_cgroup_slab_show(struct seq_file *m, void *p)
+ {
diff --git a/patches/0001-net-dev-Remove-the-preempt_disable-in-netif_rx_inter.patch b/patches/0001-net-dev-Remove-the-preempt_disable-in-netif_rx_inter.patch
index 2b9643dd92fa..40aec006655f 100644
--- a/patches/0001-net-dev-Remove-the-preempt_disable-in-netif_rx_inter.patch
+++ b/patches/0001-net-dev-Remove-the-preempt_disable-in-netif_rx_inter.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4915,7 +4915,6 @@ static int netif_rx_internal(struct sk_b
+@@ -4796,7 +4796,6 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4925,7 +4924,6 @@ static int netif_rx_internal(struct sk_b
+@@ -4806,7 +4805,6 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/0001-printk-rename-cpulock-functions.patch b/patches/0001-printk-rename-cpulock-functions.patch
index 0a276401a706..4bd440979c6e 100644
--- a/patches/0001-printk-rename-cpulock-functions.patch
+++ b/patches/0001-printk-rename-cpulock-functions.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -281,43 +281,43 @@ static inline void printk_trigger_flush(
+@@ -277,43 +277,43 @@ static inline void printk_trigger_flush(
#endif
#ifdef CONFIG_SMP
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3586,26 +3586,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+@@ -3600,26 +3600,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
#endif
#ifdef CONFIG_SMP
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* If no processor has the lock, the calling processor takes the lock and
* becomes the owner. If the calling processor is already the owner of the
-@@ -3614,7 +3614,7 @@ EXPORT_SYMBOL(__printk_wait_on_cpu_lock)
+@@ -3628,7 +3628,7 @@ EXPORT_SYMBOL(__printk_wait_on_cpu_lock)
* Context: Any context. Expects interrupts to be disabled.
* Return: 1 on success, otherwise 0.
*/
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
int cpu;
int old;
-@@ -3624,79 +3624,80 @@ int __printk_cpu_trylock(void)
+@@ -3638,79 +3638,80 @@ int __printk_cpu_trylock(void)
/*
* Guarantee loads and stores from this CPU when it is the lock owner
* are _not_ visible to the previous lock owner. This pairs with
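For reference, the rename performed by this patch maps the old cpulock API
onto the cpu-sync naming (reconstructed from the hunks above and the
follow-up patch):

    __printk_cpu_trylock()           -> __printk_cpu_sync_try_get()
    __printk_wait_on_cpu_lock()      -> __printk_cpu_sync_wait()
    __printk_cpu_unlock()            -> __printk_cpu_sync_put()
    printk_cpu_lock_irqsave()        -> printk_cpu_sync_get_irqsave()
    printk_cpu_unlock_irqrestore()   -> printk_cpu_sync_put_irqrestore()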
diff --git a/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch b/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
deleted file mode 100644
index 59c0890f4be6..000000000000
--- a/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Nov 2021 18:46:44 +0100
-Subject: [PATCH 01/11] sched: Trigger warning if ->migration_disabled counter
- underflows.
-
-If migrate_enable() is used more often than its counterpart then it
-remains undetected and rq::nr_pinned will underflow, too.
-
-Add a warning if migrate_enable() is attempted without a matching
-migrate_disable().
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-2-bigeasy@linutronix.de
----
- kernel/sched/core.c | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -2171,6 +2171,8 @@ void migrate_enable(void)
- if (p->migration_disabled > 1) {
- p->migration_disabled--;
- return;
-+ } else if (WARN_ON_ONCE(p->migration_disabled == 0)) {
-+ return;
- }
-
- /*
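A sketch of the imbalance the new check catches (illustrative only):

    migrate_disable();
    /* ... migration of this task is inhibited ... */
    migrate_enable();
    migrate_enable();   /* unbalanced: ->migration_disabled would underflow;
                           with this patch the second call triggers
                           WARN_ON_ONCE() and returns early */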
diff --git a/patches/0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch b/patches/0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch
index bde4a304b21c..f13597735bc0 100644
--- a/patches/0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch
+++ b/patches/0001_kernel_fork_redo_ifdefs_around_task_s_handling.patch
@@ -27,7 +27,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-2-bigeasy@linutronix.de
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -184,7 +184,7 @@ static inline void free_task_struct(stru
+@@ -185,7 +185,7 @@ static inline void free_task_struct(stru
*/
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
@@ -36,7 +36,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-2-bigeasy@linutronix.de
/*
* vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
* flush. Try to minimize the number of calls by caching stacks.
-@@ -209,11 +209,9 @@ static int free_vm_stack_cache(unsigned
+@@ -210,11 +210,9 @@ static int free_vm_stack_cache(unsigned
return 0;
}
@@ -48,7 +48,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-2-bigeasy@linutronix.de
void *stack;
int i;
-@@ -257,45 +255,53 @@ static unsigned long *alloc_thread_stack
+@@ -258,45 +256,53 @@ static unsigned long *alloc_thread_stack
tsk->stack = stack;
}
return stack;
@@ -128,7 +128,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-2-bigeasy@linutronix.de
static struct kmem_cache *thread_stack_cache;
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
-@@ -311,6 +317,7 @@ static unsigned long *alloc_thread_stack
+@@ -312,6 +318,7 @@ static unsigned long *alloc_thread_stack
static void free_thread_stack(struct task_struct *tsk)
{
kmem_cache_free(thread_stack_cache, tsk->stack);
@@ -136,7 +136,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-2-bigeasy@linutronix.de
}
void thread_stack_cache_init(void)
-@@ -320,8 +327,9 @@ void thread_stack_cache_init(void)
+@@ -321,8 +328,9 @@ void thread_stack_cache_init(void)
THREAD_SIZE, NULL);
BUG_ON(thread_stack_cache == NULL);
}
@@ -148,7 +148,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-2-bigeasy@linutronix.de
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;
-@@ -429,10 +437,6 @@ static void release_task_stack(struct ta
+@@ -432,10 +440,6 @@ static void release_task_stack(struct ta
account_kernel_stack(tsk, -1);
free_thread_stack(tsk);
diff --git a/patches/0001_random_remove_unused_irq_flags_argument_from_add_interrupt_randomness.patch b/patches/0001_random_remove_unused_irq_flags_argument_from_add_interrupt_randomness.patch
deleted file mode 100644
index 01dcb9789602..000000000000
--- a/patches/0001_random_remove_unused_irq_flags_argument_from_add_interrupt_randomness.patch
+++ /dev/null
@@ -1,97 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Subject: random: Remove unused irq_flags argument from add_interrupt_randomness().
-Date: Tue, 07 Dec 2021 13:17:33 +0100
-
-Since commit
- ee3e00e9e7101 ("random: use registers from interrupted code for CPU's w/o a cycle counter")
-
-the irq_flags argument is no longer used.
-
-Remove the unused irq_flags argument.
-
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: Dexuan Cui <decui@microsoft.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Haiyang Zhang <haiyangz@microsoft.com>
-Cc: Ingo Molnar <mingo@redhat.com>
-Cc: K. Y. Srinivasan <kys@microsoft.com>
-Cc: Stephen Hemminger <sthemmin@microsoft.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Wei Liu <wei.liu@kernel.org>
-Cc: linux-hyperv@vger.kernel.org
-Cc: x86@kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211207121737.2347312-2-bigeasy@linutronix.de
----
- arch/x86/kernel/cpu/mshyperv.c | 2 +-
- drivers/char/random.c | 4 ++--
- drivers/hv/vmbus_drv.c | 2 +-
- include/linux/random.h | 2 +-
- kernel/irq/handle.c | 2 +-
- 5 files changed, 6 insertions(+), 6 deletions(-)
-
---- a/arch/x86/kernel/cpu/mshyperv.c
-+++ b/arch/x86/kernel/cpu/mshyperv.c
-@@ -79,7 +79,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_sti
- inc_irq_stat(hyperv_stimer0_count);
- if (hv_stimer0_handler)
- hv_stimer0_handler();
-- add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
-+ add_interrupt_randomness(HYPERV_STIMER0_VECTOR);
- ack_APIC_irq();
-
- set_irq_regs(old_regs);
---- a/drivers/char/random.c
-+++ b/drivers/char/random.c
-@@ -200,7 +200,7 @@
- * void add_device_randomness(const void *buf, unsigned int size);
- * void add_input_randomness(unsigned int type, unsigned int code,
- * unsigned int value);
-- * void add_interrupt_randomness(int irq, int irq_flags);
-+ * void add_interrupt_randomness(int irq);
- * void add_disk_randomness(struct gendisk *disk);
- *
- * add_device_randomness() is for adding data to the random pool that
-@@ -1260,7 +1260,7 @@ static __u32 get_reg(struct fast_pool *f
- return *ptr;
- }
-
--void add_interrupt_randomness(int irq, int irq_flags)
-+void add_interrupt_randomness(int irq)
- {
- struct entropy_store *r;
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
---- a/drivers/hv/vmbus_drv.c
-+++ b/drivers/hv/vmbus_drv.c
-@@ -1381,7 +1381,7 @@ static void vmbus_isr(void)
- tasklet_schedule(&hv_cpu->msg_dpc);
- }
-
-- add_interrupt_randomness(vmbus_interrupt, 0);
-+ add_interrupt_randomness(vmbus_interrupt);
- }
-
- static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
---- a/include/linux/random.h
-+++ b/include/linux/random.h
-@@ -35,7 +35,7 @@ static inline void add_latent_entropy(vo
-
- extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value) __latent_entropy;
--extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
-+extern void add_interrupt_randomness(int irq) __latent_entropy;
-
- extern void get_random_bytes(void *buf, int nbytes);
- extern int wait_for_random_bytes(void);
---- a/kernel/irq/handle.c
-+++ b/kernel/irq/handle.c
-@@ -197,7 +197,7 @@ irqreturn_t handle_irq_event_percpu(stru
-
- retval = __handle_irq_event_percpu(desc, &flags);
-
-- add_interrupt_randomness(desc->irq_data.irq, flags);
-+ add_interrupt_randomness(desc->irq_data.irq);
-
- if (!irq_settings_no_debug(desc))
- note_interrupt(desc, retval);
diff --git a/patches/0001_zsmalloc_introduce_some_helper_functions.patch b/patches/0001_zsmalloc_introduce_some_helper_functions.patch
deleted file mode 100644
index bdadaf2d44fb..000000000000
--- a/patches/0001_zsmalloc_introduce_some_helper_functions.patch
+++ /dev/null
@@ -1,187 +0,0 @@
-From: Minchan Kim <minchan@kernel.org>
-Subject: zsmalloc: introduce some helper functions
-Date: Mon, 15 Nov 2021 10:59:01 -0800
-
-get_zspage_mapping returns fullness as well as class_idx. However,
-the fullness is usually not used since it could be stale in some
-contexts. It is misleading and generates unnecessary instructions,
-so this patch introduces zspage_class.
-
-obj_to_location also produces page and index, but we don't always
-need the index either, so this patch also introduces obj_to_page.
-
-Signed-off-by: Minchan Kim <minchan@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211115185909.3949505-2-minchan@kernel.org
----
- mm/zsmalloc.c | 54 +++++++++++++++++++++++-------------------------------
- 1 file changed, 23 insertions(+), 31 deletions(-)
-
---- a/mm/zsmalloc.c
-+++ b/mm/zsmalloc.c
-@@ -517,6 +517,12 @@ static void get_zspage_mapping(struct zs
- *class_idx = zspage->class;
- }
-
-+static struct size_class *zspage_class(struct zs_pool *pool,
-+ struct zspage *zspage)
-+{
-+ return pool->size_class[zspage->class];
-+}
-+
- static void set_zspage_mapping(struct zspage *zspage,
- unsigned int class_idx,
- enum fullness_group fullness)
-@@ -844,6 +850,12 @@ static void obj_to_location(unsigned lon
- *obj_idx = (obj & OBJ_INDEX_MASK);
- }
-
-+static void obj_to_page(unsigned long obj, struct page **page)
-+{
-+ obj >>= OBJ_TAG_BITS;
-+ *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
-+}
-+
- /**
- * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
- * @page: page object resides in zspage
-@@ -1246,8 +1258,6 @@ void *zs_map_object(struct zs_pool *pool
- unsigned long obj, off;
- unsigned int obj_idx;
-
-- unsigned int class_idx;
-- enum fullness_group fg;
- struct size_class *class;
- struct mapping_area *area;
- struct page *pages[2];
-@@ -1270,8 +1280,7 @@ void *zs_map_object(struct zs_pool *pool
- /* migration cannot move any subpage in this zspage */
- migrate_read_lock(zspage);
-
-- get_zspage_mapping(zspage, &class_idx, &fg);
-- class = pool->size_class[class_idx];
-+ class = zspage_class(pool, zspage);
- off = (class->size * obj_idx) & ~PAGE_MASK;
-
- area = &get_cpu_var(zs_map_area);
-@@ -1304,16 +1313,13 @@ void zs_unmap_object(struct zs_pool *poo
- unsigned long obj, off;
- unsigned int obj_idx;
-
-- unsigned int class_idx;
-- enum fullness_group fg;
- struct size_class *class;
- struct mapping_area *area;
-
- obj = handle_to_obj(handle);
- obj_to_location(obj, &page, &obj_idx);
- zspage = get_zspage(page);
-- get_zspage_mapping(zspage, &class_idx, &fg);
-- class = pool->size_class[class_idx];
-+ class = zspage_class(pool, zspage);
- off = (class->size * obj_idx) & ~PAGE_MASK;
-
- area = this_cpu_ptr(&zs_map_area);
-@@ -1491,8 +1497,6 @@ void zs_free(struct zs_pool *pool, unsig
- struct zspage *zspage;
- struct page *f_page;
- unsigned long obj;
-- unsigned int f_objidx;
-- int class_idx;
- struct size_class *class;
- enum fullness_group fullness;
- bool isolated;
-@@ -1502,13 +1506,11 @@ void zs_free(struct zs_pool *pool, unsig
-
- pin_tag(handle);
- obj = handle_to_obj(handle);
-- obj_to_location(obj, &f_page, &f_objidx);
-+ obj_to_page(obj, &f_page);
- zspage = get_zspage(f_page);
-
- migrate_read_lock(zspage);
--
-- get_zspage_mapping(zspage, &class_idx, &fullness);
-- class = pool->size_class[class_idx];
-+ class = zspage_class(pool, zspage);
-
- spin_lock(&class->lock);
- obj_free(class, obj);
-@@ -1866,8 +1868,6 @@ static bool zs_page_isolate(struct page
- {
- struct zs_pool *pool;
- struct size_class *class;
-- int class_idx;
-- enum fullness_group fullness;
- struct zspage *zspage;
- struct address_space *mapping;
-
-@@ -1880,15 +1880,10 @@ static bool zs_page_isolate(struct page
-
- zspage = get_zspage(page);
-
-- /*
-- * Without class lock, fullness could be stale while class_idx is okay
-- * because class_idx is constant unless page is freed so we should get
-- * fullness again under class lock.
-- */
-- get_zspage_mapping(zspage, &class_idx, &fullness);
- mapping = page_mapping(page);
- pool = mapping->private_data;
-- class = pool->size_class[class_idx];
-+
-+ class = zspage_class(pool, zspage);
-
- spin_lock(&class->lock);
- if (get_zspage_inuse(zspage) == 0) {
-@@ -1907,6 +1902,9 @@ static bool zs_page_isolate(struct page
- * size_class to prevent further object allocation from the zspage.
- */
- if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
-+ enum fullness_group fullness;
-+ unsigned int class_idx;
-+
- get_zspage_mapping(zspage, &class_idx, &fullness);
- atomic_long_inc(&pool->isolated_pages);
- remove_zspage(class, zspage, fullness);
-@@ -1923,8 +1921,6 @@ static int zs_page_migrate(struct addres
- {
- struct zs_pool *pool;
- struct size_class *class;
-- int class_idx;
-- enum fullness_group fullness;
- struct zspage *zspage;
- struct page *dummy;
- void *s_addr, *d_addr, *addr;
-@@ -1949,9 +1945,8 @@ static int zs_page_migrate(struct addres
-
- /* Concurrent compactor cannot migrate any subpage in zspage */
- migrate_write_lock(zspage);
-- get_zspage_mapping(zspage, &class_idx, &fullness);
- pool = mapping->private_data;
-- class = pool->size_class[class_idx];
-+ class = zspage_class(pool, zspage);
- offset = get_first_obj_offset(page);
-
- spin_lock(&class->lock);
-@@ -2049,8 +2044,6 @@ static void zs_page_putback(struct page
- {
- struct zs_pool *pool;
- struct size_class *class;
-- int class_idx;
-- enum fullness_group fg;
- struct address_space *mapping;
- struct zspage *zspage;
-
-@@ -2058,10 +2051,9 @@ static void zs_page_putback(struct page
- VM_BUG_ON_PAGE(!PageIsolated(page), page);
-
- zspage = get_zspage(page);
-- get_zspage_mapping(zspage, &class_idx, &fg);
- mapping = page_mapping(page);
- pool = mapping->private_data;
-- class = pool->size_class[class_idx];
-+ class = zspage_class(pool, zspage);
-
- spin_lock(&class->lock);
- dec_zspage_isolation(zspage);
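The call-site pattern the helpers replace, side by side (sketch based on the
hunks above):

    /* before: two-step lookup; fullness is fetched only to be discarded */
    get_zspage_mapping(zspage, &class_idx, &fullness);
    class = pool->size_class[class_idx];

    /* after: one helper, no stale fullness read */
    class = zspage_class(pool, zspage);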
diff --git a/patches/0002-locking-Remove-rt_rwlock_is_contended.patch b/patches/0002-locking-Remove-rt_rwlock_is_contended.patch
deleted file mode 100644
index 0baf43a953c1..000000000000
--- a/patches/0002-locking-Remove-rt_rwlock_is_contended.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Nov 2021 18:46:45 +0100
-Subject: [PATCH 02/11] locking: Remove rt_rwlock_is_contended().
-
-rt_rwlock_is_contended() has no users. It makes no sense to use it as
-rwlock_is_contended() because it is a sleeping lock on RT and preemption
-is possible. It always reports != 0 if used by a writer, and even if
-there is a waiter the lock might not be handed over if the
-current owner has the highest priority.
-
-Remove rt_rwlock_is_contended().
-
-Reported-by: kernel test robot <lkp@intel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-3-bigeasy@linutronix.de
----
- kernel/locking/spinlock_rt.c | 6 ------
- 1 file changed, 6 deletions(-)
-
---- a/kernel/locking/spinlock_rt.c
-+++ b/kernel/locking/spinlock_rt.c
-@@ -257,12 +257,6 @@ void __sched rt_write_unlock(rwlock_t *r
- }
- EXPORT_SYMBOL(rt_write_unlock);
-
--int __sched rt_rwlock_is_contended(rwlock_t *rwlock)
--{
-- return rw_base_is_contended(&rwlock->rwbase);
--}
--EXPORT_SYMBOL(rt_rwlock_is_contended);
--
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
- struct lock_class_key *key)
diff --git a/patches/0002-mm-memcg-Protect-per-CPU-counter-by-disabling-preemp.patch b/patches/0002-mm-memcg-Protect-per-CPU-counter-by-disabling-preemp.patch
index 7ed1f73e4ea1..c14ca8a80061 100644
--- a/patches/0002-mm-memcg-Protect-per-CPU-counter-by-disabling-preemp.patch
+++ b/patches/0002-mm-memcg-Protect-per-CPU-counter-by-disabling-preemp.patch
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -661,6 +661,8 @@ void __mod_memcg_lruvec_state(struct lru
+@@ -667,6 +667,8 @@ void __mod_memcg_lruvec_state(struct lru
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
memcg = pn->memcg;
@@ -40,29 +40,29 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Update memcg */
__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
-@@ -668,6 +670,8 @@ void __mod_memcg_lruvec_state(struct lru
+@@ -674,6 +676,8 @@ void __mod_memcg_lruvec_state(struct lru
__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
- memcg_rstat_updated(memcg);
+ memcg_rstat_updated(memcg, val);
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
/**
-@@ -750,8 +754,12 @@ void __count_memcg_events(struct mem_cgr
+@@ -756,8 +760,12 @@ void __count_memcg_events(struct mem_cgr
if (mem_cgroup_disabled())
return;
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
__this_cpu_add(memcg->vmstats_percpu->events[idx], count);
- memcg_rstat_updated(memcg);
+ memcg_rstat_updated(memcg, count);
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
-@@ -7173,9 +7181,18 @@ void mem_cgroup_swapout(struct page *pag
+@@ -7194,9 +7202,18 @@ void mem_cgroup_swapout(struct page *pag
* i_pages lock which is taken with interrupts-off. It is
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
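The recurring pattern this patch adds, as a standalone sketch (my_counter is
a hypothetical per-CPU variable):

    /*
     * On PREEMPT_RT the callers run preemptibly, so the per-CPU RMW must be
     * protected explicitly; on !RT the callers already have IRQs disabled.
     */
    if (IS_ENABLED(CONFIG_PREEMPT_RT))
            preempt_disable();
    __this_cpu_add(my_counter, val);
    if (IS_ENABLED(CONFIG_PREEMPT_RT))
            preempt_enable();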
diff --git a/patches/0002-mmc-core-Use-blk_mq_complete_request_direct.patch b/patches/0002-mmc-core-Use-blk_mq_complete_request_direct.patch
deleted file mode 100644
index 91e16510d055..000000000000
--- a/patches/0002-mmc-core-Use-blk_mq_complete_request_direct.patch
+++ /dev/null
@@ -1,88 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 25 Oct 2021 09:06:58 +0200
-Subject: [PATCH 2/2] mmc: core: Use blk_mq_complete_request_direct().
-
-The completion callback for the sdhci-pci device is invoked from a
-kworker.
-I couldn't identify in which context mmc_blk_mq_req_done() is invoked, but
-the remaining callers are invoked from preemptible context. Here it
-would make sense to complete the request directly instead of scheduling
-ksoftirqd for its completion.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
-Acked-by: Adrian Hunter <adrian.hunter@intel.com>
-Link: https://lore.kernel.org/r/20211025070658.1565848-3-bigeasy@linutronix.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/mmc/core/block.c | 22 ++++++++++++++--------
- 1 file changed, 14 insertions(+), 8 deletions(-)
-
---- a/drivers/mmc/core/block.c
-+++ b/drivers/mmc/core/block.c
-@@ -2051,7 +2051,8 @@ static void mmc_blk_mq_dec_in_flight(str
- mmc_put_card(mq->card, &mq->ctx);
- }
-
--static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
-+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
-+ bool can_sleep)
- {
- struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
- struct mmc_request *mrq = &mqrq->brq.mrq;
-@@ -2063,10 +2064,14 @@ static void mmc_blk_mq_post_req(struct m
- * Block layer timeouts race with completions which means the normal
- * completion path cannot be used during recovery.
- */
-- if (mq->in_recovery)
-+ if (mq->in_recovery) {
- mmc_blk_mq_complete_rq(mq, req);
-- else if (likely(!blk_should_fake_timeout(req->q)))
-- blk_mq_complete_request(req);
-+ } else if (likely(!blk_should_fake_timeout(req->q))) {
-+ if (can_sleep)
-+ blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
-+ else
-+ blk_mq_complete_request(req);
-+ }
-
- mmc_blk_mq_dec_in_flight(mq, req);
- }
-@@ -2087,7 +2092,7 @@ void mmc_blk_mq_recovery(struct mmc_queu
-
- mmc_blk_urgent_bkops(mq, mqrq);
-
-- mmc_blk_mq_post_req(mq, req);
-+ mmc_blk_mq_post_req(mq, req, true);
- }
-
- static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
-@@ -2106,7 +2111,7 @@ static void mmc_blk_mq_complete_prev_req
- if (prev_req)
- *prev_req = mq->complete_req;
- else
-- mmc_blk_mq_post_req(mq, mq->complete_req);
-+ mmc_blk_mq_post_req(mq, mq->complete_req, true);
-
- mq->complete_req = NULL;
-
-@@ -2178,7 +2183,8 @@ static void mmc_blk_mq_req_done(struct m
- mq->rw_wait = false;
- wake_up(&mq->wait);
-
-- mmc_blk_mq_post_req(mq, req);
-+ /* context unknown */
-+ mmc_blk_mq_post_req(mq, req, false);
- }
-
- static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
-@@ -2238,7 +2244,7 @@ static int mmc_blk_mq_issue_rw_rq(struct
- err = mmc_start_request(host, &mqrq->brq.mrq);
-
- if (prev_req)
-- mmc_blk_mq_post_req(mq, prev_req);
-+ mmc_blk_mq_post_req(mq, prev_req, true);
-
- if (err)
- mq->rw_wait = false;
diff --git a/patches/0002-net-dev-Remove-get_cpu-in-netif_rx_internal.patch b/patches/0002-net-dev-Remove-get_cpu-in-netif_rx_internal.patch
index db706e5c9c8f..61ed5009aef8 100644
--- a/patches/0002-net-dev-Remove-get_cpu-in-netif_rx_internal.patch
+++ b/patches/0002-net-dev-Remove-get_cpu-in-netif_rx_internal.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4929,8 +4929,7 @@ static int netif_rx_internal(struct sk_b
+@@ -4810,8 +4810,7 @@ static int netif_rx_internal(struct sk_b
{
unsigned int qtail;
diff --git a/patches/0002-printk-cpu-sync-always-disable-interrupts.patch b/patches/0002-printk-cpu-sync-always-disable-interrupts.patch
index 0ddf311ee46a..0499f8e5ff54 100644
--- a/patches/0002-printk-cpu-sync-always-disable-interrupts.patch
+++ b/patches/0002-printk-cpu-sync-always-disable-interrupts.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -285,9 +285,16 @@ extern int __printk_cpu_sync_try_get(voi
+@@ -281,9 +281,16 @@ extern int __printk_cpu_sync_try_get(voi
extern void __printk_cpu_sync_wait(void);
extern void __printk_cpu_sync_put(void);
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @flags: Stack-allocated storage for saving local interrupt state,
* to be passed to printk_cpu_sync_put_irqrestore().
*
-@@ -314,13 +321,6 @@ extern void __printk_cpu_sync_put(void);
+@@ -310,13 +317,6 @@ extern void __printk_cpu_sync_put(void);
local_irq_restore(flags); \
} while (0)
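Typical use of the (renamed) interface, per the kernel-doc fragment above
(sketch):

    unsigned long flags;

    printk_cpu_sync_get_irqsave(flags);
    /* emit output serialized against other CPUs, e.g. a backtrace */
    printk_cpu_sync_put_irqrestore(flags);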
diff --git a/patches/0002_irq_remove_unsued_flags_argument_from___handle_irq_event_percpu.patch b/patches/0002_irq_remove_unsued_flags_argument_from___handle_irq_event_percpu.patch
deleted file mode 100644
index 70a3f0a993a4..000000000000
--- a/patches/0002_irq_remove_unsued_flags_argument_from___handle_irq_event_percpu.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Subject: irq: Remove unused flags argument from __handle_irq_event_percpu().
-Date: Tue, 07 Dec 2021 13:17:34 +0100
-
-The __IRQF_TIMER bit from the flags argument was used in
-add_interrupt_randomness() to distinguish the timer interrupt from other
-interrupts. This is no longer the case.
-
-Remove the flags argument from __handle_irq_event_percpu().
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211207121737.2347312-3-bigeasy@linutronix.de
----
- kernel/irq/chip.c | 4 +---
- kernel/irq/handle.c | 9 ++-------
- kernel/irq/internals.h | 2 +-
- 3 files changed, 4 insertions(+), 11 deletions(-)
-
---- a/kernel/irq/chip.c
-+++ b/kernel/irq/chip.c
-@@ -575,8 +575,6 @@ EXPORT_SYMBOL_GPL(handle_simple_irq);
- */
- void handle_untracked_irq(struct irq_desc *desc)
- {
-- unsigned int flags = 0;
--
- raw_spin_lock(&desc->lock);
-
- if (!irq_may_run(desc))
-@@ -593,7 +591,7 @@ void handle_untracked_irq(struct irq_des
- irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
- raw_spin_unlock(&desc->lock);
-
-- __handle_irq_event_percpu(desc, &flags);
-+ __handle_irq_event_percpu(desc);
-
- raw_spin_lock(&desc->lock);
- irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
---- a/kernel/irq/handle.c
-+++ b/kernel/irq/handle.c
-@@ -136,7 +136,7 @@ void __irq_wake_thread(struct irq_desc *
- wake_up_process(action->thread);
- }
-
--irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
-+irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc)
- {
- irqreturn_t retval = IRQ_NONE;
- unsigned int irq = desc->irq_data.irq;
-@@ -174,10 +174,6 @@ irqreturn_t __handle_irq_event_percpu(st
- }
-
- __irq_wake_thread(desc, action);
--
-- fallthrough; /* to add to randomness */
-- case IRQ_HANDLED:
-- *flags |= action->flags;
- break;
-
- default:
-@@ -193,9 +189,8 @@ irqreturn_t __handle_irq_event_percpu(st
- irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
- {
- irqreturn_t retval;
-- unsigned int flags = 0;
-
-- retval = __handle_irq_event_percpu(desc, &flags);
-+ retval = __handle_irq_event_percpu(desc);
-
- add_interrupt_randomness(desc->irq_data.irq);
-
---- a/kernel/irq/internals.h
-+++ b/kernel/irq/internals.h
-@@ -103,7 +103,7 @@ extern int __irq_get_irqchip_state(struc
-
- extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
-
--irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
-+irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc);
- irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
- irqreturn_t handle_irq_event(struct irq_desc *desc);
-
diff --git a/patches/0002_kernel_fork_duplicate_task_struct_before_stack_allocation.patch b/patches/0002_kernel_fork_duplicate_task_struct_before_stack_allocation.patch
index 9b5bd4dcd325..75b499e5db19 100644
--- a/patches/0002_kernel_fork_duplicate_task_struct_before_stack_allocation.patch
+++ b/patches/0002_kernel_fork_duplicate_task_struct_before_stack_allocation.patch
@@ -17,7 +17,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-3-bigeasy@linutronix.de
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -887,6 +887,10 @@ static struct task_struct *dup_task_stru
+@@ -888,6 +888,10 @@ static struct task_struct *dup_task_stru
if (!tsk)
return NULL;
@@ -28,7 +28,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-3-bigeasy@linutronix.de
stack = alloc_thread_stack_node(tsk, node);
if (!stack)
goto free_tsk;
-@@ -896,8 +900,6 @@ static struct task_struct *dup_task_stru
+@@ -897,8 +901,6 @@ static struct task_struct *dup_task_stru
stack_vm_area = task_stack_vm_area(tsk);
@@ -37,7 +37,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-3-bigeasy@linutronix.de
/*
* arch_dup_task_struct() clobbers the stack-related fields. Make
* sure they're properly initialized before using any stack-related
-@@ -911,9 +913,6 @@ static struct task_struct *dup_task_stru
+@@ -912,9 +914,6 @@ static struct task_struct *dup_task_stru
refcount_set(&tsk->stack_refcount, 1);
#endif
diff --git a/patches/0002_zsmalloc_rename_zs_stat_type_to_class_stat_type.patch b/patches/0002_zsmalloc_rename_zs_stat_type_to_class_stat_type.patch
deleted file mode 100644
index 8efd92883d8a..000000000000
--- a/patches/0002_zsmalloc_rename_zs_stat_type_to_class_stat_type.patch
+++ /dev/null
@@ -1,105 +0,0 @@
-From: Minchan Kim <minchan@kernel.org>
-Subject: zsmalloc: rename zs_stat_type to class_stat_type
-Date: Mon, 15 Nov 2021 10:59:02 -0800
-
-The stat is about the class, not the zspage, so rename it.
-
-Signed-off-by: Minchan Kim <minchan@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211115185909.3949505-3-minchan@kernel.org
----
- mm/zsmalloc.c | 24 ++++++++++++------------
- 1 file changed, 12 insertions(+), 12 deletions(-)
-
---- a/mm/zsmalloc.c
-+++ b/mm/zsmalloc.c
-@@ -158,7 +158,7 @@ enum fullness_group {
- NR_ZS_FULLNESS,
- };
-
--enum zs_stat_type {
-+enum class_stat_type {
- CLASS_EMPTY,
- CLASS_ALMOST_EMPTY,
- CLASS_ALMOST_FULL,
-@@ -549,21 +549,21 @@ static int get_size_class_index(int size
- return min_t(int, ZS_SIZE_CLASSES - 1, idx);
- }
-
--/* type can be of enum type zs_stat_type or fullness_group */
--static inline void zs_stat_inc(struct size_class *class,
-+/* type can be of enum type class_stat_type or fullness_group */
-+static inline void class_stat_inc(struct size_class *class,
- int type, unsigned long cnt)
- {
- class->stats.objs[type] += cnt;
- }
-
--/* type can be of enum type zs_stat_type or fullness_group */
--static inline void zs_stat_dec(struct size_class *class,
-+/* type can be of enum type class_stat_type or fullness_group */
-+static inline void class_stat_dec(struct size_class *class,
- int type, unsigned long cnt)
- {
- class->stats.objs[type] -= cnt;
- }
-
--/* type can be of enum type zs_stat_type or fullness_group */
-+/* type can be of enum type class_stat_type or fullness_group */
- static inline unsigned long zs_stat_get(struct size_class *class,
- int type)
- {
-@@ -725,7 +725,7 @@ static void insert_zspage(struct size_cl
- {
- struct zspage *head;
-
-- zs_stat_inc(class, fullness, 1);
-+ class_stat_inc(class, fullness, 1);
- head = list_first_entry_or_null(&class->fullness_list[fullness],
- struct zspage, list);
- /*
-@@ -750,7 +750,7 @@ static void remove_zspage(struct size_cl
- VM_BUG_ON(is_zspage_isolated(zspage));
-
- list_del_init(&zspage->list);
-- zs_stat_dec(class, fullness, 1);
-+ class_stat_dec(class, fullness, 1);
- }
-
- /*
-@@ -964,7 +964,7 @@ static void __free_zspage(struct zs_pool
-
- cache_free_zspage(pool, zspage);
-
-- zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
-+ class_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
- atomic_long_sub(class->pages_per_zspage,
- &pool->pages_allocated);
- }
-@@ -1394,7 +1394,7 @@ static unsigned long obj_malloc(struct s
-
- kunmap_atomic(vaddr);
- mod_zspage_inuse(zspage, 1);
-- zs_stat_inc(class, OBJ_USED, 1);
-+ class_stat_inc(class, OBJ_USED, 1);
-
- obj = location_to_obj(m_page, obj);
-
-@@ -1458,7 +1458,7 @@ unsigned long zs_malloc(struct zs_pool *
- record_obj(handle, obj);
- atomic_long_add(class->pages_per_zspage,
- &pool->pages_allocated);
-- zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
-+ class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
-
- /* We completely set up zspage so mark them as movable */
- SetZsPageMovable(pool, zspage);
-@@ -1489,7 +1489,7 @@ static void obj_free(struct size_class *
- kunmap_atomic(vaddr);
- set_freeobj(zspage, f_objidx);
- mod_zspage_inuse(zspage, -1);
-- zs_stat_dec(class, OBJ_USED, 1);
-+ class_stat_dec(class, OBJ_USED, 1);
- }
-
- void zs_free(struct zs_pool *pool, unsigned long handle)
diff --git a/patches/0003-locking-rtmutex-Squash-self-deadlock-check-for-ww_rt.patch b/patches/0003-locking-rtmutex-Squash-self-deadlock-check-for-ww_rt.patch
deleted file mode 100644
index 6e78805783bc..000000000000
--- a/patches/0003-locking-rtmutex-Squash-self-deadlock-check-for-ww_rt.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 29 Nov 2021 18:46:46 +0100
-Subject: [PATCH 03/11] locking/rtmutex: Squash self-deadlock check for
- ww_rt_mutex.
-
-Similar to the issues in commits:
-
- 6467822b8cc9 ("locking/rtmutex: Prevent spurious EDEADLK return caused by ww_mutexes")
- a055fcc132d4 ("locking/rtmutex: Return success on deadlock for ww_mutex waiters")
-
-ww_rt_mutex_lock() should not return EDEADLK without first going through
-the __ww_mutex logic to set the required state. In fact, the chain-walk
-can deal with the spurious cycles (per the above commits) this check
-warns about and is trying to avoid.
-
-Therefore ignore this test for ww_rt_mutex and simply let things fall
-in place.
-
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-4-bigeasy@linutronix.de
----
- kernel/locking/rtmutex.c | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1103,8 +1103,11 @@ static int __sched task_blocks_on_rt_mut
- * the other will detect the deadlock and return -EDEADLOCK,
- * which is wrong, as the other waiter is not in a deadlock
- * situation.
-+ *
-+ * Except for ww_mutex, in that case the chain walk must already deal
-+ * with spurious cycles, see the comments at [3] and [6].
- */
-- if (owner == task)
-+ if (owner == task && !(build_ww_mutex() && ww_ctx))
- return -EDEADLK;
-
- raw_spin_lock(&task->pi_lock);
diff --git a/patches/0003-mm-memcg-Add-a-local_lock_t-for-IRQ-and-TASK-object.patch b/patches/0003-mm-memcg-Add-a-local_lock_t-for-IRQ-and-TASK-object.patch
index 13ba236cffa9..dc2280807190 100644
--- a/patches/0003-mm-memcg-Add-a-local_lock_t-for-IRQ-and-TASK-object.patch
+++ b/patches/0003-mm-memcg-Add-a-local_lock_t-for-IRQ-and-TASK-object.patch
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock_irqsave(&css_set_lock, flags);
list_del(&objcg->list);
-@@ -2017,26 +2019,40 @@ struct obj_stock {
+@@ -2024,26 +2026,40 @@ struct obj_stock {
};
struct memcg_stock_pcp {
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
struct mem_cgroup *root_memcg)
-@@ -2065,7 +2081,7 @@ static bool consume_stock(struct mem_cgr
+@@ -2072,7 +2088,7 @@ static bool consume_stock(struct mem_cgr
if (nr_pages > MEMCG_CHARGE_BATCH)
return ret;
@@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
-@@ -2073,7 +2089,7 @@ static bool consume_stock(struct mem_cgr
+@@ -2080,7 +2096,7 @@ static bool consume_stock(struct mem_cgr
ret = true;
}
@@ -139,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2101,38 +2117,43 @@ static void drain_stock(struct memcg_sto
+@@ -2108,38 +2124,43 @@ static void drain_stock(struct memcg_sto
static void drain_local_stock(struct work_struct *dummy)
{
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (stock->cached != memcg) { /* reset if necessary */
drain_stock(stock);
css_get(&memcg->css);
-@@ -2142,8 +2163,20 @@ static void refill_stock(struct mem_cgro
+@@ -2149,8 +2170,20 @@ static void refill_stock(struct mem_cgro
if (stock->nr_pages > MEMCG_CHARGE_BATCH)
drain_stock(stock);
@@ -222,7 +222,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2152,7 +2185,7 @@ static void refill_stock(struct mem_cgro
+@@ -2159,7 +2192,7 @@ static void refill_stock(struct mem_cgro
*/
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
@@ -231,7 +231,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* If someone's already draining, avoid adding running more workers. */
if (!mutex_trylock(&percpu_charge_mutex))
-@@ -2163,7 +2196,7 @@ static void drain_all_stock(struct mem_c
+@@ -2170,7 +2203,7 @@ static void drain_all_stock(struct mem_c
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
@@ -240,7 +240,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -2179,14 +2212,10 @@ static void drain_all_stock(struct mem_c
+@@ -2186,14 +2219,10 @@ static void drain_all_stock(struct mem_c
rcu_read_unlock();
if (flush &&
@@ -258,7 +258,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_unlock(&percpu_charge_mutex);
}
-@@ -2587,7 +2616,7 @@ static int try_charge_memcg(struct mem_c
+@@ -2594,7 +2623,7 @@ static int try_charge_memcg(struct mem_c
done_restock:
if (batch > nr_pages)
@@ -267,7 +267,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If the hierarchy is above the normal consumption range, schedule
-@@ -2700,28 +2729,36 @@ static struct mem_cgroup *get_mem_cgroup
+@@ -2707,28 +2736,36 @@ static struct mem_cgroup *get_mem_cgroup
* can only be accessed after disabling interrupt. User context code can
* access interrupt object stock, but not vice versa.
*/
@@ -313,7 +313,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2899,7 +2936,8 @@ static void memcg_free_cache_id(int id)
+@@ -2911,7 +2948,8 @@ static void memcg_free_cache_id(int id)
* @nr_pages: number of pages to uncharge
*/
static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
@@ -323,7 +323,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct mem_cgroup *memcg;
-@@ -2907,7 +2945,7 @@ static void obj_cgroup_uncharge_pages(st
+@@ -2919,7 +2957,7 @@ static void obj_cgroup_uncharge_pages(st
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
page_counter_uncharge(&memcg->kmem, nr_pages);
@@ -332,7 +332,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
css_put(&memcg->css);
}
-@@ -2981,7 +3019,7 @@ void __memcg_kmem_uncharge_page(struct p
+@@ -2993,7 +3031,7 @@ void __memcg_kmem_uncharge_page(struct p
return;
objcg = __folio_objcg(folio);
@@ -341,7 +341,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
folio->memcg_data = 0;
obj_cgroup_put(objcg);
}
-@@ -2989,17 +3027,21 @@ void __memcg_kmem_uncharge_page(struct p
+@@ -3001,17 +3039,21 @@ void __memcg_kmem_uncharge_page(struct p
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
enum node_stat_item idx, int nr)
{
@@ -365,7 +365,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
obj_cgroup_get(objcg);
stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
-@@ -3043,38 +3085,43 @@ void mod_objcg_state(struct obj_cgroup *
+@@ -3055,38 +3097,43 @@ void mod_objcg_state(struct obj_cgroup *
if (nr)
mod_objcg_mlstate(objcg, pgdat, idx, nr);
@@ -415,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The leftover is flushed to the centralized per-memcg value.
-@@ -3109,8 +3156,8 @@ static void drain_obj_stock(struct obj_s
+@@ -3121,8 +3168,8 @@ static void drain_obj_stock(struct obj_s
stock->cached_pgdat = NULL;
}
@@ -425,7 +425,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
-@@ -3118,11 +3165,13 @@ static bool obj_stock_flush_required(str
+@@ -3130,11 +3177,13 @@ static bool obj_stock_flush_required(str
{
struct mem_cgroup *memcg;
@@ -439,7 +439,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (stock->irq_obj.cached_objcg) {
memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg);
if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
-@@ -3135,12 +3184,15 @@ static bool obj_stock_flush_required(str
+@@ -3147,12 +3196,15 @@ static bool obj_stock_flush_required(str
static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
bool allow_uncharge)
{
@@ -457,7 +457,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
obj_cgroup_get(objcg);
stock->cached_objcg = objcg;
stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
-@@ -3154,10 +3206,12 @@ static void refill_obj_stock(struct obj_
+@@ -3166,10 +3218,12 @@ static void refill_obj_stock(struct obj_
stock->nr_bytes &= (PAGE_SIZE - 1);
}
@@ -472,7 +472,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
-@@ -7041,7 +7095,7 @@ void mem_cgroup_uncharge_skmem(struct me
+@@ -7062,7 +7116,7 @@ void mem_cgroup_uncharge_skmem(struct me
mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
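The locking scheme introduced here boils down to the local_lock_t pattern; a
minimal standalone sketch with hypothetical names:

    struct my_pcp {
            local_lock_t lock;
            unsigned int nr;
    };
    static DEFINE_PER_CPU(struct my_pcp, my_stock) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void my_update(unsigned int n)
    {
            unsigned long flags;

            /* IRQ-off on !RT, a per-CPU spinlock (preemptible) on RT */
            local_lock_irqsave(&my_stock.lock, flags);
            this_cpu_add(my_stock.nr, n);
            local_unlock_irqrestore(&my_stock.lock, flags);
    }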
diff --git a/patches/0003-net-dev-Makes-sure-netif_rx-can-be-invoked-in-any-co.patch b/patches/0003-net-dev-Makes-sure-netif_rx-can-be-invoked-in-any-co.patch
index 4aa99938e63b..5f926571a28c 100644
--- a/patches/0003-net-dev-Makes-sure-netif_rx-can-be-invoked-in-any-co.patch
+++ b/patches/0003-net-dev-Makes-sure-netif_rx-can-be-invoked-in-any-co.patch
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -4003,8 +4003,17 @@ u32 bpf_prog_run_generic_xdp(struct sk_b
+@@ -3668,8 +3668,17 @@ u32 bpf_prog_run_generic_xdp(struct sk_b
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
- void netif_receive_skb_list(struct list_head *head);
+ void netif_receive_skb_list_internal(struct list_head *head);
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -260,13 +260,6 @@ DEFINE_EVENT(net_dev_rx_verbose_template
@@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4953,47 +4953,17 @@ int netif_rx(struct sk_buff *skb)
+@@ -4834,47 +4834,17 @@ int netif_rx(struct sk_buff *skb)
{
int ret;
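The resulting shape of netif_rx(), sketched from the commit intent (BH is
disabled around the internal function so the caller may be in any context;
an approximation, not the literal patch body):

    int netif_rx(struct sk_buff *skb)
    {
            int ret;

            local_bh_disable();
            ret = netif_rx_internal(skb);
            local_bh_enable();
            return ret;
    }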
diff --git a/patches/0003_kernel_fork_ia64_provide_a_alloc_thread_stack_node_for_ia64.patch b/patches/0003_kernel_fork_ia64_provide_a_alloc_thread_stack_node_for_ia64.patch
index fe5eaa4110bc..c6d41c7d9bc1 100644
--- a/patches/0003_kernel_fork_ia64_provide_a_alloc_thread_stack_node_for_ia64.patch
+++ b/patches/0003_kernel_fork_ia64_provide_a_alloc_thread_stack_node_for_ia64.patch
@@ -41,7 +41,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-4-bigeasy@linutronix.de
#define __HAVE_THREAD_FUNCTIONS
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -329,6 +329,22 @@ void thread_stack_cache_init(void)
+@@ -330,6 +330,22 @@ void thread_stack_cache_init(void)
}
# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
diff --git a/patches/0003_random_split_add_interrupt_randomness.patch b/patches/0003_random_split_add_interrupt_randomness.patch
index d94c0ca75a94..4a29e3e4905b 100644
--- a/patches/0003_random_split_add_interrupt_randomness.patch
+++ b/patches/0003_random_split_add_interrupt_randomness.patch
@@ -16,72 +16,73 @@ This is a preparation step to ease PREEMPT_RT support.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/20211207121737.2347312-4-bigeasy@linutronix.de
---
- drivers/char/random.c | 47 +++++++++++++++++++++++++++--------------------
- 1 file changed, 27 insertions(+), 20 deletions(-)
+ drivers/char/random.c | 51 +++++++++++++++++++++++++++-----------------------
+ 1 file changed, 28 insertions(+), 23 deletions(-)
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -1260,29 +1260,10 @@ static __u32 get_reg(struct fast_pool *f
+@@ -1220,6 +1220,33 @@ static u32 get_reg(struct fast_pool *f,
return *ptr;
}
--void add_interrupt_randomness(int irq)
+static void process_interrupt_randomness_pool(struct fast_pool *fast_pool)
- {
- struct entropy_store *r;
-- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
-- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
-- cycles_t cycles = random_get_entropy();
-- __u32 c_high, j_high;
-- __u64 ip;
--
-- if (cycles == 0)
-- cycles = get_reg(fast_pool, regs);
-- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
-- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
-- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
-- fast_pool->pool[1] ^= now ^ c_high;
-- ip = regs ? instruction_pointer(regs) : _RET_IP_;
-- fast_pool->pool[2] ^= ip;
-- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
-- get_reg(fast_pool, regs);
--
-- fast_mix(fast_pool);
-- add_interrupt_bench(cycles);
-
- if (unlikely(crng_init == 0)) {
- if ((fast_pool->count >= 64) &&
-@@ -1311,6 +1292,32 @@ void add_interrupt_randomness(int irq)
- /* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, 1);
- }
-+
-+void add_interrupt_randomness(int irq)
+{
-+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
-+ struct pt_regs *regs = get_irq_regs();
-+ unsigned long now = jiffies;
-+ cycles_t cycles = random_get_entropy();
-+ __u32 c_high, j_high;
-+ __u64 ip;
++ if (unlikely(crng_init == 0)) {
++ if ((fast_pool->count >= 64) &&
++ crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
++ fast_pool->count = 0;
++ fast_pool->last = jiffies;
++ }
++ return;
++ }
+
-+ if (cycles == 0)
-+ cycles = get_reg(fast_pool, regs);
-+ c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
-+ j_high = (sizeof(now) > 4) ? now >> 32 : 0;
-+ fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
-+ fast_pool->pool[1] ^= now ^ c_high;
-+ ip = regs ? instruction_pointer(regs) : _RET_IP_;
-+ fast_pool->pool[2] ^= ip;
-+ fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
-+ get_reg(fast_pool, regs);
++ if ((fast_pool->count < 64) && !time_after(jiffies, fast_pool->last + HZ))
++ return;
+
-+ fast_mix(fast_pool);
-+ add_interrupt_bench(cycles);
++ if (!spin_trylock(&input_pool.lock))
++ return;
+
-+ process_interrupt_randomness_pool(fast_pool);
++ fast_pool->last = jiffies;
++ __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
++ spin_unlock(&input_pool.lock);
++
++ fast_pool->count = 0;
++
++ /* award one bit for the contents of the fast pool */
++ credit_entropy_bits(1);
+}
++
+ void add_interrupt_randomness(int irq)
+ {
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+@@ -1243,29 +1270,7 @@ void add_interrupt_randomness(int irq)
+ fast_mix(fast_pool);
+ add_interrupt_bench(cycles);
+
+- if (unlikely(crng_init == 0)) {
+- if ((fast_pool->count >= 64) &&
+- crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
+- fast_pool->count = 0;
+- fast_pool->last = now;
+- }
+- return;
+- }
+-
+- if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
+- return;
+-
+- if (!spin_trylock(&input_pool.lock))
+- return;
+-
+- fast_pool->last = now;
+- __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
+- spin_unlock(&input_pool.lock);
+-
+- fast_pool->count = 0;
+-
+- /* award one bit for the contents of the fast pool */
+- credit_entropy_bits(1);
++ process_interrupt_randomness_pool(fast_pool);
+ }
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
- #ifdef CONFIG_BLOCK
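A note on the shape of this split: the hard-IRQ hot path keeps only the lock-free fast_mix() work, while everything that may contend on input_pool.lock is isolated in process_interrupt_randomness_pool(), so a later change can invoke it from task context. A minimal user-space sketch of the same pattern (process_pool(), add_event() and the pthread locking are illustrative assumptions, not the kernel API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <string.h>

    struct fast_pool {
            unsigned long pool[4];
            unsigned int count;
    };

    static pthread_mutex_t input_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned char input_pool[128];

    /* Slow path, kept out of the hot path: only trylocks, because an
     * interrupt handler must never spin on a contended pool lock. */
    static bool process_pool(struct fast_pool *fp)
    {
            if (pthread_mutex_trylock(&input_lock) != 0)
                    return false;
            memcpy(input_pool, fp->pool, sizeof(fp->pool));
            pthread_mutex_unlock(&input_lock);
            fp->count = 0;
            return true;
    }

    /* Fast path: cheap, lock-free mixing, then the hand-off. */
    void add_event(struct fast_pool *fp, unsigned long datum)
    {
            fp->pool[fp->count++ & 3] ^= datum;
            process_pool(fp);
    }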
diff --git a/patches/0003_zsmalloc_decouple_class_actions_from_zspage_works.patch b/patches/0003_zsmalloc_decouple_class_actions_from_zspage_works.patch
deleted file mode 100644
index a1af4aa766fc..000000000000
--- a/patches/0003_zsmalloc_decouple_class_actions_from_zspage_works.patch
+++ /dev/null
@@ -1,131 +0,0 @@
-From: Minchan Kim <minchan@kernel.org>
-Subject: zsmalloc: decouple class actions from zspage works
-Date: Mon, 15 Nov 2021 10:59:03 -0800
-
-This patch moves the class stat update out of obj_malloc, since
-it is not related to the zspage operation.
-This is a preparation for the new lock scheme introduced in the
-next patch.
-
-Signed-off-by: Minchan Kim <minchan@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211115185909.3949505-4-minchan@kernel.org
----
- mm/zsmalloc.c | 23 +++++++++++++----------
- 1 file changed, 13 insertions(+), 10 deletions(-)
-
---- a/mm/zsmalloc.c
-+++ b/mm/zsmalloc.c
-@@ -1360,17 +1360,19 @@ size_t zs_huge_class_size(struct zs_pool
- }
- EXPORT_SYMBOL_GPL(zs_huge_class_size);
-
--static unsigned long obj_malloc(struct size_class *class,
-+static unsigned long obj_malloc(struct zs_pool *pool,
- struct zspage *zspage, unsigned long handle)
- {
- int i, nr_page, offset;
- unsigned long obj;
- struct link_free *link;
-+ struct size_class *class;
-
- struct page *m_page;
- unsigned long m_offset;
- void *vaddr;
-
-+ class = pool->size_class[zspage->class];
- handle |= OBJ_ALLOCATED_TAG;
- obj = get_freeobj(zspage);
-
-@@ -1394,7 +1396,6 @@ static unsigned long obj_malloc(struct s
-
- kunmap_atomic(vaddr);
- mod_zspage_inuse(zspage, 1);
-- class_stat_inc(class, OBJ_USED, 1);
-
- obj = location_to_obj(m_page, obj);
-
-@@ -1433,10 +1434,11 @@ unsigned long zs_malloc(struct zs_pool *
- spin_lock(&class->lock);
- zspage = find_get_zspage(class);
- if (likely(zspage)) {
-- obj = obj_malloc(class, zspage, handle);
-+ obj = obj_malloc(pool, zspage, handle);
- /* Now move the zspage to another fullness group, if required */
- fix_fullness_group(class, zspage);
- record_obj(handle, obj);
-+ class_stat_inc(class, OBJ_USED, 1);
- spin_unlock(&class->lock);
-
- return handle;
-@@ -1451,7 +1453,7 @@ unsigned long zs_malloc(struct zs_pool *
- }
-
- spin_lock(&class->lock);
-- obj = obj_malloc(class, zspage, handle);
-+ obj = obj_malloc(pool, zspage, handle);
- newfg = get_fullness_group(class, zspage);
- insert_zspage(class, zspage, newfg);
- set_zspage_mapping(zspage, class->index, newfg);
-@@ -1459,6 +1461,7 @@ unsigned long zs_malloc(struct zs_pool *
- atomic_long_add(class->pages_per_zspage,
- &pool->pages_allocated);
- class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
-+ class_stat_inc(class, OBJ_USED, 1);
-
- /* We completely set up zspage so mark them as movable */
- SetZsPageMovable(pool, zspage);
-@@ -1468,7 +1471,7 @@ unsigned long zs_malloc(struct zs_pool *
- }
- EXPORT_SYMBOL_GPL(zs_malloc);
-
--static void obj_free(struct size_class *class, unsigned long obj)
-+static void obj_free(int class_size, unsigned long obj)
- {
- struct link_free *link;
- struct zspage *zspage;
-@@ -1478,7 +1481,7 @@ static void obj_free(struct size_class *
- void *vaddr;
-
- obj_to_location(obj, &f_page, &f_objidx);
-- f_offset = (class->size * f_objidx) & ~PAGE_MASK;
-+ f_offset = (class_size * f_objidx) & ~PAGE_MASK;
- zspage = get_zspage(f_page);
-
- vaddr = kmap_atomic(f_page);
-@@ -1489,7 +1492,6 @@ static void obj_free(struct size_class *
- kunmap_atomic(vaddr);
- set_freeobj(zspage, f_objidx);
- mod_zspage_inuse(zspage, -1);
-- class_stat_dec(class, OBJ_USED, 1);
- }
-
- void zs_free(struct zs_pool *pool, unsigned long handle)
-@@ -1513,7 +1515,8 @@ void zs_free(struct zs_pool *pool, unsig
- class = zspage_class(pool, zspage);
-
- spin_lock(&class->lock);
-- obj_free(class, obj);
-+ obj_free(class->size, obj);
-+ class_stat_dec(class, OBJ_USED, 1);
- fullness = fix_fullness_group(class, zspage);
- if (fullness != ZS_EMPTY) {
- migrate_read_unlock(zspage);
-@@ -1671,7 +1674,7 @@ static int migrate_zspage(struct zs_pool
- }
-
- used_obj = handle_to_obj(handle);
-- free_obj = obj_malloc(class, get_zspage(d_page), handle);
-+ free_obj = obj_malloc(pool, get_zspage(d_page), handle);
- zs_object_copy(class, free_obj, used_obj);
- obj_idx++;
- /*
-@@ -1683,7 +1686,7 @@ static int migrate_zspage(struct zs_pool
- free_obj |= BIT(HANDLE_PIN_BIT);
- record_obj(handle, free_obj);
- unpin_tag(handle);
-- obj_free(class, used_obj);
-+ obj_free(class->size, used_obj);
- }
-
- /* Remember last position in this iteration */
diff --git a/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch b/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
index 61f6fe7a4aa9..9cc17b2707d8 100644
--- a/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
+++ b/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
-@@ -425,7 +425,8 @@ void intel_pipe_update_start(const struc
+@@ -517,7 +517,8 @@ void intel_pipe_update_start(struct inte
*/
intel_psr_wait_for_idle(new_crtc_state);
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
-@@ -450,11 +451,13 @@ void intel_pipe_update_start(const struc
+@@ -542,11 +543,13 @@ void intel_pipe_update_start(struct inte
break;
}
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
finish_wait(wq, &wait);
-@@ -487,7 +490,8 @@ void intel_pipe_update_start(const struc
+@@ -579,7 +582,8 @@ void intel_pipe_update_start(struct inte
return;
irq_disable:
@@ -72,13 +72,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
-@@ -566,7 +570,8 @@ void intel_pipe_update_end(struct intel_
- new_crtc_state->uapi.event = NULL;
- }
+@@ -678,7 +682,8 @@ void intel_pipe_update_end(struct intel_
+ */
+ intel_vrr_send_push(new_crtc_state);
- local_irq_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_enable();
- /* Send VRR Push to terminate Vblank */
- intel_vrr_send_push(new_crtc_state);
+ if (intel_vgpu_active(dev_priv))
+ return;
diff --git a/patches/0004-locking-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_m.patch b/patches/0004-locking-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_m.patch
deleted file mode 100644
index 59dd5c9390a2..000000000000
--- a/patches/0004-locking-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_m.patch
+++ /dev/null
@@ -1,116 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Nov 2021 18:46:47 +0100
-Subject: [PATCH 04/11] locking/rtmutex: Add rt_mutex_lock_nest_lock() and
- rt_mutex_lock_killable().
-
-The locking selftest for ww-mutex expects to operate directly on the
-base mutex, which becomes an rtmutex on PREEMPT_RT.
-
-Add an rtmutex based implementation of mutex_lock_nest_lock() and
-mutex_lock_killable(), named rt_mutex_lock_nest_lock() and
-rt_mutex_lock_killable().
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-5-bigeasy@linutronix.de
----
- include/linux/rtmutex.h | 9 +++++++++
- kernel/locking/rtmutex_api.c | 30 ++++++++++++++++++++++++++----
- 2 files changed, 35 insertions(+), 4 deletions(-)
-
---- a/include/linux/rtmutex.h
-+++ b/include/linux/rtmutex.h
-@@ -99,13 +99,22 @@ extern void __rt_mutex_init(struct rt_mu
-
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
-+extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
- #define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
-+#define rt_mutex_lock_nest_lock(lock, nest_lock) \
-+ do { \
-+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
-+ _rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
-+ } while (0)
-+
- #else
- extern void rt_mutex_lock(struct rt_mutex *lock);
- #define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
-+#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
- #endif
-
- extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
- extern int rt_mutex_trylock(struct rt_mutex *lock);
-
- extern void rt_mutex_unlock(struct rt_mutex *lock);
---- a/kernel/locking/rtmutex_api.c
-+++ b/kernel/locking/rtmutex_api.c
-@@ -21,12 +21,13 @@ int max_lock_depth = 1024;
- */
- static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
- unsigned int state,
-+ struct lockdep_map *nest_lock,
- unsigned int subclass)
- {
- int ret;
-
- might_sleep();
-- mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-+ mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
- ret = __rt_mutex_lock(&lock->rtmutex, state);
- if (ret)
- mutex_release(&lock->dep_map, _RET_IP_);
-@@ -48,10 +49,16 @@ EXPORT_SYMBOL(rt_mutex_base_init);
- */
- void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
- {
-- __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
-+ __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
- }
- EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
-
-+void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
-+{
-+ __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
-+}
-+EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);
-+
- #else /* !CONFIG_DEBUG_LOCK_ALLOC */
-
- /**
-@@ -61,7 +68,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
- */
- void __sched rt_mutex_lock(struct rt_mutex *lock)
- {
-- __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-+ __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
- }
- EXPORT_SYMBOL_GPL(rt_mutex_lock);
- #endif
-@@ -77,11 +84,26 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
- */
- int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
- {
-- return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
-+ return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
- }
- EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-
- /**
-+ * rt_mutex_lock_killable - lock a rt_mutex killable
-+ *
-+ * @lock: the rt_mutex to be locked
-+ *
-+ * Returns:
-+ * 0 on success
-+ * -EINTR when interrupted by a signal
-+ */
-+int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
-+{
-+ return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
-+}
-+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-+
-+/**
- * rt_mutex_trylock - try to lock a rt_mutex
- *
- * @lock: the rt_mutex to be locked
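For reference, the nest_lock annotation that this dropped patch mirrored from the mutex API tells lockdep that several locks of one class are all serialized by an outer lock, so acquiring them together is not a deadlock. A hedged sketch using the regular mutex interface (struct ctx and struct obj are made-up types for illustration):

    struct ctx { struct mutex outer; };
    struct obj { struct mutex lock; };

    /* Every obj->lock shares one lockdep class; taking several of
     * them would normally trip lockdep, but the annotation records
     * that ctx->outer serializes all of them. */
    static void lock_all(struct ctx *c, struct obj **v, int n)
    {
            int i;

            mutex_lock(&c->outer);
            for (i = 0; i < n; i++)
                    mutex_lock_nest_lock(&v[i]->lock, &c->outer);
    }

The dropped rt_mutex_lock_nest_lock() provided the same annotation for locks that turn into rtmutexes on PREEMPT_RT.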
diff --git a/patches/0004-mm-memcg-Allow-the-task_obj-optimization-only-on-non.patch b/patches/0004-mm-memcg-Allow-the-task_obj-optimization-only-on-non.patch
index 1bfba2709995..11c3043d3148 100644
--- a/patches/0004-mm-memcg-Allow-the-task_obj-optimization-only-on-non.patch
+++ b/patches/0004-mm-memcg-Allow-the-task_obj-optimization-only-on-non.patch
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2023,7 +2023,7 @@ struct memcg_stock_pcp {
+@@ -2030,7 +2030,7 @@ struct memcg_stock_pcp {
local_lock_t stock_lock;
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Protects only task_obj */
local_lock_t task_obj_lock;
struct obj_stock task_obj;
-@@ -2036,7 +2036,7 @@ struct memcg_stock_pcp {
+@@ -2043,7 +2043,7 @@ struct memcg_stock_pcp {
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
.stock_lock = INIT_LOCAL_LOCK(stock_lock),
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.task_obj_lock = INIT_LOCAL_LOCK(task_obj_lock),
#endif
};
-@@ -2125,7 +2125,7 @@ static void drain_local_stock(struct wor
+@@ -2132,7 +2132,7 @@ static void drain_local_stock(struct wor
* drain_stock races is that we always operate on local CPU stock
* here with IRQ disabled
*/
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_lock(&memcg_stock.task_obj_lock);
old = drain_obj_stock(&this_cpu_ptr(&memcg_stock)->task_obj, NULL);
local_unlock(&memcg_stock.task_obj_lock);
-@@ -2734,7 +2734,7 @@ static inline struct obj_stock *get_obj_
+@@ -2741,7 +2741,7 @@ static inline struct obj_stock *get_obj_
{
struct memcg_stock_pcp *stock;
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (likely(in_task())) {
*pflags = 0UL;
*stock_lock_acquried = false;
-@@ -2752,7 +2752,7 @@ static inline struct obj_stock *get_obj_
+@@ -2759,7 +2759,7 @@ static inline struct obj_stock *get_obj_
static inline void put_obj_stock(unsigned long flags,
bool stock_lock_acquried)
{
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (likely(!stock_lock_acquried)) {
local_unlock(&memcg_stock.task_obj_lock);
return;
-@@ -3165,7 +3165,7 @@ static bool obj_stock_flush_required(str
+@@ -3177,7 +3177,7 @@ static bool obj_stock_flush_required(str
{
struct mem_cgroup *memcg;
diff --git a/patches/0004-net-dev-Make-rps_lock-disable-interrupts.patch b/patches/0004-net-dev-Make-rps_lock-disable-interrupts.patch
index ed029448f9b5..74e36a936816 100644
--- a/patches/0004-net-dev-Make-rps_lock-disable-interrupts.patch
+++ b/patches/0004-net-dev-Make-rps_lock-disable-interrupts.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -222,18 +222,38 @@ static inline struct hlist_head *dev_ind
+@@ -216,18 +216,38 @@ static inline struct hlist_head *dev_ind
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
-@@ -4644,9 +4664,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -4525,9 +4545,7 @@ static int enqueue_to_backlog(struct sk_
sd = &per_cpu(softnet_data, cpu);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!netif_running(skb->dev))
goto drop;
qlen = skb_queue_len(&sd->input_pkt_queue);
-@@ -4655,26 +4673,30 @@ static int enqueue_to_backlog(struct sk_
+@@ -4536,26 +4554,30 @@ static int enqueue_to_backlog(struct sk_
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
input_queue_tail_incr_save(sd, qtail);
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -5735,8 +5757,7 @@ static void flush_backlog(struct work_st
+@@ -5617,8 +5639,7 @@ static void flush_backlog(struct work_st
local_bh_disable();
sd = this_cpu_ptr(&softnet_data);
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
-@@ -5744,8 +5765,7 @@ static void flush_backlog(struct work_st
+@@ -5626,8 +5647,7 @@ static void flush_backlog(struct work_st
input_queue_head_incr(sd);
}
}
@@ -137,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
-@@ -5763,16 +5783,14 @@ static bool flush_required(int cpu)
+@@ -5645,16 +5665,14 @@ static bool flush_required(int cpu)
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
bool do_flush;
@@ -156,7 +156,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return do_flush;
#endif
-@@ -6431,8 +6449,7 @@ static int process_backlog(struct napi_s
+@@ -5769,8 +5787,7 @@ static int process_backlog(struct napi_s
}
@@ -166,7 +166,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
* Inline a custom version of __napi_complete().
-@@ -6448,8 +6465,7 @@ static int process_backlog(struct napi_s
+@@ -5786,8 +5803,7 @@ static int process_backlog(struct napi_s
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
}
diff --git a/patches/0004_kernel_fork_don_t_assign_the_stack_pointer_in_dup_task_struct.patch b/patches/0004_kernel_fork_don_t_assign_the_stack_pointer_in_dup_task_struct.patch
index 680760cc4753..1f5b2e7a0b83 100644
--- a/patches/0004_kernel_fork_don_t_assign_the_stack_pointer_in_dup_task_struct.patch
+++ b/patches/0004_kernel_fork_don_t_assign_the_stack_pointer_in_dup_task_struct.patch
@@ -16,7 +16,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-5-bigeasy@linutronix.de
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -210,7 +210,7 @@ static int free_vm_stack_cache(unsigned
+@@ -211,7 +211,7 @@ static int free_vm_stack_cache(unsigned
return 0;
}
@@ -25,7 +25,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-5-bigeasy@linutronix.de
{
void *stack;
int i;
-@@ -231,7 +231,7 @@ static unsigned long *alloc_thread_stack
+@@ -232,7 +232,7 @@ static unsigned long *alloc_thread_stack
tsk->stack_vm_area = s;
tsk->stack = s->addr;
@@ -34,7 +34,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-5-bigeasy@linutronix.de
}
/*
-@@ -244,17 +244,16 @@ static unsigned long *alloc_thread_stack
+@@ -245,17 +245,16 @@ static unsigned long *alloc_thread_stack
THREADINFO_GFP & ~__GFP_ACCOUNT,
PAGE_KERNEL,
0, node, __builtin_return_address(0));
@@ -57,7 +57,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-5-bigeasy@linutronix.de
}
static void free_thread_stack(struct task_struct *tsk)
-@@ -281,16 +280,16 @@ static void free_thread_stack(struct tas
+@@ -282,16 +281,16 @@ static void free_thread_stack(struct tas
# else /* !CONFIG_VMAP_STACK */
@@ -77,7 +77,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-5-bigeasy@linutronix.de
}
static void free_thread_stack(struct task_struct *tsk)
-@@ -304,14 +303,13 @@ static void free_thread_stack(struct tas
+@@ -305,14 +304,13 @@ static void free_thread_stack(struct tas
static struct kmem_cache *thread_stack_cache;
@@ -94,7 +94,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-5-bigeasy@linutronix.de
}
static void free_thread_stack(struct task_struct *tsk)
-@@ -331,13 +329,13 @@ void thread_stack_cache_init(void)
+@@ -332,13 +330,13 @@ void thread_stack_cache_init(void)
# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
@@ -110,7 +110,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-5-bigeasy@linutronix.de
}
static void free_thread_stack(struct task_struct *tsk, bool cache_only)
-@@ -893,8 +891,6 @@ void set_task_stack_end_magic(struct tas
+@@ -894,8 +892,6 @@ void set_task_stack_end_magic(struct tas
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
struct task_struct *tsk;
@@ -119,7 +119,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-5-bigeasy@linutronix.de
int err;
if (node == NUMA_NO_NODE)
-@@ -907,24 +903,13 @@ static struct task_struct *dup_task_stru
+@@ -908,24 +904,13 @@ static struct task_struct *dup_task_stru
if (err)
goto free_tsk;
diff --git a/patches/0004_random_move_the_fast_pool_reset_into_the_caller.patch b/patches/0004_random_move_the_fast_pool_reset_into_the_caller.patch
index 9c8b5dc57660..b3e5b858ff54 100644
--- a/patches/0004_random_move_the_fast_pool_reset_into_the_caller.patch
+++ b/patches/0004_random_move_the_fast_pool_reset_into_the_caller.patch
@@ -11,61 +11,53 @@ This is a preparation step to ease PREEMPT_RT support.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/20211207121737.2347312-5-bigeasy@linutronix.de
---
- drivers/char/random.c | 29 +++++++++++++++--------------
- 1 file changed, 15 insertions(+), 14 deletions(-)
+ drivers/char/random.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -1260,37 +1260,35 @@ static __u32 get_reg(struct fast_pool *f
+@@ -1220,31 +1220,29 @@ static u32 get_reg(struct fast_pool *f,
return *ptr;
}
-static void process_interrupt_randomness_pool(struct fast_pool *fast_pool)
+static bool process_interrupt_randomness_pool(struct fast_pool *fast_pool)
{
- struct entropy_store *r;
-- unsigned long now = jiffies;
-
if (unlikely(crng_init == 0)) {
+ bool pool_reset = false;
+
if ((fast_pool->count >= 64) &&
- crng_fast_load((char *) fast_pool->pool,
-- sizeof(fast_pool->pool))) {
+- crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
- fast_pool->count = 0;
-- fast_pool->last = now;
+- fast_pool->last = jiffies;
- }
- return;
-+ sizeof(fast_pool->pool)))
++ crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0)
+ pool_reset = true;
-+
+ return pool_reset;
}
- if ((fast_pool->count < 64) &&
-- !time_after(now, fast_pool->last + HZ))
+ if ((fast_pool->count < 64) && !time_after(jiffies, fast_pool->last + HZ))
- return;
-+ !time_after(jiffies, fast_pool->last + HZ))
+ return false;
- r = &input_pool;
- if (!spin_trylock(&r->lock))
+ if (!spin_trylock(&input_pool.lock))
- return;
+ return false;
-- fast_pool->last = now;
- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
- spin_unlock(&r->lock);
+- fast_pool->last = jiffies;
+ __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
+ spin_unlock(&input_pool.lock);
- fast_pool->count = 0;
-
/* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, 1);
+ credit_entropy_bits(1);
+ return true;
}
void add_interrupt_randomness(int irq)
-@@ -1316,7 +1314,10 @@ void add_interrupt_randomness(int irq)
+@@ -1270,7 +1268,10 @@ void add_interrupt_randomness(int irq)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
diff --git a/patches/0004_zsmalloc_introduce_obj_allocated.patch b/patches/0004_zsmalloc_introduce_obj_allocated.patch
deleted file mode 100644
index eb344cd8835f..000000000000
--- a/patches/0004_zsmalloc_introduce_obj_allocated.patch
+++ /dev/null
@@ -1,102 +0,0 @@
-From: Minchan Kim <minchan@kernel.org>
-Subject: zsmalloc: introduce obj_allocated
-Date: Mon, 15 Nov 2021 10:59:04 -0800
-
-The usage pattern for obj_to_head is to check whether the object
-is allocated or not. Thus, introduce obj_allocated.
-
-Signed-off-by: Minchan Kim <minchan@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211115185909.3949505-5-minchan@kernel.org
----
- mm/zsmalloc.c | 33 ++++++++++++++++-----------------
- 1 file changed, 16 insertions(+), 17 deletions(-)
-
---- a/mm/zsmalloc.c
-+++ b/mm/zsmalloc.c
-@@ -877,13 +877,21 @@ static unsigned long handle_to_obj(unsig
- return *(unsigned long *)handle;
- }
-
--static unsigned long obj_to_head(struct page *page, void *obj)
-+static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
- {
-+ unsigned long handle;
-+
- if (unlikely(PageHugeObject(page))) {
- VM_BUG_ON_PAGE(!is_first_page(page), page);
-- return page->index;
-+ handle = page->index;
- } else
-- return *(unsigned long *)obj;
-+ handle = *(unsigned long *)obj;
-+
-+ if (!(handle & OBJ_ALLOCATED_TAG))
-+ return false;
-+
-+ *phandle = handle & ~OBJ_ALLOCATED_TAG;
-+ return true;
- }
-
- static inline int testpin_tag(unsigned long handle)
-@@ -1606,7 +1614,6 @@ static void zs_object_copy(struct size_c
- static unsigned long find_alloced_obj(struct size_class *class,
- struct page *page, int *obj_idx)
- {
-- unsigned long head;
- int offset = 0;
- int index = *obj_idx;
- unsigned long handle = 0;
-@@ -1616,9 +1623,7 @@ static unsigned long find_alloced_obj(st
- offset += class->size * index;
-
- while (offset < PAGE_SIZE) {
-- head = obj_to_head(page, addr + offset);
-- if (head & OBJ_ALLOCATED_TAG) {
-- handle = head & ~OBJ_ALLOCATED_TAG;
-+ if (obj_allocated(page, addr + offset, &handle)) {
- if (trypin_tag(handle))
- break;
- handle = 0;
-@@ -1928,7 +1933,7 @@ static int zs_page_migrate(struct addres
- struct page *dummy;
- void *s_addr, *d_addr, *addr;
- int offset, pos;
-- unsigned long handle, head;
-+ unsigned long handle;
- unsigned long old_obj, new_obj;
- unsigned int obj_idx;
- int ret = -EAGAIN;
-@@ -1964,9 +1969,7 @@ static int zs_page_migrate(struct addres
- pos = offset;
- s_addr = kmap_atomic(page);
- while (pos < PAGE_SIZE) {
-- head = obj_to_head(page, s_addr + pos);
-- if (head & OBJ_ALLOCATED_TAG) {
-- handle = head & ~OBJ_ALLOCATED_TAG;
-+ if (obj_allocated(page, s_addr + pos, &handle)) {
- if (!trypin_tag(handle))
- goto unpin_objects;
- }
-@@ -1982,9 +1985,7 @@ static int zs_page_migrate(struct addres
-
- for (addr = s_addr + offset; addr < s_addr + pos;
- addr += class->size) {
-- head = obj_to_head(page, addr);
-- if (head & OBJ_ALLOCATED_TAG) {
-- handle = head & ~OBJ_ALLOCATED_TAG;
-+ if (obj_allocated(page, addr, &handle)) {
- BUG_ON(!testpin_tag(handle));
-
- old_obj = handle_to_obj(handle);
-@@ -2029,9 +2030,7 @@ static int zs_page_migrate(struct addres
- unpin_objects:
- for (addr = s_addr + offset; addr < s_addr + pos;
- addr += class->size) {
-- head = obj_to_head(page, addr);
-- if (head & OBJ_ALLOCATED_TAG) {
-- handle = head & ~OBJ_ALLOCATED_TAG;
-+ if (obj_allocated(page, addr, &handle)) {
- BUG_ON(!testpin_tag(handle));
- unpin_tag(handle);
- }
diff --git a/patches/0005-lockdep-Remove-softirq-accounting-on-PREEMPT_RT.patch b/patches/0005-lockdep-Remove-softirq-accounting-on-PREEMPT_RT.patch
deleted file mode 100644
index 0d164d3cf8f4..000000000000
--- a/patches/0005-lockdep-Remove-softirq-accounting-on-PREEMPT_RT.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 29 Nov 2021 18:46:48 +0100
-Subject: [PATCH 05/11] lockdep: Remove softirq accounting on PREEMPT_RT.
-
-There is not really a softirq context on PREEMPT_RT.
-Softirqs on PREEMPT_RT are always invoked within the context of a
-threaded interrupt handler or within ksoftirqd. The "in-softirq" context
-is preemptible and is protected by a per-CPU lock to ensure mutual
-exclusion.
-
-There is no difference on PREEMPT_RT between spin_lock_irq() and
-spin_lock() because the former does not disable interrupts. Therefore if
-a lock is used in_softirq() and locked once with spin_lock_irq() then
-lockdep will report this with "inconsistent {SOFTIRQ-ON-W} ->
-{IN-SOFTIRQ-W} usage".
-
-Teach lockdep that we don't really do softirqs on -RT.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-6-bigeasy@linutronix.de
----
- include/linux/irqflags.h | 23 +++++++++++++++--------
- kernel/locking/lockdep.c | 2 ++
- 2 files changed, 17 insertions(+), 8 deletions(-)
-
---- a/include/linux/irqflags.h
-+++ b/include/linux/irqflags.h
-@@ -71,14 +71,6 @@ do { \
- do { \
- __this_cpu_dec(hardirq_context); \
- } while (0)
--# define lockdep_softirq_enter() \
--do { \
-- current->softirq_context++; \
--} while (0)
--# define lockdep_softirq_exit() \
--do { \
-- current->softirq_context--; \
--} while (0)
-
- # define lockdep_hrtimer_enter(__hrtimer) \
- ({ \
-@@ -140,6 +132,21 @@ do { \
- # define lockdep_irq_work_exit(__work) do { } while (0)
- #endif
-
-+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
-+# define lockdep_softirq_enter() \
-+do { \
-+ current->softirq_context++; \
-+} while (0)
-+# define lockdep_softirq_exit() \
-+do { \
-+ current->softirq_context--; \
-+} while (0)
-+
-+#else
-+# define lockdep_softirq_enter() do { } while (0)
-+# define lockdep_softirq_exit() do { } while (0)
-+#endif
-+
- #if defined(CONFIG_IRQSOFF_TRACER) || \
- defined(CONFIG_PREEMPT_TRACER)
- extern void stop_critical_timings(void);
---- a/kernel/locking/lockdep.c
-+++ b/kernel/locking/lockdep.c
-@@ -5485,6 +5485,7 @@ static noinstr void check_flags(unsigned
- }
- }
-
-+#ifndef CONFIG_PREEMPT_RT
- /*
- * We dont accurately track softirq state in e.g.
- * hardirq contexts (such as on 4KSTACKS), so only
-@@ -5499,6 +5500,7 @@ static noinstr void check_flags(unsigned
- DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
- }
- }
-+#endif
-
- if (!debug_locks)
- print_irqtrace_events(current);
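The "inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W}" report that this dropped patch suppressed comes from a pattern like the following, which is harmless on PREEMPT_RT; a hedged sketch (kernel-style fragment, not a complete module):

    static DEFINE_SPINLOCK(lock);

    /* Task context: on PREEMPT_RT spin_lock_irq() does not disable
     * interrupts, so lockdep classifies the lock as SOFTIRQ-ON-W. */
    static void task_path(void)
    {
            spin_lock_irq(&lock);
            /* ... */
            spin_unlock_irq(&lock);
    }

    /* "Softirq" context -- on RT really a preemptible threaded
     * handler or ksoftirqd -- takes the same lock: IN-SOFTIRQ-W.
     * Stock lockdep flags the combination even though RT's per-CPU
     * softirq lock already guarantees mutual exclusion. */
    static void softirq_path(void)
    {
            spin_lock(&lock);
            /* ... */
            spin_unlock(&lock);
    }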
diff --git a/patches/0005-printk-refactor-and-rework-printing-logic.patch b/patches/0005-printk-refactor-and-rework-printing-logic.patch
index 97e3fbd0477c..5cc54477ded0 100644
--- a/patches/0005-printk-refactor-and-rework-printing-logic.patch
+++ b/patches/0005-printk-refactor-and-rework-printing-logic.patch
@@ -21,8 +21,8 @@ Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/console.h | 2
- kernel/printk/printk.c | 380 ++++++++++++++++++++++++------------------------
- 2 files changed, 194 insertions(+), 188 deletions(-)
+ kernel/printk/printk.c | 382 ++++++++++++++++++++++++------------------------
+ 2 files changed, 196 insertions(+), 188 deletions(-)
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -267,11 +267,6 @@ static void __up_console_sem(unsigned lo
+@@ -268,11 +268,6 @@ static void __up_console_sem(unsigned lo
static int console_locked, console_suspended;
/*
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static size_t record_print_text(const struct printk_record *r,
bool syslog, bool time)
-@@ -2300,8 +2263,7 @@ static ssize_t msg_print_ext_body(char *
+@@ -2300,9 +2263,10 @@ static ssize_t msg_print_ext_body(char *
struct dev_printk_info *dev_info) { return 0; }
static void console_lock_spinning_enable(void) { }
static int console_lock_spinning_disable_and_check(void) { return 0; }
@@ -151,9 +151,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- const char *text, size_t len) {}
+static void call_console_driver(struct console *con, const char *text, size_t len) {}
static bool suppress_message_printing(int level) { return false; }
++static inline void boot_delay_msec(int level) { }
++static inline void printk_delay(void) { }
#endif /* CONFIG_PRINTK */
-@@ -2560,31 +2522,166 @@ int is_console_locked(void)
+
+@@ -2560,31 +2524,166 @@ int is_console_locked(void)
EXPORT_SYMBOL(is_console_locked);
/*
@@ -196,9 +199,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+{
+ console_locked = 0;
+ up_console_sem();
-+}
-+
-+/*
+ }
+
+ /*
+- * Can we actually use the console at this time on this cpu?
+ * Print one record for the given console. The record printed is whatever
+ * record is the next available record for the given console.
+ *
@@ -206,13 +210,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Returns false if the given console has no next record to print, otherwise
+ * true.
-+ *
+ *
+- * Console drivers may assume that per-cpu resources have been allocated. So
+- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
+- * call them until per-cpu resources have been allocated.
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock.
-+ */
+ */
+-static inline int can_use_console(void)
+static bool console_emit_next_record(struct console *con, bool *handover)
-+{
+ {
+- return (printk_percpu_data_ready() || have_callable_console());
+ static char ext_text[CONSOLE_EXT_LOG_MAX];
+ static char text[CONSOLE_LOG_MAX];
+ struct printk_info info;
@@ -273,30 +282,24 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ printk_delay();
+skip:
+ return true;
- }
-
- /*
-- * Can we actually use the console at this time on this cpu?
++}
++
++/*
+ * Print out all remaining records to all consoles.
+ *
+ * Requires the console_lock.
+ *
+ * Returns true if a console was available for flushing, otherwise false.
- *
-- * Console drivers may assume that per-cpu resources have been allocated. So
-- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
-- * call them until per-cpu resources have been allocated.
++ *
+ * @next_seq is set to the highest sequence number of all of the consoles that
+ * were flushed.
+ *
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock.
- */
--static inline int can_use_console(void)
++ */
+static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
- {
-- return (printk_percpu_data_ready() || have_callable_console());
++{
+ bool any_usable = false;
+ struct console *con;
+ bool any_progress;
@@ -335,7 +338,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -2603,21 +2700,16 @@ static inline int can_use_console(void)
+@@ -2603,21 +2702,16 @@ static inline int can_use_console(void)
*/
void console_unlock(void)
{
@@ -361,7 +364,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Console drivers are called with interrupts disabled, so
* @console_may_schedule should be cleared before; however, we may
-@@ -2633,110 +2725,27 @@ void console_unlock(void)
+@@ -2633,110 +2727,27 @@ void console_unlock(void)
* and cleared after the "again" goto label.
*/
do_cond_resched = console_may_schedule;
@@ -485,7 +488,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL(console_unlock);
-@@ -2796,8 +2805,14 @@ void console_flush_on_panic(enum con_flu
+@@ -2796,8 +2807,14 @@ void console_flush_on_panic(enum con_flu
console_trylock();
console_may_schedule = 0;
@@ -502,7 +505,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_unlock();
}
-@@ -3019,26 +3034,15 @@ void register_console(struct console *ne
+@@ -3032,26 +3049,15 @@ void register_console(struct console *ne
if (newcon->flags & CON_EXTENDED)
nr_ext_console_drivers++;
diff --git a/patches/0005_kernel_fork_move_memcg_charge_kernel_stack_into_config_vmap_stack.patch b/patches/0005_kernel_fork_move_memcg_charge_kernel_stack_into_config_vmap_stack.patch
index 771eb276f9d1..31ec685e5c33 100644
--- a/patches/0005_kernel_fork_move_memcg_charge_kernel_stack_into_config_vmap_stack.patch
+++ b/patches/0005_kernel_fork_move_memcg_charge_kernel_stack_into_config_vmap_stack.patch
@@ -15,7 +15,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-6-bigeasy@linutronix.de
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -210,6 +210,32 @@ static int free_vm_stack_cache(unsigned
+@@ -211,6 +211,32 @@ static int free_vm_stack_cache(unsigned
return 0;
}
@@ -48,7 +48,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-6-bigeasy@linutronix.de
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
void *stack;
-@@ -229,6 +255,11 @@ static int alloc_thread_stack_node(struc
+@@ -230,6 +256,11 @@ static int alloc_thread_stack_node(struc
/* Clear stale pointers from reused stack. */
memset(s->addr, 0, THREAD_SIZE);
@@ -60,7 +60,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-6-bigeasy@linutronix.de
tsk->stack_vm_area = s;
tsk->stack = s->addr;
return 0;
-@@ -246,6 +277,11 @@ static int alloc_thread_stack_node(struc
+@@ -247,6 +278,11 @@ static int alloc_thread_stack_node(struc
0, node, __builtin_return_address(0));
if (!stack)
return -ENOMEM;
@@ -72,7 +72,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-6-bigeasy@linutronix.de
/*
* We can't call find_vm_area() in interrupt context, and
* free_thread_stack() can be called in interrupt context,
-@@ -414,36 +450,6 @@ static void account_kernel_stack(struct
+@@ -417,36 +453,6 @@ static void account_kernel_stack(struct
}
}
@@ -109,7 +109,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-6-bigeasy@linutronix.de
static void release_task_stack(struct task_struct *tsk)
{
if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
-@@ -907,9 +913,6 @@ static struct task_struct *dup_task_stru
+@@ -908,9 +914,6 @@ static struct task_struct *dup_task_stru
if (err)
goto free_tsk;
diff --git a/patches/0005_random_defer_processing_of_randomness_on_preempt_rt.patch b/patches/0005_random_defer_processing_of_randomness_on_preempt_rt.patch
index 70bb19289067..9f0d4c359ffd 100644
--- a/patches/0005_random_defer_processing_of_randomness_on_preempt_rt.patch
+++ b/patches/0005_random_defer_processing_of_randomness_on_preempt_rt.patch
@@ -32,14 +32,14 @@ afterwards if needed.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/20211207121737.2347312-6-bigeasy@linutronix.de
---
- drivers/char/random.c | 39 ++++++++++++++++++++++++++++++++++++---
+ drivers/char/random.c | 34 +++++++++++++++++++++++++++++++++-
include/linux/random.h | 1 +
kernel/irq/manage.c | 3 +++
- 3 files changed, 40 insertions(+), 3 deletions(-)
+ 3 files changed, 37 insertions(+), 1 deletion(-)
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -1291,6 +1291,32 @@ static bool process_interrupt_randomness
+@@ -1245,6 +1245,32 @@ static bool process_interrupt_randomness
return true;
}
@@ -71,27 +71,22 @@ Link: https://lore.kernel.org/r/20211207121737.2347312-6-bigeasy@linutronix.de
+
void add_interrupt_randomness(int irq)
{
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
-@@ -1314,9 +1340,16 @@ void add_interrupt_randomness(int irq)
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+@@ -1268,7 +1294,13 @@ void add_interrupt_randomness(int irq)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
- if (process_interrupt_randomness_pool(fast_pool)) {
-- fast_pool->last = now;
-- fast_pool->count = 0;
+ /*
+ * On PREEMPT_RT the entropy can not be fed into the input_pool because
+ * it needs to acquire sleeping locks with disabled interrupts.
+ * This is deferred to the threaded handler.
+ */
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
-+ if (process_interrupt_randomness_pool(fast_pool)) {
-+ fast_pool->last = now;
-+ fast_pool->count = 0;
-+ }
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
++ process_interrupt_randomness_pool(fast_pool)) {
+ fast_pool->last = now;
+ fast_pool->count = 0;
}
- }
- EXPORT_SYMBOL_GPL(add_interrupt_randomness);
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -36,6 +36,7 @@ static inline void add_latent_entropy(vo
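The gist of the change above: with interrupts disabled, input_pool.lock must not be taken on PREEMPT_RT, where spinlocks are sleeping locks, so feeding the input pool is deferred to the threaded interrupt handler. A hedged sketch of the two halves (this_cpu_fast_pool() and process_pool() are illustrative stand-ins):

    /* Hard-IRQ half: only the lock-free per-CPU mixing; feed the
     * input pool right away only where that cannot sleep. */
    void hardirq_half(void)
    {
            struct fast_pool *fp = this_cpu_fast_pool();

            fast_mix(fp);
            if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                    process_pool(fp);
    }

    /* Threaded-handler half: on RT the deferred processing runs
     * here, where taking a sleeping lock is allowed. */
    void irq_thread_half(void)
    {
            if (IS_ENABLED(CONFIG_PREEMPT_RT))
                    process_pool(this_cpu_fast_pool());
    }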
diff --git a/patches/0005_zsmalloc_move_huge_compressed_obj_from_page_to_zspage.patch b/patches/0005_zsmalloc_move_huge_compressed_obj_from_page_to_zspage.patch
deleted file mode 100644
index 954d95c74977..000000000000
--- a/patches/0005_zsmalloc_move_huge_compressed_obj_from_page_to_zspage.patch
+++ /dev/null
@@ -1,150 +0,0 @@
-From: Minchan Kim <minchan@kernel.org>
-Subject: zsmalloc: move huge compressed obj from page to zspage
-Date: Mon, 15 Nov 2021 10:59:05 -0800
-
-The flag applies to the zspage, not to individual pages. Let's move it to the zspage.
-
-Signed-off-by: Minchan Kim <minchan@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211115185909.3949505-6-minchan@kernel.org
----
- mm/zsmalloc.c | 50 ++++++++++++++++++++++++++------------------------
- 1 file changed, 26 insertions(+), 24 deletions(-)
-
---- a/mm/zsmalloc.c
-+++ b/mm/zsmalloc.c
-@@ -121,6 +121,7 @@
- #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
- #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
-
-+#define HUGE_BITS 1
- #define FULLNESS_BITS 2
- #define CLASS_BITS 8
- #define ISOLATED_BITS 3
-@@ -213,22 +214,6 @@ struct size_class {
- struct zs_size_stat stats;
- };
-
--/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
--static void SetPageHugeObject(struct page *page)
--{
-- SetPageOwnerPriv1(page);
--}
--
--static void ClearPageHugeObject(struct page *page)
--{
-- ClearPageOwnerPriv1(page);
--}
--
--static int PageHugeObject(struct page *page)
--{
-- return PageOwnerPriv1(page);
--}
--
- /*
- * Placed within free objects to form a singly linked list.
- * For every zspage, zspage->freeobj gives head of this list.
-@@ -278,6 +263,7 @@ struct zs_pool {
-
- struct zspage {
- struct {
-+ unsigned int huge:HUGE_BITS;
- unsigned int fullness:FULLNESS_BITS;
- unsigned int class:CLASS_BITS + 1;
- unsigned int isolated:ISOLATED_BITS;
-@@ -298,6 +284,17 @@ struct mapping_area {
- enum zs_mapmode vm_mm; /* mapping mode */
- };
-
-+/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
-+static void SetZsHugePage(struct zspage *zspage)
-+{
-+ zspage->huge = 1;
-+}
-+
-+static bool ZsHugePage(struct zspage *zspage)
-+{
-+ return zspage->huge;
-+}
-+
- #ifdef CONFIG_COMPACTION
- static int zs_register_migration(struct zs_pool *pool);
- static void zs_unregister_migration(struct zs_pool *pool);
-@@ -830,7 +827,9 @@ static struct zspage *get_zspage(struct
-
- static struct page *get_next_page(struct page *page)
- {
-- if (unlikely(PageHugeObject(page)))
-+ struct zspage *zspage = get_zspage(page);
-+
-+ if (unlikely(ZsHugePage(zspage)))
- return NULL;
-
- return page->freelist;
-@@ -880,8 +879,9 @@ static unsigned long handle_to_obj(unsig
- static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
- {
- unsigned long handle;
-+ struct zspage *zspage = get_zspage(page);
-
-- if (unlikely(PageHugeObject(page))) {
-+ if (unlikely(ZsHugePage(zspage))) {
- VM_BUG_ON_PAGE(!is_first_page(page), page);
- handle = page->index;
- } else
-@@ -920,7 +920,6 @@ static void reset_page(struct page *page
- ClearPagePrivate(page);
- set_page_private(page, 0);
- page_mapcount_reset(page);
-- ClearPageHugeObject(page);
- page->freelist = NULL;
- }
-
-@@ -1062,7 +1061,7 @@ static void create_page_chain(struct siz
- SetPagePrivate(page);
- if (unlikely(class->objs_per_zspage == 1 &&
- class->pages_per_zspage == 1))
-- SetPageHugeObject(page);
-+ SetZsHugePage(zspage);
- } else {
- prev_page->freelist = page;
- }
-@@ -1307,7 +1306,7 @@ void *zs_map_object(struct zs_pool *pool
-
- ret = __zs_map_object(area, pages, off, class->size);
- out:
-- if (likely(!PageHugeObject(page)))
-+ if (likely(!ZsHugePage(zspage)))
- ret += ZS_HANDLE_SIZE;
-
- return ret;
-@@ -1395,7 +1394,7 @@ static unsigned long obj_malloc(struct z
- vaddr = kmap_atomic(m_page);
- link = (struct link_free *)vaddr + m_offset / sizeof(*link);
- set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
-- if (likely(!PageHugeObject(m_page)))
-+ if (likely(!ZsHugePage(zspage)))
- /* record handle in the header of allocated chunk */
- link->handle = handle;
- else
-@@ -1496,7 +1495,10 @@ static void obj_free(int class_size, uns
-
- /* Insert this object in containing zspage's freelist */
- link = (struct link_free *)(vaddr + f_offset);
-- link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
-+ if (likely(!ZsHugePage(zspage)))
-+ link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
-+ else
-+ f_page->index = 0;
- kunmap_atomic(vaddr);
- set_freeobj(zspage, f_objidx);
- mod_zspage_inuse(zspage, -1);
-@@ -1867,7 +1869,7 @@ static void replace_sub_page(struct size
-
- create_page_chain(class, zspage, pages);
- set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
-- if (unlikely(PageHugeObject(oldpage)))
-+ if (unlikely(ZsHugePage(zspage)))
- newpage->index = oldpage->index;
- __SetPageMovable(newpage, page_mapping(oldpage));
- }
diff --git a/patches/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch b/patches/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
index ce5b4a1d7c98..6c9e80411e78 100644
--- a/patches/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
+++ b/patches/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
-@@ -2,6 +2,10 @@
+@@ -6,6 +6,10 @@
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_
diff --git a/patches/0006-lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch b/patches/0006-lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch
deleted file mode 100644
index b7681a4ba87d..000000000000
--- a/patches/0006-lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch
+++ /dev/null
@@ -1,125 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Nov 2021 18:46:49 +0100
-Subject: [PATCH 06/11] lockdep/selftests: Avoid using
- local_lock_{acquire|release}().
-
-The local_lock related functions
- local_lock_acquire()
- local_lock_release()
-
-are part of the internal implementation and should be avoided.
-Define the lock as DEFINE_PER_CPU so the normal local_lock() function
-can be used.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-7-bigeasy@linutronix.de
----
- lib/locking-selftest.c | 32 ++++++++++++++++----------------
- 1 file changed, 16 insertions(+), 16 deletions(-)
-
---- a/lib/locking-selftest.c
-+++ b/lib/locking-selftest.c
-@@ -139,7 +139,7 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
-
- #endif
-
--static local_lock_t local_A = INIT_LOCAL_LOCK(local_A);
-+static DEFINE_PER_CPU(local_lock_t, local_A);
-
- /*
- * non-inlined runtime initializers, to let separate locks share
-@@ -1320,7 +1320,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
- # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
- # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
- # define I_WW(x) lockdep_reset_lock(&x.dep_map)
--# define I_LOCAL_LOCK(x) lockdep_reset_lock(&local_##x.dep_map)
-+# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
- #ifdef CONFIG_RT_MUTEXES
- # define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
- #endif
-@@ -1380,7 +1380,7 @@ static void reset_locks(void)
- init_shared_classes();
- raw_spin_lock_init(&raw_lock_A);
- raw_spin_lock_init(&raw_lock_B);
-- local_lock_init(&local_A);
-+ local_lock_init(this_cpu_ptr(&local_A));
-
- ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
- memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
-@@ -2646,8 +2646,8 @@ static void wait_context_tests(void)
-
- static void local_lock_2(void)
- {
-- local_lock_acquire(&local_A); /* IRQ-ON */
-- local_lock_release(&local_A);
-+ local_lock(&local_A); /* IRQ-ON */
-+ local_unlock(&local_A);
-
- HARDIRQ_ENTER();
- spin_lock(&lock_A); /* IN-IRQ */
-@@ -2656,18 +2656,18 @@ static void local_lock_2(void)
-
- HARDIRQ_DISABLE();
- spin_lock(&lock_A);
-- local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
-- local_lock_release(&local_A);
-+ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
-+ local_unlock(&local_A);
- spin_unlock(&lock_A);
- HARDIRQ_ENABLE();
- }
-
- static void local_lock_3A(void)
- {
-- local_lock_acquire(&local_A); /* IRQ-ON */
-+ local_lock(&local_A); /* IRQ-ON */
- spin_lock(&lock_B); /* IRQ-ON */
- spin_unlock(&lock_B);
-- local_lock_release(&local_A);
-+ local_unlock(&local_A);
-
- HARDIRQ_ENTER();
- spin_lock(&lock_A); /* IN-IRQ */
-@@ -2676,18 +2676,18 @@ static void local_lock_3A(void)
-
- HARDIRQ_DISABLE();
- spin_lock(&lock_A);
-- local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
-- local_lock_release(&local_A);
-+ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
-+ local_unlock(&local_A);
- spin_unlock(&lock_A);
- HARDIRQ_ENABLE();
- }
-
- static void local_lock_3B(void)
- {
-- local_lock_acquire(&local_A); /* IRQ-ON */
-+ local_lock(&local_A); /* IRQ-ON */
- spin_lock(&lock_B); /* IRQ-ON */
- spin_unlock(&lock_B);
-- local_lock_release(&local_A);
-+ local_unlock(&local_A);
-
- HARDIRQ_ENTER();
- spin_lock(&lock_A); /* IN-IRQ */
-@@ -2696,8 +2696,8 @@ static void local_lock_3B(void)
-
- HARDIRQ_DISABLE();
- spin_lock(&lock_A);
-- local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
-- local_lock_release(&local_A);
-+ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
-+ local_unlock(&local_A);
- spin_unlock(&lock_A);
- HARDIRQ_ENABLE();
-
-@@ -2812,7 +2812,7 @@ void locking_selftest(void)
- printk("------------------------\n");
- printk("| Locking API testsuite:\n");
- printk("----------------------------------------------------------------------------\n");
-- printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n");
-+ printk(" | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n");
- printk(" --------------------------------------------------------------------------\n");
-
- init_shared_classes();
diff --git a/patches/0006-printk-move-buffer-definitions-into-console_emit_nex.patch b/patches/0006-printk-move-buffer-definitions-into-console_emit_nex.patch
index 98afc516a82d..59754bf3f0a3 100644
--- a/patches/0006-printk-move-buffer-definitions-into-console_emit_nex.patch
+++ b/patches/0006-printk-move-buffer-definitions-into-console_emit_nex.patch
@@ -79,11 +79,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int console_lock_spinning_disable_and_check(void) { return 0; }
-static void call_console_driver(struct console *con, const char *text, size_t len) {}
+static void call_console_driver(struct console *con, const char *text, size_t len,
-+ char *dropped_text) {}
++ char *dropped_text) { }
static bool suppress_message_printing(int level) { return false; }
-
- #endif /* CONFIG_PRINTK */
-@@ -2558,6 +2563,14 @@ static void __console_unlock(void)
+ static inline void boot_delay_msec(int level) { }
+ static inline void printk_delay(void) { }
+@@ -2560,6 +2565,14 @@ static void __console_unlock(void)
* Print one record for the given console. The record printed is whatever
* record is the next available record for the given console.
*
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Requires the console_lock.
*
* Returns false if the given console has no next record to print, otherwise
-@@ -2567,17 +2580,16 @@ static void __console_unlock(void)
+@@ -2569,17 +2582,16 @@ static void __console_unlock(void)
* console_lock, in which case the caller is no longer holding the
* console_lock.
*/
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!prb_read_valid(prb, con->seq, &r))
return false;
-@@ -2593,13 +2605,13 @@ static bool console_emit_next_record(str
+@@ -2595,13 +2607,13 @@ static bool console_emit_next_record(str
goto skip;
}
@@ -138,7 +138,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
}
-@@ -2617,7 +2629,7 @@ static bool console_emit_next_record(str
+@@ -2619,7 +2631,7 @@ static bool console_emit_next_record(str
console_lock_spinning_enable();
stop_critical_timings(); /* don't trace print latency */
@@ -147,7 +147,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
start_critical_timings();
con->seq++;
-@@ -2647,6 +2659,9 @@ static bool console_emit_next_record(str
+@@ -2649,6 +2661,9 @@ static bool console_emit_next_record(str
*/
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
{
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool any_usable = false;
struct console *con;
bool any_progress;
-@@ -2664,7 +2679,16 @@ static bool console_flush_all(bool do_co
+@@ -2666,7 +2681,16 @@ static bool console_flush_all(bool do_co
continue;
any_usable = true;
diff --git a/patches/0006_kernel_fork_move_task_stack_account_to_do_exit.patch b/patches/0006_kernel_fork_move_task_stack_account_to_do_exit.patch
index 9f50ac6d70a5..24951b1777bf 100644
--- a/patches/0006_kernel_fork_move_task_stack_account_to_do_exit.patch
+++ b/patches/0006_kernel_fork_move_task_stack_account_to_do_exit.patch
@@ -32,7 +32,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
--- a/kernel/exit.c
+++ b/kernel/exit.c
-@@ -871,6 +871,7 @@ void __noreturn do_exit(long code)
+@@ -845,6 +845,7 @@ void __noreturn do_exit(long code)
put_page(tsk->task_frag.page);
validate_creds_for_do_exit(tsk);
@@ -42,7 +42,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
preempt_disable();
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -210,9 +210,8 @@ static int free_vm_stack_cache(unsigned
+@@ -211,9 +211,8 @@ static int free_vm_stack_cache(unsigned
return 0;
}
@@ -53,7 +53,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
int i;
int ret;
-@@ -238,6 +237,7 @@ static int memcg_charge_kernel_stack(str
+@@ -239,6 +238,7 @@ static int memcg_charge_kernel_stack(str
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
@@ -61,7 +61,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
void *stack;
int i;
-@@ -255,7 +255,7 @@ static int alloc_thread_stack_node(struc
+@@ -256,7 +256,7 @@ static int alloc_thread_stack_node(struc
/* Clear stale pointers from reused stack. */
memset(s->addr, 0, THREAD_SIZE);
@@ -70,7 +70,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
vfree(s->addr);
return -ENOMEM;
}
-@@ -278,7 +278,8 @@ static int alloc_thread_stack_node(struc
+@@ -279,7 +279,8 @@ static int alloc_thread_stack_node(struc
if (!stack)
return -ENOMEM;
@@ -80,7 +80,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
vfree(stack);
return -ENOMEM;
}
-@@ -287,19 +288,15 @@ static int alloc_thread_stack_node(struc
+@@ -288,19 +289,15 @@ static int alloc_thread_stack_node(struc
* free_thread_stack() can be called in interrupt context,
* so cache the vm_struct.
*/
@@ -101,7 +101,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
for (i = 0; i < NR_CACHED_STACKS; i++) {
if (this_cpu_cmpxchg(cached_stacks[i], NULL,
tsk->stack_vm_area) != NULL)
-@@ -450,12 +447,25 @@ static void account_kernel_stack(struct
+@@ -453,12 +450,25 @@ static void account_kernel_stack(struct
}
}
@@ -128,7 +128,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
free_thread_stack(tsk);
}
-@@ -916,6 +926,7 @@ static struct task_struct *dup_task_stru
+@@ -917,6 +927,7 @@ static struct task_struct *dup_task_stru
#ifdef CONFIG_THREAD_INFO_IN_TASK
refcount_set(&tsk->stack_refcount, 1);
#endif
@@ -136,16 +136,16 @@ Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
err = scs_prepare(tsk, node);
if (err)
-@@ -959,8 +970,6 @@ static struct task_struct *dup_task_stru
+@@ -960,8 +971,6 @@ static struct task_struct *dup_task_stru
tsk->wake_q.next = NULL;
- tsk->pf_io_worker = NULL;
+ tsk->worker_private = NULL;
- account_kernel_stack(tsk, 1);
-
kcov_task_init(tsk);
kmap_local_fork(tsk);
-@@ -979,6 +988,7 @@ static struct task_struct *dup_task_stru
+@@ -980,6 +989,7 @@ static struct task_struct *dup_task_stru
return tsk;
free_stack:
@@ -153,7 +153,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-7-bigeasy@linutronix.de
free_thread_stack(tsk);
free_tsk:
free_task_struct(tsk);
-@@ -2475,6 +2485,7 @@ static __latent_entropy struct task_stru
+@@ -2448,6 +2458,7 @@ static __latent_entropy struct task_stru
exit_creds(p);
bad_fork_free:
WRITE_ONCE(p->__state, TASK_DEAD);
diff --git a/patches/0006_zsmalloc_remove_zspage_isolation_for_migration.patch b/patches/0006_zsmalloc_remove_zspage_isolation_for_migration.patch
deleted file mode 100644
index f9d3f7596660..000000000000
--- a/patches/0006_zsmalloc_remove_zspage_isolation_for_migration.patch
+++ /dev/null
@@ -1,309 +0,0 @@
-From: Minchan Kim <minchan@kernel.org>
-Subject: zsmalloc: remove zspage isolation for migration
-Date: Mon, 15 Nov 2021 10:59:06 -0800
-
-zspage isolation for migration introduced additional corner cases
-to deal with, since the zspage was taken off the class list. The
-reason the zspage was isolated from the class list was to prevent
-a race between obj_malloc and page migration, by stopping further
-zpage allocations from the zspage. However, it couldn't prevent
-object freeing from the zspage, so corner-case handling was still
-needed.
-
-This patch removes the whole mess. We are now fine because
-class->lock and zspage->lock prevent the race.
-
-Signed-off-by: Minchan Kim <minchan@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211115185909.3949505-7-minchan@kernel.org
----
- mm/zsmalloc.c | 157 ++--------------------------------------------------------
- 1 file changed, 8 insertions(+), 149 deletions(-)
-
---- a/mm/zsmalloc.c
-+++ b/mm/zsmalloc.c
-@@ -254,10 +254,6 @@ struct zs_pool {
- #ifdef CONFIG_COMPACTION
- struct inode *inode;
- struct work_struct free_work;
-- /* A wait queue for when migration races with async_free_zspage() */
-- struct wait_queue_head migration_wait;
-- atomic_long_t isolated_pages;
-- bool destroying;
- #endif
- };
-
-@@ -454,11 +450,6 @@ MODULE_ALIAS("zpool-zsmalloc");
- /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
- static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
-
--static bool is_zspage_isolated(struct zspage *zspage)
--{
-- return zspage->isolated;
--}
--
- static __maybe_unused int is_first_page(struct page *page)
- {
- return PagePrivate(page);
-@@ -744,7 +735,6 @@ static void remove_zspage(struct size_cl
- enum fullness_group fullness)
- {
- VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
-- VM_BUG_ON(is_zspage_isolated(zspage));
-
- list_del_init(&zspage->list);
- class_stat_dec(class, fullness, 1);
-@@ -770,13 +760,9 @@ static enum fullness_group fix_fullness_
- if (newfg == currfg)
- goto out;
-
-- if (!is_zspage_isolated(zspage)) {
-- remove_zspage(class, zspage, currfg);
-- insert_zspage(class, zspage, newfg);
-- }
--
-+ remove_zspage(class, zspage, currfg);
-+ insert_zspage(class, zspage, newfg);
- set_zspage_mapping(zspage, class_idx, newfg);
--
- out:
- return newfg;
- }
-@@ -1511,7 +1497,6 @@ void zs_free(struct zs_pool *pool, unsig
- unsigned long obj;
- struct size_class *class;
- enum fullness_group fullness;
-- bool isolated;
-
- if (unlikely(!handle))
- return;
-@@ -1533,11 +1518,9 @@ void zs_free(struct zs_pool *pool, unsig
- goto out;
- }
-
-- isolated = is_zspage_isolated(zspage);
- migrate_read_unlock(zspage);
- /* If zspage is isolated, zs_page_putback will free the zspage */
-- if (likely(!isolated))
-- free_zspage(pool, class, zspage);
-+ free_zspage(pool, class, zspage);
- out:
-
- spin_unlock(&class->lock);
-@@ -1718,7 +1701,6 @@ static struct zspage *isolate_zspage(str
- zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
- struct zspage, list);
- if (zspage) {
-- VM_BUG_ON(is_zspage_isolated(zspage));
- remove_zspage(class, zspage, fg[i]);
- return zspage;
- }
-@@ -1739,8 +1721,6 @@ static enum fullness_group putback_zspag
- {
- enum fullness_group fullness;
-
-- VM_BUG_ON(is_zspage_isolated(zspage));
--
- fullness = get_fullness_group(class, zspage);
- insert_zspage(class, zspage, fullness);
- set_zspage_mapping(zspage, class->index, fullness);
-@@ -1822,35 +1802,10 @@ static void inc_zspage_isolation(struct
-
- static void dec_zspage_isolation(struct zspage *zspage)
- {
-+ VM_BUG_ON(zspage->isolated == 0);
- zspage->isolated--;
- }
-
--static void putback_zspage_deferred(struct zs_pool *pool,
-- struct size_class *class,
-- struct zspage *zspage)
--{
-- enum fullness_group fg;
--
-- fg = putback_zspage(class, zspage);
-- if (fg == ZS_EMPTY)
-- schedule_work(&pool->free_work);
--
--}
--
--static inline void zs_pool_dec_isolated(struct zs_pool *pool)
--{
-- VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
-- atomic_long_dec(&pool->isolated_pages);
-- /*
-- * Checking pool->destroying must happen after atomic_long_dec()
-- * for pool->isolated_pages above. Paired with the smp_mb() in
-- * zs_unregister_migration().
-- */
-- smp_mb__after_atomic();
-- if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
-- wake_up_all(&pool->migration_wait);
--}
--
- static void replace_sub_page(struct size_class *class, struct zspage *zspage,
- struct page *newpage, struct page *oldpage)
- {
-@@ -1876,10 +1831,7 @@ static void replace_sub_page(struct size
-
- static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
- {
-- struct zs_pool *pool;
-- struct size_class *class;
- struct zspage *zspage;
-- struct address_space *mapping;
-
- /*
- * Page is locked so zspage couldn't be destroyed. For detail, look at
-@@ -1889,39 +1841,9 @@ static bool zs_page_isolate(struct page
- VM_BUG_ON_PAGE(PageIsolated(page), page);
-
- zspage = get_zspage(page);
--
-- mapping = page_mapping(page);
-- pool = mapping->private_data;
--
-- class = zspage_class(pool, zspage);
--
-- spin_lock(&class->lock);
-- if (get_zspage_inuse(zspage) == 0) {
-- spin_unlock(&class->lock);
-- return false;
-- }
--
-- /* zspage is isolated for object migration */
-- if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
-- spin_unlock(&class->lock);
-- return false;
-- }
--
-- /*
-- * If this is first time isolation for the zspage, isolate zspage from
-- * size_class to prevent further object allocation from the zspage.
-- */
-- if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
-- enum fullness_group fullness;
-- unsigned int class_idx;
--
-- get_zspage_mapping(zspage, &class_idx, &fullness);
-- atomic_long_inc(&pool->isolated_pages);
-- remove_zspage(class, zspage, fullness);
-- }
--
-+ migrate_write_lock(zspage);
- inc_zspage_isolation(zspage);
-- spin_unlock(&class->lock);
-+ migrate_write_unlock(zspage);
-
- return true;
- }
-@@ -2004,21 +1926,6 @@ static int zs_page_migrate(struct addres
-
- dec_zspage_isolation(zspage);
-
-- /*
-- * Page migration is done so let's putback isolated zspage to
-- * the list if @page is final isolated subpage in the zspage.
-- */
-- if (!is_zspage_isolated(zspage)) {
-- /*
-- * We cannot race with zs_destroy_pool() here because we wait
-- * for isolation to hit zero before we start destroying.
-- * Also, we ensure that everyone can see pool->destroying before
-- * we start waiting.
-- */
-- putback_zspage_deferred(pool, class, zspage);
-- zs_pool_dec_isolated(pool);
-- }
--
- if (page_zone(newpage) != page_zone(page)) {
- dec_zone_page_state(page, NR_ZSPAGES);
- inc_zone_page_state(newpage, NR_ZSPAGES);
-@@ -2046,30 +1953,15 @@ static int zs_page_migrate(struct addres
-
- static void zs_page_putback(struct page *page)
- {
-- struct zs_pool *pool;
-- struct size_class *class;
-- struct address_space *mapping;
- struct zspage *zspage;
-
- VM_BUG_ON_PAGE(!PageMovable(page), page);
- VM_BUG_ON_PAGE(!PageIsolated(page), page);
-
- zspage = get_zspage(page);
-- mapping = page_mapping(page);
-- pool = mapping->private_data;
-- class = zspage_class(pool, zspage);
--
-- spin_lock(&class->lock);
-+ migrate_write_lock(zspage);
- dec_zspage_isolation(zspage);
-- if (!is_zspage_isolated(zspage)) {
-- /*
-- * Due to page_lock, we cannot free zspage immediately
-- * so let's defer.
-- */
-- putback_zspage_deferred(pool, class, zspage);
-- zs_pool_dec_isolated(pool);
-- }
-- spin_unlock(&class->lock);
-+ migrate_write_unlock(zspage);
- }
-
- static const struct address_space_operations zsmalloc_aops = {
-@@ -2091,36 +1983,8 @@ static int zs_register_migration(struct
- return 0;
- }
-
--static bool pool_isolated_are_drained(struct zs_pool *pool)
--{
-- return atomic_long_read(&pool->isolated_pages) == 0;
--}
--
--/* Function for resolving migration */
--static void wait_for_isolated_drain(struct zs_pool *pool)
--{
--
-- /*
-- * We're in the process of destroying the pool, so there are no
-- * active allocations. zs_page_isolate() fails for completely free
-- * zspages, so we need only wait for the zs_pool's isolated
-- * count to hit zero.
-- */
-- wait_event(pool->migration_wait,
-- pool_isolated_are_drained(pool));
--}
--
- static void zs_unregister_migration(struct zs_pool *pool)
- {
-- pool->destroying = true;
-- /*
-- * We need a memory barrier here to ensure global visibility of
-- * pool->destroying. Thus pool->isolated pages will either be 0 in which
-- * case we don't care, or it will be > 0 and pool->destroying will
-- * ensure that we wake up once isolation hits 0.
-- */
-- smp_mb();
-- wait_for_isolated_drain(pool); /* This can block */
- flush_work(&pool->free_work);
- iput(pool->inode);
- }
-@@ -2150,7 +2014,6 @@ static void async_free_zspage(struct wor
- spin_unlock(&class->lock);
- }
-
--
- list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
- list_del(&zspage->list);
- lock_zspage(zspage);
-@@ -2363,10 +2226,6 @@ struct zs_pool *zs_create_pool(const cha
- if (!pool->name)
- goto err;
-
--#ifdef CONFIG_COMPACTION
-- init_waitqueue_head(&pool->migration_wait);
--#endif
--
- if (create_cache(pool))
- goto err;
-
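
With the removal above, zs_page_isolate()/zs_page_putback() collapse to per-zspage counter updates under migrate_write_lock(), with no class-list surgery. A small userspace model of the remaining bookkeeping (a pthread rwlock stands in for zspage->lock; the harness is invented; build with -pthread):

#include <assert.h>
#include <pthread.h>

struct zspage {
	pthread_rwlock_t lock;	/* stands in for migrate_write_lock() */
	int isolated;
};

static void zs_page_isolate(struct zspage *zspage)
{
	pthread_rwlock_wrlock(&zspage->lock);
	zspage->isolated++;
	pthread_rwlock_unlock(&zspage->lock);
}

static void zs_page_putback(struct zspage *zspage)
{
	pthread_rwlock_wrlock(&zspage->lock);
	assert(zspage->isolated > 0);	/* VM_BUG_ON() in the patch */
	zspage->isolated--;
	pthread_rwlock_unlock(&zspage->lock);
}

int main(void)
{
	struct zspage z = { .lock = PTHREAD_RWLOCK_INITIALIZER, .isolated = 0 };

	zs_page_isolate(&z);
	zs_page_putback(&z);
	return 0;
}
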
diff --git a/patches/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/patches/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
index 1118dd443bfb..f7dd56bfa11d 100644
--- a/patches/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
+++ b/patches/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
-@@ -823,7 +823,7 @@ DEFINE_EVENT(i915_request, i915_request_
+@@ -327,7 +327,7 @@ DEFINE_EVENT(i915_request, i915_request_
TP_ARGS(rq)
);
diff --git a/patches/0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch b/patches/0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch
deleted file mode 100644
index c253bf085b61..000000000000
--- a/patches/0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Nov 2021 18:46:50 +0100
-Subject: [PATCH 07/11] lockdep/selftests: Unbalanced migrate_disable() &
- rcu_read_lock().
-
-The tests with unbalanced lock() + unlock() operations leave a modified
-preemption counter behind, which is then reset to its original value
-after the test.
-
-The spin_lock() function on PREEMPT_RT does not include a
-preempt_disable() statement but migrate_disable() and rcu_read_lock().
-As a consequence, neither counter gets back to its original value and
-the system explodes later, after the selftest.
-In the double-unlock case on PREEMPT_RT, the migrate_disable() and RCU
-code will trigger a warning which should be avoided. These counters
-should not be decremented below their initial value.
-
-Save both counters and bring them back to their original value after the
-test.
-In the double-unlock case, increment both counters in advance so they
-become balanced after the double unlock.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-8-bigeasy@linutronix.de
----
- lib/locking-selftest.c | 26 +++++++++++++++++++++++++-
- 1 file changed, 25 insertions(+), 1 deletion(-)
-
---- a/lib/locking-selftest.c
-+++ b/lib/locking-selftest.c
-@@ -712,12 +712,18 @@ GENERATE_TESTCASE(ABCDBCDA_rtmutex);
-
- #undef E
-
-+#ifdef CONFIG_PREEMPT_RT
-+# define RT_PREPARE_DBL_UNLOCK() { migrate_disable(); rcu_read_lock(); }
-+#else
-+# define RT_PREPARE_DBL_UNLOCK()
-+#endif
- /*
- * Double unlock:
- */
- #define E() \
- \
- LOCK(A); \
-+ RT_PREPARE_DBL_UNLOCK(); \
- UNLOCK(A); \
- UNLOCK(A); /* fail */
-
-@@ -1398,7 +1404,13 @@ static int unexpected_testcase_failures;
-
- static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
- {
-- unsigned long saved_preempt_count = preempt_count();
-+ int saved_preempt_count = preempt_count();
-+#ifdef CONFIG_PREEMPT_RT
-+#ifdef CONFIG_SMP
-+ int saved_mgd_count = current->migration_disabled;
-+#endif
-+ int saved_rcu_count = current->rcu_read_lock_nesting;
-+#endif
-
- WARN_ON(irqs_disabled());
-
-@@ -1432,6 +1444,18 @@ static void dotest(void (*testcase_fn)(v
- * count, so restore it:
- */
- preempt_count_set(saved_preempt_count);
-+
-+#ifdef CONFIG_PREEMPT_RT
-+#ifdef CONFIG_SMP
-+ while (current->migration_disabled > saved_mgd_count)
-+ migrate_enable();
-+#endif
-+
-+ while (current->rcu_read_lock_nesting > saved_rcu_count)
-+ rcu_read_unlock();
-+ WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count);
-+#endif
-+
- #ifdef CONFIG_TRACE_IRQFLAGS
- if (softirq_count())
- current->softirqs_enabled = 0;
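
The counter-rewind idea in the dropped patch is easy to model outside the kernel. In this sketch the counters and the migrate/RCU functions are fake stand-ins; only the save-then-rewind shape matches the patch:

#include <stdio.h>

static int migration_disabled;
static int rcu_read_lock_nesting;

static void migrate_disable(void) { migration_disabled++; }
static void migrate_enable(void)  { migration_disabled--; }
static void rcu_read_lock(void)   { rcu_read_lock_nesting++; }
static void rcu_read_unlock(void) { rcu_read_lock_nesting--; }

/* Like an unbalanced selftest: leaves both counters elevated. */
static void buggy_testcase(void)
{
	migrate_disable();
	rcu_read_lock();
}

int main(void)
{
	int saved_mgd_count = migration_disabled;
	int saved_rcu_count = rcu_read_lock_nesting;

	buggy_testcase();

	/* Rewind to the snapshot instead of trusting the testcase. */
	while (migration_disabled > saved_mgd_count)
		migrate_enable();
	while (rcu_read_lock_nesting > saved_rcu_count)
		rcu_read_unlock();

	printf("mgd=%d rcu=%d\n", migration_disabled, rcu_read_lock_nesting);
	return 0;
}
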
diff --git a/patches/0007-printk-add-pr_flush.patch b/patches/0007-printk-add-pr_flush.patch
index 34ecf392006f..a7d722f795f9 100644
--- a/patches/0007-printk-add-pr_flush.patch
+++ b/patches/0007-printk-add-pr_flush.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
* with all other unrelated printk_ratelimit() callsites. Instead use
-@@ -224,6 +226,11 @@ static inline void printk_deferred_exit(
+@@ -220,6 +222,11 @@ static inline void printk_deferred_exit(
{
}
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2445,6 +2445,7 @@ void suspend_console(void)
+@@ -2447,6 +2447,7 @@ void suspend_console(void)
if (!console_suspend_enabled)
return;
pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_lock();
console_suspended = 1;
up_console_sem();
-@@ -2457,6 +2458,7 @@ void resume_console(void)
+@@ -2459,6 +2460,7 @@ void resume_console(void)
down_console_sem();
console_suspended = 0;
console_unlock();
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -2800,8 +2802,10 @@ void console_unblank(void)
+@@ -2802,8 +2804,10 @@ void console_unblank(void)
if (oops_in_progress) {
if (down_trylock_console_sem() != 0)
return;
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_locked = 1;
console_may_schedule = 0;
-@@ -2867,6 +2871,7 @@ struct tty_driver *console_device(int *i
+@@ -2869,6 +2873,7 @@ struct tty_driver *console_device(int *i
*/
void console_stop(struct console *console)
{
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_lock();
console->flags &= ~CON_ENABLED;
console_unlock();
-@@ -2878,6 +2883,7 @@ void console_start(struct console *conso
+@@ -2880,6 +2885,7 @@ void console_start(struct console *conso
console_lock();
console->flags |= CON_ENABLED;
console_unlock();
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL(console_start);
-@@ -3234,6 +3240,71 @@ static int __init printk_late_init(void)
+@@ -3250,6 +3256,71 @@ static int __init printk_late_init(void)
late_initcall(printk_late_init);
#if defined CONFIG_PRINTK
diff --git a/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch b/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch
index 723cb17a34ce..5012412af207 100644
--- a/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch
+++ b/patches/0007_kernel_fork_only_cache_the_vmap_stack_in_finish_task_switch.patch
@@ -76,7 +76,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -178,6 +178,16 @@ static inline void free_task_struct(stru
+@@ -179,6 +179,16 @@ static inline void free_task_struct(stru
#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
@@ -93,7 +93,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
/*
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
* kmemcache based allocator.
-@@ -293,7 +303,7 @@ static int alloc_thread_stack_node(struc
+@@ -294,7 +304,7 @@ static int alloc_thread_stack_node(struc
return 0;
}
@@ -102,7 +102,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
{
int i;
-@@ -306,7 +316,12 @@ static void free_thread_stack(struct tas
+@@ -307,7 +317,12 @@ static void free_thread_stack(struct tas
tsk->stack_vm_area = NULL;
return;
}
@@ -116,7 +116,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
tsk->stack = NULL;
tsk->stack_vm_area = NULL;
}
-@@ -325,8 +340,12 @@ static int alloc_thread_stack_node(struc
+@@ -326,8 +341,12 @@ static int alloc_thread_stack_node(struc
return -ENOMEM;
}
@@ -130,7 +130,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
tsk->stack = NULL;
}
-@@ -345,8 +364,12 @@ static int alloc_thread_stack_node(struc
+@@ -346,8 +365,12 @@ static int alloc_thread_stack_node(struc
return stack ? 0 : -ENOMEM;
}
@@ -144,7 +144,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
kmem_cache_free(thread_stack_cache, tsk->stack);
tsk->stack = NULL;
}
-@@ -360,8 +383,19 @@ void thread_stack_cache_init(void)
+@@ -361,8 +384,19 @@ void thread_stack_cache_init(void)
}
# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
@@ -165,7 +165,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
unsigned long *stack;
-@@ -461,19 +495,25 @@ void exit_task_stack_account(struct task
+@@ -464,19 +498,25 @@ void exit_task_stack_account(struct task
}
}
@@ -194,7 +194,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
}
#endif
-@@ -487,7 +527,7 @@ void free_task(struct task_struct *tsk)
+@@ -490,7 +530,7 @@ void free_task(struct task_struct *tsk)
* The task is finally done with both the stack and thread_info,
* so free both.
*/
@@ -203,7 +203,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
#else
/*
* If the task had a separate stack allocation, it should be gone
-@@ -989,7 +1029,7 @@ static struct task_struct *dup_task_stru
+@@ -990,7 +1030,7 @@ static struct task_struct *dup_task_stru
free_stack:
exit_task_stack_account(tsk);
@@ -214,7 +214,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-8-bigeasy@linutronix.de
return NULL;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4881,8 +4881,11 @@ static struct rq *finish_task_switch(str
+@@ -4894,8 +4894,11 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
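
For orientation, the refreshed patch keeps freed VMAP stacks in a small per-CPU cache and only vfree()s them when the cache is full. A single-threaded toy model of that cache (malloc/free replace the vmalloc APIs; the loop stands in for this_cpu_cmpxchg() and skips the kernel's per-CPU and accounting details):

#include <stdio.h>
#include <stdlib.h>

#define NR_CACHED_STACKS 2

static void *cached_stacks[NR_CACHED_STACKS];

/* Single-threaded stand-in for this_cpu_cmpxchg(cached_stacks[i], ...). */
static int try_cache_stack(void *stack)
{
	for (int i = 0; i < NR_CACHED_STACKS; i++) {
		if (!cached_stacks[i]) {
			cached_stacks[i] = stack;
			return 1;
		}
	}
	return 0;
}

static void free_stack(void *stack)
{
	if (!try_cache_stack(stack))
		free(stack);	/* vfree() in the kernel */
}

int main(void)
{
	void *a = malloc(4096), *b = malloc(4096), *c = malloc(4096);

	free_stack(a);	/* cached, ready for the next fork() */
	free_stack(b);	/* cached */
	free_stack(c);	/* cache full: actually freed */
	printf("cache[0]=%p cache[1]=%p\n", cached_stacks[0], cached_stacks[1]);
	return 0;
}
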
diff --git a/patches/0007_locking_rwlocks_introduce_write_lock_nested.patch b/patches/0007_locking_rwlocks_introduce_write_lock_nested.patch
deleted file mode 100644
index a9edd5273884..000000000000
--- a/patches/0007_locking_rwlocks_introduce_write_lock_nested.patch
+++ /dev/null
@@ -1,144 +0,0 @@
-From: Minchan Kim <minchan@kernel.org>
-Subject: locking/rwlocks: introduce write_lock_nested
-Date: Mon, 15 Nov 2021 10:59:07 -0800
-
-In preparation for converting bit_spin_lock to rwlock in zsmalloc:
-multiple writers of zspages will be able to run at the same time,
-but those zspages are supposed to be different zspage instances, so
-there is no deadlock. This patch adds write_lock_nested to support
-that case for LOCKDEP.
-
-[bigeasy: folded write_lock_nested() fixups for PREEMPT_RT.]
-[bigeasy: folded Fixup write_lock_nested() implementation. ]
-
-Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
-Signed-off-by: Minchan Kim <minchan@kernel.org>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211115185909.3949505-8-minchan@kernel.org
-Link: https://lkml.kernel.org/r/YZfrMTAXV56HFWJY@google.com
-Link: https://lkml.kernel.org/r/20211123170134.y6xb7pmpgdn4m3bn@linutronix.de
----
- include/linux/rwlock.h | 6 ++++++
- include/linux/rwlock_api_smp.h | 8 ++++++++
- include/linux/rwlock_rt.h | 10 ++++++++++
- include/linux/spinlock_api_up.h | 1 +
- kernel/locking/spinlock.c | 10 ++++++++++
- kernel/locking/spinlock_rt.c | 12 ++++++++++++
- 6 files changed, 47 insertions(+)
-
---- a/include/linux/rwlock.h
-+++ b/include/linux/rwlock.h
-@@ -55,6 +55,12 @@ do { \
- #define write_lock(lock) _raw_write_lock(lock)
- #define read_lock(lock) _raw_read_lock(lock)
-
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+#define write_lock_nested(lock, subclass) _raw_write_lock_nested(lock, subclass)
-+#else
-+#define write_lock_nested(lock, subclass) _raw_write_lock(lock)
-+#endif
-+
- #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-
- #define read_lock_irqsave(lock, flags) \
---- a/include/linux/rwlock_api_smp.h
-+++ b/include/linux/rwlock_api_smp.h
-@@ -17,6 +17,7 @@
-
- void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
- void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
-+void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
- void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
- void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
- void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
-@@ -209,6 +210,13 @@ static inline void __raw_write_lock(rwlo
- LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
- }
-
-+static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
-+{
-+ preempt_disable();
-+ rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
-+}
-+
- #endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
-
- static inline void __raw_write_unlock(rwlock_t *lock)
---- a/include/linux/rwlock_rt.h
-+++ b/include/linux/rwlock_rt.h
-@@ -28,6 +28,7 @@ extern void rt_read_lock(rwlock_t *rwloc
- extern int rt_read_trylock(rwlock_t *rwlock);
- extern void rt_read_unlock(rwlock_t *rwlock);
- extern void rt_write_lock(rwlock_t *rwlock);
-+extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass);
- extern int rt_write_trylock(rwlock_t *rwlock);
- extern void rt_write_unlock(rwlock_t *rwlock);
-
-@@ -83,6 +84,15 @@ static __always_inline void write_lock(r
- rt_write_lock(rwlock);
- }
-
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
-+{
-+ rt_write_lock_nested(rwlock, subclass);
-+}
-+#else
-+#define write_lock_nested(lock, subclass) rt_write_lock(((void)(subclass), (lock)))
-+#endif
-+
- static __always_inline void write_lock_bh(rwlock_t *rwlock)
- {
- local_bh_disable();
---- a/include/linux/spinlock_api_up.h
-+++ b/include/linux/spinlock_api_up.h
-@@ -59,6 +59,7 @@
- #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
- #define _raw_read_lock(lock) __LOCK(lock)
- #define _raw_write_lock(lock) __LOCK(lock)
-+#define _raw_write_lock_nested(lock, subclass) __LOCK(lock)
- #define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
- #define _raw_read_lock_bh(lock) __LOCK_BH(lock)
- #define _raw_write_lock_bh(lock) __LOCK_BH(lock)
---- a/kernel/locking/spinlock.c
-+++ b/kernel/locking/spinlock.c
-@@ -300,6 +300,16 @@ void __lockfunc _raw_write_lock(rwlock_t
- __raw_write_lock(lock);
- }
- EXPORT_SYMBOL(_raw_write_lock);
-+
-+#ifndef CONFIG_DEBUG_LOCK_ALLOC
-+#define __raw_write_lock_nested(lock, subclass) __raw_write_lock(((void)(subclass), (lock)))
-+#endif
-+
-+void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass)
-+{
-+ __raw_write_lock_nested(lock, subclass);
-+}
-+EXPORT_SYMBOL(_raw_write_lock_nested);
- #endif
-
- #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
---- a/kernel/locking/spinlock_rt.c
-+++ b/kernel/locking/spinlock_rt.c
-@@ -239,6 +239,18 @@ void __sched rt_write_lock(rwlock_t *rwl
- }
- EXPORT_SYMBOL(rt_write_lock);
-
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
-+{
-+ rtlock_might_resched();
-+ rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
-+ rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
-+ rcu_read_lock();
-+ migrate_disable();
-+}
-+EXPORT_SYMBOL(rt_write_lock_nested);
-+#endif
-+
- void __sched rt_read_unlock(rwlock_t *rwlock)
- {
- rwlock_release(&rwlock->dep_map, _RET_IP_);
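
The deleted patch exists so that two write locks of the same lock class (two different zspages) can be held at once without a false lockdep report. A userspace analogue of the compaction pattern (pthreads needs no nesting annotation, so comments mark where the _nested variant would go; build with -pthread):

#include <pthread.h>

static pthread_rwlock_t src_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t dst_lock = PTHREAD_RWLOCK_INITIALIZER;

int main(void)
{
	/* migrate_write_lock(src_zspage) */
	pthread_rwlock_wrlock(&src_lock);
	/* migrate_write_lock_nested(dst_zspage): in the kernel this is
	 * write_lock_nested(&lock, SINGLE_DEPTH_NESTING), purely so
	 * lockdep knows the two same-class locks are distinct instances. */
	pthread_rwlock_wrlock(&dst_lock);

	/* ... move objects from the source zspage to the destination ... */

	pthread_rwlock_unlock(&dst_lock);
	pthread_rwlock_unlock(&src_lock);
	return 0;
}
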
diff --git a/patches/0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch b/patches/0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch
deleted file mode 100644
index a6c34cbd110c..000000000000
--- a/patches/0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch
+++ /dev/null
@@ -1,218 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Nov 2021 18:46:51 +0100
-Subject: [PATCH 08/11] lockdep/selftests: Skip the softirq related tests on
- PREEMPT_RT
-
-The softirq context on PREEMPT_RT is different compared to !PREEMPT_RT.
-As such, lockdep_softirq_enter() is a nop and all the "softirq safe"
-tests fail on PREEMPT_RT because there is no difference.
-
-Skip the softirq context tests on PREEMPT_RT.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-9-bigeasy@linutronix.de
----
- lib/locking-selftest.c | 38 +++++++++++++++++++++++++++++++-------
- 1 file changed, 31 insertions(+), 7 deletions(-)
-
---- a/lib/locking-selftest.c
-+++ b/lib/locking-selftest.c
-@@ -26,6 +26,12 @@
- #include <linux/rtmutex.h>
- #include <linux/local_lock.h>
-
-+#ifdef CONFIG_PREEMPT_RT
-+# define NON_RT(...)
-+#else
-+# define NON_RT(...) __VA_ARGS__
-+#endif
-+
- /*
- * Change this to 1 if you want to see the failure printouts:
- */
-@@ -808,6 +814,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_
- #include "locking-selftest-wlock-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
-
-+#ifndef CONFIG_PREEMPT_RT
- #include "locking-selftest-spin-softirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
-
-@@ -816,10 +823,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_
-
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
-+#endif
-
- #undef E1
- #undef E2
-
-+#ifndef CONFIG_PREEMPT_RT
- /*
- * Enabling hardirqs with a softirq-safe lock held:
- */
-@@ -852,6 +861,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A
- #undef E1
- #undef E2
-
-+#endif
-+
- /*
- * Enabling irqs with an irq-safe lock held:
- */
-@@ -881,6 +892,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B
- #include "locking-selftest-wlock-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
-
-+#ifndef CONFIG_PREEMPT_RT
- #include "locking-selftest-spin-softirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
-
-@@ -889,6 +901,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B
-
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
-+#endif
-
- #undef E1
- #undef E2
-@@ -927,6 +940,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_
- #include "locking-selftest-wlock-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
-
-+#ifndef CONFIG_PREEMPT_RT
- #include "locking-selftest-spin-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
-
-@@ -935,6 +949,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_
-
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
-+#endif
-
- #undef E1
- #undef E2
-@@ -975,6 +990,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_
- #include "locking-selftest-wlock-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
-
-+#ifndef CONFIG_PREEMPT_RT
- #include "locking-selftest-spin-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
-
-@@ -983,6 +999,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_
-
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
-+#endif
-
- #undef E1
- #undef E2
-@@ -1037,6 +1054,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inver
- #include "locking-selftest-wlock-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
-
-+#ifndef CONFIG_PREEMPT_RT
- #include "locking-selftest-spin-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
-
-@@ -1045,6 +1063,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inver
-
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
-+#endif
-
- #undef E1
- #undef E2
-@@ -1212,12 +1231,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
- #include "locking-selftest-wlock.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock)
-
-+#ifndef CONFIG_PREEMPT_RT
- #include "locking-selftest-softirq.h"
- #include "locking-selftest-rlock.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock)
-
- #include "locking-selftest-wlock.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
-+#endif
-
- #undef E1
- #undef E2
-@@ -1258,12 +1279,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
- #include "locking-selftest-wlock.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock)
-
-+#ifndef CONFIG_PREEMPT_RT
- #include "locking-selftest-softirq.h"
- #include "locking-selftest-rlock.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock)
-
- #include "locking-selftest-wlock.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock)
-+#endif
-
- #undef E1
- #undef E2
-@@ -1312,12 +1335,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
- #include "locking-selftest-wlock.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock)
-
-+#ifndef CONFIG_PREEMPT_RT
- #include "locking-selftest-softirq.h"
- #include "locking-selftest-rlock.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock)
-
- #include "locking-selftest-wlock.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
-+#endif
-
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
-@@ -1523,7 +1548,7 @@ static inline void print_testname(const
-
- #define DO_TESTCASE_2x2RW(desc, name, nr) \
- DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \
-- DO_TESTCASE_2RW("soft-"desc, name##_soft, nr) \
-+ NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)) \
-
- #define DO_TESTCASE_6x2x2RW(desc, name) \
- DO_TESTCASE_2x2RW(desc, name, 123); \
-@@ -1571,19 +1596,19 @@ static inline void print_testname(const
-
- #define DO_TESTCASE_2I(desc, name, nr) \
- DO_TESTCASE_1("hard-"desc, name##_hard, nr); \
-- DO_TESTCASE_1("soft-"desc, name##_soft, nr);
-+ NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr));
-
- #define DO_TESTCASE_2IB(desc, name, nr) \
- DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \
-- DO_TESTCASE_1B("soft-"desc, name##_soft, nr);
-+ NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr));
-
- #define DO_TESTCASE_6I(desc, name, nr) \
- DO_TESTCASE_3("hard-"desc, name##_hard, nr); \
-- DO_TESTCASE_3("soft-"desc, name##_soft, nr);
-+ NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr));
-
- #define DO_TESTCASE_6IRW(desc, name, nr) \
- DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \
-- DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);
-+ NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr));
-
- #define DO_TESTCASE_2x3(desc, name) \
- DO_TESTCASE_3(desc, name, 12); \
-@@ -2909,12 +2934,11 @@ void locking_selftest(void)
- DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1);
-
- printk(" --------------------------------------------------------------------------\n");
--
- /*
- * irq-context testcases:
- */
- DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
-- DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
-+ NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A));
- DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
- DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
- DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
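
The NON_RT() wrapper used throughout the dropped patch is plain preprocessor machinery; reduced to a standalone sketch it looks like this (the printf body is a placeholder for a real testcase):

#include <stdio.h>

/* #define CONFIG_PREEMPT_RT   uncomment to compile the testcase out */
#ifdef CONFIG_PREEMPT_RT
# define NON_RT(...)
#else
# define NON_RT(...) __VA_ARGS__
#endif

int main(void)
{
	NON_RT(printf("softirq-dependent testcase: !PREEMPT_RT only\n");)
	return 0;
}
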
diff --git a/patches/0008-printk-add-kthread-console-printers.patch b/patches/0008-printk-add-kthread-console-printers.patch
index bdd807cee942..ba45990d509b 100644
--- a/patches/0008-printk-add-kthread-console-printers.patch
+++ b/patches/0008-printk-add-kthread-console-printers.patch
@@ -24,8 +24,8 @@ Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/console.h | 2
- kernel/printk/printk.c | 157 +++++++++++++++++++++++++++++++++++++++++++++++-
- 2 files changed, 157 insertions(+), 2 deletions(-)
+ kernel/printk/printk.c | 156 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 156 insertions(+), 2 deletions(-)
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -81,13 +81,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define CONSOLE_LOG_MAX 0
@@ -2270,6 +2286,7 @@ static int console_lock_spinning_disable
static void call_console_driver(struct console *con, const char *text, size_t len,
- char *dropped_text) {}
+ char *dropped_text) { }
static bool suppress_message_printing(int level) { return false; }
-+static void start_printk_kthread(struct console *con) {}
++static void start_printk_kthread(struct console *con) { }
+ static inline void boot_delay_msec(int level) { }
+ static inline void printk_delay(void) { }
- #endif /* CONFIG_PRINTK */
-
-@@ -2458,6 +2475,10 @@ void resume_console(void)
+@@ -2460,6 +2477,10 @@ void resume_console(void)
down_console_sem();
console_suspended = 0;
console_unlock();
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_flush(1000, true);
}
-@@ -2672,6 +2693,10 @@ static bool console_flush_all(bool do_co
+@@ -2674,6 +2695,10 @@ static bool console_flush_all(bool do_co
*handover = false;
do {
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
any_progress = false;
for_each_console(con) {
-@@ -2883,6 +2908,10 @@ void console_start(struct console *conso
+@@ -2885,6 +2910,10 @@ void console_start(struct console *conso
console_lock();
console->flags |= CON_ENABLED;
console_unlock();
@@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_flush(1000, true);
}
EXPORT_SYMBOL(console_start);
-@@ -3074,6 +3103,8 @@ void register_console(struct console *ne
+@@ -3089,6 +3118,8 @@ void register_console(struct console *ne
/* Begin with next message. */
newcon->seq = prb_next_seq(prb);
}
@@ -129,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_unlock();
console_sysfs_notify();
-@@ -3130,6 +3161,11 @@ int unregister_console(struct console *c
+@@ -3145,6 +3176,11 @@ int unregister_console(struct console *c
}
}
@@ -141,21 +141,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (res)
goto out_disable_unlock;
-@@ -3235,6 +3271,13 @@ static int __init printk_late_init(void)
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
+@@ -3251,6 +3287,12 @@ static int __init printk_late_init(void)
console_cpu_notify, NULL);
WARN_ON(ret < 0);
+ printk_sysctl_init();
+
+ console_lock();
+ for_each_console(con)
+ start_printk_kthread(con);
+ kthreads_started = true;
+ console_unlock();
-+
return 0;
}
late_initcall(printk_late_init);
-@@ -3305,6 +3348,116 @@ bool pr_flush(int timeout_ms, bool reset
+@@ -3321,6 +3363,116 @@ bool pr_flush(int timeout_ms, bool reset
}
EXPORT_SYMBOL(pr_flush);
@@ -272,7 +271,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Delayed printk version, for scheduler-internal messages:
*/
-@@ -3324,7 +3477,7 @@ static void wake_up_klogd_work_func(stru
+@@ -3340,7 +3492,7 @@ static void wake_up_klogd_work_func(stru
}
if (pending & PRINTK_PENDING_WAKEUP)
diff --git a/patches/0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch b/patches/0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch
index 09633ced1ff0..d29b8e7f20f4 100644
--- a/patches/0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch
+++ b/patches/0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch
@@ -16,7 +16,7 @@ Link: https://lore.kernel.org/r/20211118143452.136421-9-bigeasy@linutronix.de
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -465,16 +465,16 @@ void vm_area_free(struct vm_area_struct
+@@ -468,16 +468,16 @@ void vm_area_free(struct vm_area_struct
static void account_kernel_stack(struct task_struct *tsk, int account)
{
diff --git a/patches/0008_zsmalloc_replace_per_zpage_lock_with_pool_migrate_lock.patch b/patches/0008_zsmalloc_replace_per_zpage_lock_with_pool_migrate_lock.patch
deleted file mode 100644
index 10e82cf97255..000000000000
--- a/patches/0008_zsmalloc_replace_per_zpage_lock_with_pool_migrate_lock.patch
+++ /dev/null
@@ -1,469 +0,0 @@
-From: Minchan Kim <minchan@kernel.org>
-Subject: zsmalloc: replace per zpage lock with pool->migrate_lock
-Date: Mon, 15 Nov 2021 10:59:08 -0800
-
-zsmalloc has used a bit spin_lock in the zpage handle to keep the
-zpage object alive during several operations. However, it causes
-problems for PREEMPT_RT and makes the code too complicated.
-
-This patch replaces the bit spin_lock with the pool->migrate_lock
-rwlock. It makes the code simpler and lets zsmalloc work under
-PREEMPT_RT.
-
-The drawback is that pool->migrate_lock has coarser granularity than
-the per-zpage lock, so contention would be higher than before when
-both IO-related operations (i.e., zs_malloc, zs_free, zs_[map|unmap])
-and compaction (page/zpage migration) run in parallel. (Note that
-the migrate_lock is an rwlock and the IO-related functions all take
-the read-side lock, so there is no contention among them.) However,
-the write side is fast enough (the dominant overhead is just the
-page copy), so it shouldn't hurt much. If the lock granularity
-becomes more of a problem later, we could introduce table locks
-based on the handle as a hash value.
-
-Signed-off-by: Minchan Kim <minchan@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211115185909.3949505-9-minchan@kernel.org
----
- mm/zsmalloc.c | 205 +++++++++++++++++++++++++++-------------------------------
- 1 file changed, 96 insertions(+), 109 deletions(-)
-
---- a/mm/zsmalloc.c
-+++ b/mm/zsmalloc.c
-@@ -30,6 +30,14 @@
-
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-+/*
-+ * lock ordering:
-+ * page_lock
-+ * pool->migrate_lock
-+ * class->lock
-+ * zspage->lock
-+ */
-+
- #include <linux/module.h>
- #include <linux/kernel.h>
- #include <linux/sched.h>
-@@ -101,15 +109,6 @@
- #define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
-
- /*
-- * Memory for allocating for handle keeps object position by
-- * encoding <page, obj_idx> and the encoded value has a room
-- * in least bit(ie, look at obj_to_location).
-- * We use the bit to synchronize between object access by
-- * user and migration.
-- */
--#define HANDLE_PIN_BIT 0
--
--/*
- * Head in allocated object should have OBJ_ALLOCATED_TAG
- * to identify the object was allocated or not.
- * It's okay to add the status bit in the least bit because
-@@ -255,6 +254,8 @@ struct zs_pool {
- struct inode *inode;
- struct work_struct free_work;
- #endif
-+ /* protect page/zspage migration */
-+ rwlock_t migrate_lock;
- };
-
- struct zspage {
-@@ -297,6 +298,9 @@ static void zs_unregister_migration(stru
- static void migrate_lock_init(struct zspage *zspage);
- static void migrate_read_lock(struct zspage *zspage);
- static void migrate_read_unlock(struct zspage *zspage);
-+static void migrate_write_lock(struct zspage *zspage);
-+static void migrate_write_lock_nested(struct zspage *zspage);
-+static void migrate_write_unlock(struct zspage *zspage);
- static void kick_deferred_free(struct zs_pool *pool);
- static void init_deferred_free(struct zs_pool *pool);
- static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
-@@ -308,6 +312,9 @@ static void zs_unregister_migration(stru
- static void migrate_lock_init(struct zspage *zspage) {}
- static void migrate_read_lock(struct zspage *zspage) {}
- static void migrate_read_unlock(struct zspage *zspage) {}
-+static void migrate_write_lock(struct zspage *zspage) {}
-+static void migrate_write_lock_nested(struct zspage *zspage) {}
-+static void migrate_write_unlock(struct zspage *zspage) {}
- static void kick_deferred_free(struct zs_pool *pool) {}
- static void init_deferred_free(struct zs_pool *pool) {}
- static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
-@@ -359,14 +366,10 @@ static void cache_free_zspage(struct zs_
- kmem_cache_free(pool->zspage_cachep, zspage);
- }
-
-+/* class->lock(which owns the handle) synchronizes races */
- static void record_obj(unsigned long handle, unsigned long obj)
- {
-- /*
-- * lsb of @obj represents handle lock while other bits
-- * represent object value the handle is pointing so
-- * updating shouldn't do store tearing.
-- */
-- WRITE_ONCE(*(unsigned long *)handle, obj);
-+ *(unsigned long *)handle = obj;
- }
-
- /* zpool driver */
-@@ -880,26 +883,6 @@ static bool obj_allocated(struct page *p
- return true;
- }
-
--static inline int testpin_tag(unsigned long handle)
--{
-- return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
--}
--
--static inline int trypin_tag(unsigned long handle)
--{
-- return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
--}
--
--static void pin_tag(unsigned long handle) __acquires(bitlock)
--{
-- bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
--}
--
--static void unpin_tag(unsigned long handle) __releases(bitlock)
--{
-- bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
--}
--
- static void reset_page(struct page *page)
- {
- __ClearPageMovable(page);
-@@ -968,6 +951,11 @@ static void free_zspage(struct zs_pool *
- VM_BUG_ON(get_zspage_inuse(zspage));
- VM_BUG_ON(list_empty(&zspage->list));
-
-+ /*
-+	 * Since zs_free cannot sleep, this function cannot call
-+ * lock_page. The page locks trylock_zspage got will be released
-+ * by __free_zspage.
-+ */
- if (!trylock_zspage(zspage)) {
- kick_deferred_free(pool);
- return;
-@@ -1263,15 +1251,20 @@ void *zs_map_object(struct zs_pool *pool
- */
- BUG_ON(in_interrupt());
-
-- /* From now on, migration cannot move the object */
-- pin_tag(handle);
--
-+ /* It guarantees it can get zspage from handle safely */
-+ read_lock(&pool->migrate_lock);
- obj = handle_to_obj(handle);
- obj_to_location(obj, &page, &obj_idx);
- zspage = get_zspage(page);
-
-- /* migration cannot move any subpage in this zspage */
-+ /*
-+ * migration cannot move any zpages in this zspage. Here, class->lock
-+	 * is too heavy since callers would take some time until they call
-+ * zs_unmap_object API so delegate the locking from class to zspage
-+ * which is smaller granularity.
-+ */
- migrate_read_lock(zspage);
-+ read_unlock(&pool->migrate_lock);
-
- class = zspage_class(pool, zspage);
- off = (class->size * obj_idx) & ~PAGE_MASK;
-@@ -1330,7 +1323,6 @@ void zs_unmap_object(struct zs_pool *poo
- put_cpu_var(zs_map_area);
-
- migrate_read_unlock(zspage);
-- unpin_tag(handle);
- }
- EXPORT_SYMBOL_GPL(zs_unmap_object);
-
-@@ -1424,6 +1416,7 @@ unsigned long zs_malloc(struct zs_pool *
- size += ZS_HANDLE_SIZE;
- class = pool->size_class[get_size_class_index(size)];
-
-+ /* class->lock effectively protects the zpage migration */
- spin_lock(&class->lock);
- zspage = find_get_zspage(class);
- if (likely(zspage)) {
-@@ -1501,30 +1494,27 @@ void zs_free(struct zs_pool *pool, unsig
- if (unlikely(!handle))
- return;
-
-- pin_tag(handle);
-+ /*
-+ * The pool->migrate_lock protects the race with zpage's migration
-+ * so it's safe to get the page from handle.
-+ */
-+ read_lock(&pool->migrate_lock);
- obj = handle_to_obj(handle);
- obj_to_page(obj, &f_page);
- zspage = get_zspage(f_page);
--
-- migrate_read_lock(zspage);
- class = zspage_class(pool, zspage);
--
- spin_lock(&class->lock);
-+ read_unlock(&pool->migrate_lock);
-+
- obj_free(class->size, obj);
- class_stat_dec(class, OBJ_USED, 1);
- fullness = fix_fullness_group(class, zspage);
-- if (fullness != ZS_EMPTY) {
-- migrate_read_unlock(zspage);
-+ if (fullness != ZS_EMPTY)
- goto out;
-- }
-
-- migrate_read_unlock(zspage);
-- /* If zspage is isolated, zs_page_putback will free the zspage */
- free_zspage(pool, class, zspage);
- out:
--
- spin_unlock(&class->lock);
-- unpin_tag(handle);
- cache_free_handle(pool, handle);
- }
- EXPORT_SYMBOL_GPL(zs_free);
-@@ -1608,11 +1598,8 @@ static unsigned long find_alloced_obj(st
- offset += class->size * index;
-
- while (offset < PAGE_SIZE) {
-- if (obj_allocated(page, addr + offset, &handle)) {
-- if (trypin_tag(handle))
-- break;
-- handle = 0;
-- }
-+ if (obj_allocated(page, addr + offset, &handle))
-+ break;
-
- offset += class->size;
- index++;
-@@ -1658,7 +1645,6 @@ static int migrate_zspage(struct zs_pool
-
- /* Stop if there is no more space */
- if (zspage_full(class, get_zspage(d_page))) {
-- unpin_tag(handle);
- ret = -ENOMEM;
- break;
- }
-@@ -1667,15 +1653,7 @@ static int migrate_zspage(struct zs_pool
- free_obj = obj_malloc(pool, get_zspage(d_page), handle);
- zs_object_copy(class, free_obj, used_obj);
- obj_idx++;
-- /*
-- * record_obj updates handle's value to free_obj and it will
-- * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which
-- * breaks synchronization using pin_tag(e,g, zs_free) so
-- * let's keep the lock bit.
-- */
-- free_obj |= BIT(HANDLE_PIN_BIT);
- record_obj(handle, free_obj);
-- unpin_tag(handle);
- obj_free(class->size, used_obj);
- }
-
-@@ -1789,6 +1767,11 @@ static void migrate_write_lock(struct zs
- write_lock(&zspage->lock);
- }
-
-+static void migrate_write_lock_nested(struct zspage *zspage)
-+{
-+ write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
-+}
-+
- static void migrate_write_unlock(struct zspage *zspage)
- {
- write_unlock(&zspage->lock);
-@@ -1856,11 +1839,10 @@ static int zs_page_migrate(struct addres
- struct zspage *zspage;
- struct page *dummy;
- void *s_addr, *d_addr, *addr;
-- int offset, pos;
-+ int offset;
- unsigned long handle;
- unsigned long old_obj, new_obj;
- unsigned int obj_idx;
-- int ret = -EAGAIN;
-
- /*
- * We cannot support the _NO_COPY case here, because copy needs to
-@@ -1873,32 +1855,25 @@ static int zs_page_migrate(struct addres
- VM_BUG_ON_PAGE(!PageMovable(page), page);
- VM_BUG_ON_PAGE(!PageIsolated(page), page);
-
-- zspage = get_zspage(page);
--
-- /* Concurrent compactor cannot migrate any subpage in zspage */
-- migrate_write_lock(zspage);
- pool = mapping->private_data;
-+
-+ /*
-+ * The pool migrate_lock protects the race between zpage migration
-+ * and zs_free.
-+ */
-+ write_lock(&pool->migrate_lock);
-+ zspage = get_zspage(page);
- class = zspage_class(pool, zspage);
-- offset = get_first_obj_offset(page);
-
-+ /*
-+ * the class lock protects zpage alloc/free in the zspage.
-+ */
- spin_lock(&class->lock);
-- if (!get_zspage_inuse(zspage)) {
-- /*
-- * Set "offset" to end of the page so that every loops
-- * skips unnecessary object scanning.
-- */
-- offset = PAGE_SIZE;
-- }
-+ /* the migrate_write_lock protects zpage access via zs_map_object */
-+ migrate_write_lock(zspage);
-
-- pos = offset;
-+ offset = get_first_obj_offset(page);
- s_addr = kmap_atomic(page);
-- while (pos < PAGE_SIZE) {
-- if (obj_allocated(page, s_addr + pos, &handle)) {
-- if (!trypin_tag(handle))
-- goto unpin_objects;
-- }
-- pos += class->size;
-- }
-
- /*
- * Here, any user cannot access all objects in the zspage so let's move.
-@@ -1907,25 +1882,30 @@ static int zs_page_migrate(struct addres
- memcpy(d_addr, s_addr, PAGE_SIZE);
- kunmap_atomic(d_addr);
-
-- for (addr = s_addr + offset; addr < s_addr + pos;
-+ for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
- addr += class->size) {
- if (obj_allocated(page, addr, &handle)) {
-- BUG_ON(!testpin_tag(handle));
-
- old_obj = handle_to_obj(handle);
- obj_to_location(old_obj, &dummy, &obj_idx);
- new_obj = (unsigned long)location_to_obj(newpage,
- obj_idx);
-- new_obj |= BIT(HANDLE_PIN_BIT);
- record_obj(handle, new_obj);
- }
- }
-+ kunmap_atomic(s_addr);
-
- replace_sub_page(class, zspage, newpage, page);
-- get_page(newpage);
--
-+ /*
-+ * Since we complete the data copy and set up new zspage structure,
-+ * it's okay to release migration_lock.
-+ */
-+ write_unlock(&pool->migrate_lock);
-+ spin_unlock(&class->lock);
- dec_zspage_isolation(zspage);
-+ migrate_write_unlock(zspage);
-
-+ get_page(newpage);
- if (page_zone(newpage) != page_zone(page)) {
- dec_zone_page_state(page, NR_ZSPAGES);
- inc_zone_page_state(newpage, NR_ZSPAGES);
-@@ -1933,22 +1913,8 @@ static int zs_page_migrate(struct addres
-
- reset_page(page);
- put_page(page);
-- page = newpage;
--
-- ret = MIGRATEPAGE_SUCCESS;
--unpin_objects:
-- for (addr = s_addr + offset; addr < s_addr + pos;
-- addr += class->size) {
-- if (obj_allocated(page, addr, &handle)) {
-- BUG_ON(!testpin_tag(handle));
-- unpin_tag(handle);
-- }
-- }
-- kunmap_atomic(s_addr);
-- spin_unlock(&class->lock);
-- migrate_write_unlock(zspage);
-
-- return ret;
-+ return MIGRATEPAGE_SUCCESS;
- }
-
- static void zs_page_putback(struct page *page)
-@@ -2077,8 +2043,13 @@ static unsigned long __zs_compact(struct
- struct zspage *dst_zspage = NULL;
- unsigned long pages_freed = 0;
-
-+ /* protect the race between zpage migration and zs_free */
-+ write_lock(&pool->migrate_lock);
-+ /* protect zpage allocation/free */
- spin_lock(&class->lock);
- while ((src_zspage = isolate_zspage(class, true))) {
-+ /* protect someone accessing the zspage(i.e., zs_map_object) */
-+ migrate_write_lock(src_zspage);
-
- if (!zs_can_compact(class))
- break;
-@@ -2087,6 +2058,8 @@ static unsigned long __zs_compact(struct
- cc.s_page = get_first_page(src_zspage);
-
- while ((dst_zspage = isolate_zspage(class, false))) {
-+ migrate_write_lock_nested(dst_zspage);
-+
- cc.d_page = get_first_page(dst_zspage);
- /*
- * If there is no more space in dst_page, resched
-@@ -2096,6 +2069,10 @@ static unsigned long __zs_compact(struct
- break;
-
- putback_zspage(class, dst_zspage);
-+ migrate_write_unlock(dst_zspage);
-+ dst_zspage = NULL;
-+ if (rwlock_is_contended(&pool->migrate_lock))
-+ break;
- }
-
- /* Stop if we couldn't find slot */
-@@ -2103,19 +2080,28 @@ static unsigned long __zs_compact(struct
- break;
-
- putback_zspage(class, dst_zspage);
-+ migrate_write_unlock(dst_zspage);
-+
- if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
-+ migrate_write_unlock(src_zspage);
- free_zspage(pool, class, src_zspage);
- pages_freed += class->pages_per_zspage;
-- }
-+ } else
-+ migrate_write_unlock(src_zspage);
- spin_unlock(&class->lock);
-+ write_unlock(&pool->migrate_lock);
- cond_resched();
-+ write_lock(&pool->migrate_lock);
- spin_lock(&class->lock);
- }
-
-- if (src_zspage)
-+ if (src_zspage) {
- putback_zspage(class, src_zspage);
-+ migrate_write_unlock(src_zspage);
-+ }
-
- spin_unlock(&class->lock);
-+ write_unlock(&pool->migrate_lock);
-
- return pages_freed;
- }
-@@ -2221,6 +2207,7 @@ struct zs_pool *zs_create_pool(const cha
- return NULL;
-
- init_deferred_free(pool);
-+ rwlock_init(&pool->migrate_lock);
-
- pool->name = kstrdup(name, GFP_KERNEL);
- if (!pool->name)
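
The core of the dropped patch is the hand-over-hand read-side locking in zs_map_object(): pool->migrate_lock pins the handle-to-zspage lookup, the per-zspage lock is taken, and only then is the pool lock dropped. A userspace model of that shape (pthread rwlocks stand in for the kernel locks; lookup() is a fabricated stand-in for handle_to_obj()/get_zspage(); build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t migrate_lock = PTHREAD_RWLOCK_INITIALIZER;

struct zspage { pthread_rwlock_t lock; int obj; };
static struct zspage the_zspage = { PTHREAD_RWLOCK_INITIALIZER, 42 };

static struct zspage *lookup(long handle)
{
	(void)handle;		/* fabricated handle_to_obj()/get_zspage() */
	return &the_zspage;
}

static int zs_map_object(long handle)
{
	struct zspage *zspage;
	int val;

	pthread_rwlock_rdlock(&migrate_lock);	/* pin the lookup */
	zspage = lookup(handle);
	pthread_rwlock_rdlock(&zspage->lock);	/* block zspage migration */
	pthread_rwlock_unlock(&migrate_lock);	/* lookup no longer needed */

	val = zspage->obj;			/* "mapped" access */
	pthread_rwlock_unlock(&zspage->lock);
	return val;
}

int main(void)
{
	printf("%d\n", zs_map_object(1));
	return 0;
}
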
diff --git a/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch b/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
index 08b6e92f1e63..9f71e539caba 100644
--- a/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
+++ b/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
@@ -77,7 +77,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
static void clear_ports(struct i915_request **ports, int count)
{
memset_p((void **)ports, NULL, count);
-@@ -2424,7 +2417,7 @@ static void execlists_submission_tasklet
+@@ -2425,7 +2418,7 @@ static void execlists_submission_tasklet
}
if (!engine->execlists.pending[0]) {
diff --git a/patches/0009-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch b/patches/0009-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch
deleted file mode 100644
index f35a26343c7e..000000000000
--- a/patches/0009-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch
+++ /dev/null
@@ -1,254 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Nov 2021 18:46:52 +0100
-Subject: [PATCH 09/11] lockdep/selftests: Adapt ww-tests for PREEMPT_RT
-
-The ww-mutex selftest operates directly on ww_mutex::base and assumes
-its type is struct mutex. This isn't true on PREEMPT_RT, which turns
-the mutex into an rtmutex.
-
-Add a ww_mutex_base_ abstraction which maps to the relevant mutex_ or
-rt_mutex_ function.
-Change the CONFIG_DEBUG_MUTEXES ifdef to DEBUG_WW_MUTEXES. The latter is
-true for both the MUTEX and the RTMUTEX implementations of WW-MUTEX. The
-assignment is required in order to pass the tests.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-10-bigeasy@linutronix.de
----
- lib/locking-selftest.c | 76 +++++++++++++++++++++++++++++--------------------
- 1 file changed, 46 insertions(+), 30 deletions(-)
-
---- a/lib/locking-selftest.c
-+++ b/lib/locking-selftest.c
-@@ -1700,6 +1700,22 @@ static void ww_test_fail_acquire(void)
- #endif
- }
-
-+#ifdef CONFIG_PREEMPT_RT
-+#define ww_mutex_base_lock(b) rt_mutex_lock(b)
-+#define ww_mutex_base_trylock(b) rt_mutex_trylock(b)
-+#define ww_mutex_base_lock_nest_lock(b, b2) rt_mutex_lock_nest_lock(b, b2)
-+#define ww_mutex_base_lock_interruptible(b) rt_mutex_lock_interruptible(b)
-+#define ww_mutex_base_lock_killable(b) rt_mutex_lock_killable(b)
-+#define ww_mutex_base_unlock(b) rt_mutex_unlock(b)
-+#else
-+#define ww_mutex_base_lock(b) mutex_lock(b)
-+#define ww_mutex_base_trylock(b) mutex_trylock(b)
-+#define ww_mutex_base_lock_nest_lock(b, b2) mutex_lock_nest_lock(b, b2)
-+#define ww_mutex_base_lock_interruptible(b) mutex_lock_interruptible(b)
-+#define ww_mutex_base_lock_killable(b) mutex_lock_killable(b)
-+#define ww_mutex_base_unlock(b) mutex_unlock(b)
-+#endif
-+
- static void ww_test_normal(void)
- {
- int ret;
-@@ -1714,50 +1730,50 @@ static void ww_test_normal(void)
-
- /* mutex_lock (and indirectly, mutex_lock_nested) */
- o.ctx = (void *)~0UL;
-- mutex_lock(&o.base);
-- mutex_unlock(&o.base);
-+ ww_mutex_base_lock(&o.base);
-+ ww_mutex_base_unlock(&o.base);
- WARN_ON(o.ctx != (void *)~0UL);
-
- /* mutex_lock_interruptible (and *_nested) */
- o.ctx = (void *)~0UL;
-- ret = mutex_lock_interruptible(&o.base);
-+ ret = ww_mutex_base_lock_interruptible(&o.base);
- if (!ret)
-- mutex_unlock(&o.base);
-+ ww_mutex_base_unlock(&o.base);
- else
- WARN_ON(1);
- WARN_ON(o.ctx != (void *)~0UL);
-
- /* mutex_lock_killable (and *_nested) */
- o.ctx = (void *)~0UL;
-- ret = mutex_lock_killable(&o.base);
-+ ret = ww_mutex_base_lock_killable(&o.base);
- if (!ret)
-- mutex_unlock(&o.base);
-+ ww_mutex_base_unlock(&o.base);
- else
- WARN_ON(1);
- WARN_ON(o.ctx != (void *)~0UL);
-
- /* trylock, succeeding */
- o.ctx = (void *)~0UL;
-- ret = mutex_trylock(&o.base);
-+ ret = ww_mutex_base_trylock(&o.base);
- WARN_ON(!ret);
- if (ret)
-- mutex_unlock(&o.base);
-+ ww_mutex_base_unlock(&o.base);
- else
- WARN_ON(1);
- WARN_ON(o.ctx != (void *)~0UL);
-
- /* trylock, failing */
- o.ctx = (void *)~0UL;
-- mutex_lock(&o.base);
-- ret = mutex_trylock(&o.base);
-+ ww_mutex_base_lock(&o.base);
-+ ret = ww_mutex_base_trylock(&o.base);
- WARN_ON(ret);
-- mutex_unlock(&o.base);
-+ ww_mutex_base_unlock(&o.base);
- WARN_ON(o.ctx != (void *)~0UL);
-
- /* nest_lock */
- o.ctx = (void *)~0UL;
-- mutex_lock_nest_lock(&o.base, &t);
-- mutex_unlock(&o.base);
-+ ww_mutex_base_lock_nest_lock(&o.base, &t);
-+ ww_mutex_base_unlock(&o.base);
- WARN_ON(o.ctx != (void *)~0UL);
- }
-
-@@ -1770,7 +1786,7 @@ static void ww_test_two_contexts(void)
- static void ww_test_diff_class(void)
- {
- WWAI(&t);
--#ifdef CONFIG_DEBUG_MUTEXES
-+#ifdef DEBUG_WW_MUTEXES
- t.ww_class = NULL;
- #endif
- WWL(&o, &t);
-@@ -1834,7 +1850,7 @@ static void ww_test_edeadlk_normal(void)
- {
- int ret;
-
-- mutex_lock(&o2.base);
-+ ww_mutex_base_lock(&o2.base);
- o2.ctx = &t2;
- mutex_release(&o2.base.dep_map, _THIS_IP_);
-
-@@ -1850,7 +1866,7 @@ static void ww_test_edeadlk_normal(void)
-
- o2.ctx = NULL;
- mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-- mutex_unlock(&o2.base);
-+ ww_mutex_base_unlock(&o2.base);
- WWU(&o);
-
- WWL(&o2, &t);
-@@ -1860,7 +1876,7 @@ static void ww_test_edeadlk_normal_slow(
- {
- int ret;
-
-- mutex_lock(&o2.base);
-+ ww_mutex_base_lock(&o2.base);
- mutex_release(&o2.base.dep_map, _THIS_IP_);
- o2.ctx = &t2;
-
-@@ -1876,7 +1892,7 @@ static void ww_test_edeadlk_normal_slow(
-
- o2.ctx = NULL;
- mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-- mutex_unlock(&o2.base);
-+ ww_mutex_base_unlock(&o2.base);
- WWU(&o);
-
- ww_mutex_lock_slow(&o2, &t);
-@@ -1886,7 +1902,7 @@ static void ww_test_edeadlk_no_unlock(vo
- {
- int ret;
-
-- mutex_lock(&o2.base);
-+ ww_mutex_base_lock(&o2.base);
- o2.ctx = &t2;
- mutex_release(&o2.base.dep_map, _THIS_IP_);
-
-@@ -1902,7 +1918,7 @@ static void ww_test_edeadlk_no_unlock(vo
-
- o2.ctx = NULL;
- mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-- mutex_unlock(&o2.base);
-+ ww_mutex_base_unlock(&o2.base);
-
- WWL(&o2, &t);
- }
-@@ -1911,7 +1927,7 @@ static void ww_test_edeadlk_no_unlock_sl
- {
- int ret;
-
-- mutex_lock(&o2.base);
-+ ww_mutex_base_lock(&o2.base);
- mutex_release(&o2.base.dep_map, _THIS_IP_);
- o2.ctx = &t2;
-
-@@ -1927,7 +1943,7 @@ static void ww_test_edeadlk_no_unlock_sl
-
- o2.ctx = NULL;
- mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-- mutex_unlock(&o2.base);
-+ ww_mutex_base_unlock(&o2.base);
-
- ww_mutex_lock_slow(&o2, &t);
- }
-@@ -1936,7 +1952,7 @@ static void ww_test_edeadlk_acquire_more
- {
- int ret;
-
-- mutex_lock(&o2.base);
-+ ww_mutex_base_lock(&o2.base);
- mutex_release(&o2.base.dep_map, _THIS_IP_);
- o2.ctx = &t2;
-
-@@ -1957,7 +1973,7 @@ static void ww_test_edeadlk_acquire_more
- {
- int ret;
-
-- mutex_lock(&o2.base);
-+ ww_mutex_base_lock(&o2.base);
- mutex_release(&o2.base.dep_map, _THIS_IP_);
- o2.ctx = &t2;
-
-@@ -1978,11 +1994,11 @@ static void ww_test_edeadlk_acquire_more
- {
- int ret;
-
-- mutex_lock(&o2.base);
-+ ww_mutex_base_lock(&o2.base);
- mutex_release(&o2.base.dep_map, _THIS_IP_);
- o2.ctx = &t2;
-
-- mutex_lock(&o3.base);
-+ ww_mutex_base_lock(&o3.base);
- mutex_release(&o3.base.dep_map, _THIS_IP_);
- o3.ctx = &t2;
-
-@@ -2004,11 +2020,11 @@ static void ww_test_edeadlk_acquire_more
- {
- int ret;
-
-- mutex_lock(&o2.base);
-+ ww_mutex_base_lock(&o2.base);
- mutex_release(&o2.base.dep_map, _THIS_IP_);
- o2.ctx = &t2;
-
-- mutex_lock(&o3.base);
-+ ww_mutex_base_lock(&o3.base);
- mutex_release(&o3.base.dep_map, _THIS_IP_);
- o3.ctx = &t2;
-
-@@ -2029,7 +2045,7 @@ static void ww_test_edeadlk_acquire_wron
- {
- int ret;
-
-- mutex_lock(&o2.base);
-+ ww_mutex_base_lock(&o2.base);
- mutex_release(&o2.base.dep_map, _THIS_IP_);
- o2.ctx = &t2;
-
-@@ -2054,7 +2070,7 @@ static void ww_test_edeadlk_acquire_wron
- {
- int ret;
-
-- mutex_lock(&o2.base);
-+ ww_mutex_base_lock(&o2.base);
- mutex_release(&o2.base.dep_map, _THIS_IP_);
- o2.ctx = &t2;
-
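The ww_mutex_base_ layer above lets the selftests compile against either lock flavour. As a minimal sketch of a caller, assuming the macros from the hunk above (the function name demo_trylock is illustrative only, not from the patch):

/* Resolves to rt_mutex_trylock()/rt_mutex_unlock() on PREEMPT_RT,
 * and to mutex_trylock()/mutex_unlock() otherwise. */
static void demo_trylock(struct ww_mutex *o)
{
	if (ww_mutex_base_trylock(&o->base))
		ww_mutex_base_unlock(&o->base);
}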
diff --git a/patches/0009-printk-reimplement-console_lock-for-proper-kthread-s.patch b/patches/0009-printk-reimplement-console_lock-for-proper-kthread-s.patch
index a1e887b01a78..1d35549c4524 100644
--- a/patches/0009-printk-reimplement-console_lock-for-proper-kthread-s.patch
+++ b/patches/0009-printk-reimplement-console_lock-for-proper-kthread-s.patch
@@ -126,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -215,6 +215,26 @@ int devkmsg_sysctl_set_loglvl(struct ctl
+@@ -216,6 +216,26 @@ int devkmsg_sysctl_set_loglvl(struct ctl
static int nr_ext_console_drivers;
/*
@@ -153,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Helper macros to handle lockdep when locking/unlocking console_sem. We use
* macros instead of functions so that _RET_IP_ contains useful information.
*/
-@@ -257,6 +277,37 @@ static void __up_console_sem(unsigned lo
+@@ -258,6 +278,37 @@ static void __up_console_sem(unsigned lo
#define up_console_sem() __up_console_sem(_RET_IP_)
/*
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* This is used for debugging the mess that is the VT code by
* keeping track if we have the console semaphore held. It's
* definitely not the perfect debug tool (we don't know if _WE_
-@@ -2475,10 +2526,6 @@ void resume_console(void)
+@@ -2477,10 +2528,6 @@ void resume_console(void)
down_console_sem();
console_suspended = 0;
console_unlock();
@@ -202,7 +202,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_flush(1000, true);
}
-@@ -2516,6 +2563,7 @@ void console_lock(void)
+@@ -2518,6 +2565,7 @@ void console_lock(void)
down_console_sem();
if (console_suspended)
return;
@@ -210,7 +210,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_locked = 1;
console_may_schedule = 1;
}
-@@ -2537,6 +2585,10 @@ int console_trylock(void)
+@@ -2539,6 +2587,10 @@ int console_trylock(void)
up_console_sem();
return 0;
}
@@ -221,7 +221,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_locked = 1;
console_may_schedule = 0;
return 1;
-@@ -2545,7 +2597,7 @@ EXPORT_SYMBOL(console_trylock);
+@@ -2547,7 +2599,7 @@ EXPORT_SYMBOL(console_trylock);
int is_console_locked(void)
{
@@ -230,7 +230,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL(is_console_locked);
-@@ -2579,6 +2631,19 @@ static inline bool console_is_usable(str
+@@ -2581,6 +2633,19 @@ static inline bool console_is_usable(str
static void __console_unlock(void)
{
console_locked = 0;
@@ -250,7 +250,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
up_console_sem();
}
-@@ -2601,7 +2666,8 @@ static void __console_unlock(void)
+@@ -2603,7 +2668,8 @@ static void __console_unlock(void)
*
* @handover will be set to true if a printk waiter has taken over the
* console_lock, in which case the caller is no longer holding the
@@ -260,7 +260,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
char *dropped_text, bool *handover)
-@@ -2609,11 +2675,15 @@ static bool console_emit_next_record(str
+@@ -2611,11 +2677,15 @@ static bool console_emit_next_record(str
struct printk_info info;
struct printk_record r;
unsigned long flags;
@@ -276,7 +276,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!prb_read_valid(prb, con->seq, &r))
return false;
-@@ -2638,18 +2708,23 @@ static bool console_emit_next_record(str
+@@ -2640,18 +2710,23 @@ static bool console_emit_next_record(str
len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
}
@@ -312,7 +312,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stop_critical_timings(); /* don't trace print latency */
call_console_driver(con, write_text, len, dropped_text);
-@@ -2657,8 +2732,10 @@ static bool console_emit_next_record(str
+@@ -2659,8 +2734,10 @@ static bool console_emit_next_record(str
con->seq++;
@@ -325,7 +325,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
boot_delay_msec(r.info->level);
printk_delay();
-@@ -2827,6 +2904,10 @@ void console_unblank(void)
+@@ -2829,6 +2906,10 @@ void console_unblank(void)
if (oops_in_progress) {
if (down_trylock_console_sem() != 0)
return;
@@ -336,7 +336,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else {
pr_flush(1000, true);
console_lock();
-@@ -2908,10 +2989,6 @@ void console_start(struct console *conso
+@@ -2910,10 +2991,6 @@ void console_start(struct console *conso
console_lock();
console->flags |= CON_ENABLED;
console_unlock();
@@ -347,7 +347,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_flush(1000, true);
}
EXPORT_SYMBOL(console_start);
-@@ -3093,7 +3170,11 @@ void register_console(struct console *ne
+@@ -3108,7 +3185,11 @@ void register_console(struct console *ne
if (newcon->flags & CON_EXTENDED)
nr_ext_console_drivers++;
@@ -359,7 +359,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (newcon->flags & CON_PRINTBUFFER) {
/* Get a consistent copy of @syslog_seq. */
mutex_lock(&syslog_lock);
-@@ -3355,16 +3436,17 @@ static bool printer_should_wake(struct c
+@@ -3370,16 +3451,17 @@ static bool printer_should_wake(struct c
if (kthread_should_stop())
return true;
@@ -381,7 +381,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return prb_read_valid(prb, seq, NULL);
}
-@@ -3375,7 +3457,6 @@ static int printk_kthread_func(void *dat
+@@ -3390,7 +3472,6 @@ static int printk_kthread_func(void *dat
char *dropped_text = NULL;
char *ext_text = NULL;
bool progress;
@@ -389,7 +389,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
u64 seq = 0;
char *text;
int error;
-@@ -3408,9 +3489,17 @@ static int printk_kthread_func(void *dat
+@@ -3423,9 +3504,17 @@ static int printk_kthread_func(void *dat
continue;
do {
@@ -410,7 +410,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
break;
}
-@@ -3424,14 +3513,13 @@ static int printk_kthread_func(void *dat
+@@ -3439,14 +3528,13 @@ static int printk_kthread_func(void *dat
*/
console_may_schedule = 0;
progress = console_emit_next_record(con, text, ext_text,
diff --git a/patches/0009_zsmalloc_replace_get_cpu_var_with_local_lock.patch b/patches/0009_zsmalloc_replace_get_cpu_var_with_local_lock.patch
deleted file mode 100644
index f90bd47c90f2..000000000000
--- a/patches/0009_zsmalloc_replace_get_cpu_var_with_local_lock.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Subject: zsmalloc: replace get_cpu_var with local_lock
-Date: Mon, 15 Nov 2021 10:59:09 -0800
-
-The usage of get_cpu_var() in zs_map_object() is problematic because
-it disables preemption and makes it impossible to acquire any sleeping
-lock on PREEMPT_RT, such as a spinlock_t.
-Replace the get_cpu_var() usage with a local_lock_t which is embedded
-in struct mapping_area. It ensures that access to the struct is
-synchronized against all users on the same CPU.
-
-Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-[minchan: remove the bit_spin_lock part and change the title]
-Signed-off-by: Minchan Kim <minchan@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211115185909.3949505-10-minchan@kernel.org
----
- mm/zsmalloc.c | 11 ++++++++---
- 1 file changed, 8 insertions(+), 3 deletions(-)
-
---- a/mm/zsmalloc.c
-+++ b/mm/zsmalloc.c
-@@ -65,6 +65,7 @@
- #include <linux/wait.h>
- #include <linux/pagemap.h>
- #include <linux/fs.h>
-+#include <linux/local_lock.h>
-
- #define ZSPAGE_MAGIC 0x58
-
-@@ -276,6 +277,7 @@ struct zspage {
- };
-
- struct mapping_area {
-+ local_lock_t lock;
- char *vm_buf; /* copy buffer for objects that span pages */
- char *vm_addr; /* address of kmap_atomic()'ed pages */
- enum zs_mapmode vm_mm; /* mapping mode */
-@@ -451,7 +453,9 @@ MODULE_ALIAS("zpool-zsmalloc");
- #endif /* CONFIG_ZPOOL */
-
- /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
--static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
-+static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
-+ .lock = INIT_LOCAL_LOCK(lock),
-+};
-
- static __maybe_unused int is_first_page(struct page *page)
- {
-@@ -1269,7 +1273,8 @@ void *zs_map_object(struct zs_pool *pool
- class = zspage_class(pool, zspage);
- off = (class->size * obj_idx) & ~PAGE_MASK;
-
-- area = &get_cpu_var(zs_map_area);
-+ local_lock(&zs_map_area.lock);
-+ area = this_cpu_ptr(&zs_map_area);
- area->vm_mm = mm;
- if (off + class->size <= PAGE_SIZE) {
- /* this object is contained entirely within a page */
-@@ -1320,7 +1325,7 @@ void zs_unmap_object(struct zs_pool *poo
-
- __zs_unmap_object(area, pages, off, class->size);
- }
-- put_cpu_var(zs_map_area);
-+ local_unlock(&zs_map_area.lock);
-
- migrate_read_unlock(zspage);
- }
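The pattern in this removed zsmalloc patch generalizes to any per-CPU scratch structure. A minimal sketch, using illustrative names (scratch_area, demo_area) rather than the mainline ones:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct scratch_area {
	local_lock_t lock;
	char *buf;
};

static DEFINE_PER_CPU(struct scratch_area, demo_area) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void demo_use(void)
{
	struct scratch_area *area;

	/* Disables preemption on !RT; acquires a per-CPU lock on RT. */
	local_lock(&demo_area.lock);
	area = this_cpu_ptr(&demo_area);
	/* ... operate on area->buf ... */
	local_unlock(&demo_area.lock);
}

Unlike get_cpu_var(), the local_lock() section remains preemptible on PREEMPT_RT, so sleeping locks such as spinlock_t may be acquired inside it.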
diff --git a/patches/0010-drm-i915-Drop-the-irqs_disabled-check.patch b/patches/0010-drm-i915-Drop-the-irqs_disabled-check.patch
index b236f2ab65eb..6d411b56cb55 100644
--- a/patches/0010-drm-i915-Drop-the-irqs_disabled-check.patch
+++ b/patches/0010-drm-i915-Drop-the-irqs_disabled-check.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
-@@ -560,7 +560,6 @@ bool __i915_request_submit(struct i915_r
+@@ -583,7 +583,6 @@ bool __i915_request_submit(struct i915_r
RQ_TRACE(request, "\n");
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
lockdep_assert_held(&engine->sched_engine->lock);
/*
-@@ -669,7 +668,6 @@ void __i915_request_unsubmit(struct i915
+@@ -692,7 +691,6 @@ void __i915_request_unsubmit(struct i915
*/
RQ_TRACE(request, "\n");
diff --git a/patches/0010-printk-remove-console_locked.patch b/patches/0010-printk-remove-console_locked.patch
index e4f2bf7e8f4c..6335bcf2c921 100644
--- a/patches/0010-printk-remove-console_locked.patch
+++ b/patches/0010-printk-remove-console_locked.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -307,15 +307,7 @@ static void __pause_all_consoles(bool do
+@@ -308,15 +308,7 @@ static void __pause_all_consoles(bool do
#define pause_all_consoles() __pause_all_consoles(true)
#define unpause_all_consoles() __pause_all_consoles(false)
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Array of consoles built from command line options (console=)
-@@ -2564,7 +2556,6 @@ void console_lock(void)
+@@ -2566,7 +2558,6 @@ void console_lock(void)
if (console_suspended)
return;
pause_all_consoles();
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_may_schedule = 1;
}
EXPORT_SYMBOL(console_lock);
-@@ -2589,15 +2580,26 @@ int console_trylock(void)
+@@ -2591,15 +2582,26 @@ int console_trylock(void)
up_console_sem();
return 0;
}
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL(is_console_locked);
-@@ -2630,8 +2632,6 @@ static inline bool console_is_usable(str
+@@ -2632,8 +2634,6 @@ static inline bool console_is_usable(str
static void __console_unlock(void)
{
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Depending on whether console_lock() or console_trylock() was used,
* appropriately allow the kthread printers to continue.
-@@ -2913,7 +2913,6 @@ void console_unblank(void)
+@@ -2915,7 +2915,6 @@ void console_unblank(void)
console_lock();
}
diff --git a/patches/0010-x86-mm-Include-spinlock_t-definition-in-pgtable.patch b/patches/0010-x86-mm-Include-spinlock_t-definition-in-pgtable.patch
deleted file mode 100644
index 41067046b199..000000000000
--- a/patches/0010-x86-mm-Include-spinlock_t-definition-in-pgtable.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Nov 2021 18:46:53 +0100
-Subject: [PATCH 10/11] x86/mm: Include spinlock_t definition in pgtable.
-
-This header file provides a forward declaration for pgd_lock but does
-not include the header defining its type. This works since the
-definition of spinlock_t is usually included somehow via printk.
-
-By trying to avoid recursive includes on PREEMPT_RT I avoided the loop
-in printk and, as a consequence, kernel/intel.c failed to compile due
-to the missing type definition.
-
-Include the needed definition of spinlock_t.
-
-Cc: x86@kernel.org
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Ingo Molnar <mingo@redhat.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-11-bigeasy@linutronix.de
----
- arch/x86/include/asm/pgtable.h | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -22,6 +22,7 @@
- #define pgprot_decrypted(prot) __pgprot(__sme_clr(pgprot_val(prot)))
-
- #ifndef __ASSEMBLY__
-+#include <linux/spinlock.h>
- #include <asm/x86_init.h>
- #include <asm/pkru.h>
- #include <asm/fpu/api.h>
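The failure mode the removed patch addressed is easy to reproduce in any header; a hedged illustration with demo names (not the real pgtable.h layout):

#ifndef _DEMO_PGTABLE_H
#define _DEMO_PGTABLE_H

/* Name the type's header explicitly instead of relying on printk.h
 * having pulled in the spinlock types transitively. */
#include <linux/spinlock.h>

extern spinlock_t pgd_lock;	/* the declaration now sees the type */

#endif /* _DEMO_PGTABLE_H */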
diff --git a/patches/0011-console-introduce-CON_MIGHT_SLEEP-for-vt.patch b/patches/0011-console-introduce-CON_MIGHT_SLEEP-for-vt.patch
index 08610894babf..6a93eb394fd9 100644
--- a/patches/0011-console-introduce-CON_MIGHT_SLEEP-for-vt.patch
+++ b/patches/0011-console-introduce-CON_MIGHT_SLEEP-for-vt.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
char name[16];
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2781,6 +2781,8 @@ static bool console_flush_all(bool do_co
+@@ -2783,6 +2783,8 @@ static bool console_flush_all(bool do_co
if (!console_is_usable(con))
continue;
diff --git a/patches/0011-locking-Allow-to-include-asm-spinlock_types.h-from-l.patch b/patches/0011-locking-Allow-to-include-asm-spinlock_types.h-from-l.patch
deleted file mode 100644
index 3d5b036d51c2..000000000000
--- a/patches/0011-locking-Allow-to-include-asm-spinlock_types.h-from-l.patch
+++ /dev/null
@@ -1,302 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Nov 2021 18:46:54 +0100
-Subject: [PATCH 11/11] locking: Allow to include asm/spinlock_types.h from
- linux/spinlock_types_raw.h
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-The printk header file includes ratelimit_types.h for its __ratelimit()
-based usage. It is required for the static initializer used in
-printk_ratelimited(). It uses a raw_spinlock_t and includes
-spinlock_types.h.
-
-PREEMPT_RT substitutes spinlock_t with an rtmutex-based implementation, so
-its spinlock_t implementation (provided by spinlock_rt.h) includes rtmutex.h
-and atomic.h, which leads to recursive includes where defines are missing.
-
-Including only the raw_spinlock_t defines avoids the atomic.h related
-includes at this stage.
-
-An example on powerpc:
-
-| CALL scripts/atomic/check-atomics.sh
-|In file included from include/linux/bug.h:5,
-| from include/linux/page-flags.h:10,
-| from kernel/bounds.c:10:
-|arch/powerpc/include/asm/page_32.h: In function ‘clear_page’:
-|arch/powerpc/include/asm/bug.h:87:4: error: implicit declaration of function ‘__WARN’ [-Werror=implicit-function-declaration]
-| 87 | __WARN(); \
-| | ^~~~~~
-|arch/powerpc/include/asm/page_32.h:48:2: note: in expansion of macro ‘WARN_ON’
-| 48 | WARN_ON((unsigned long)addr & (L1_CACHE_BYTES - 1));
-| | ^~~~~~~
-|arch/powerpc/include/asm/bug.h:58:17: error: invalid application of ‘sizeof’ to incomplete type ‘struct bug_entry’
-| 58 | "i" (sizeof(struct bug_entry)), \
-| | ^~~~~~
-|arch/powerpc/include/asm/bug.h:89:3: note: in expansion of macro ‘BUG_ENTRY’
-| 89 | BUG_ENTRY(PPC_TLNEI " %4, 0", \
-| | ^~~~~~~~~
-|arch/powerpc/include/asm/page_32.h:48:2: note: in expansion of macro ‘WARN_ON’
-| 48 | WARN_ON((unsigned long)addr & (L1_CACHE_BYTES - 1));
-| | ^~~~~~~
-|In file included from arch/powerpc/include/asm/ptrace.h:298,
-| from arch/powerpc/include/asm/hw_irq.h:12,
-| from arch/powerpc/include/asm/irqflags.h:12,
-| from include/linux/irqflags.h:16,
-| from include/asm-generic/cmpxchg-local.h:6,
-| from arch/powerpc/include/asm/cmpxchg.h:526,
-| from arch/powerpc/include/asm/atomic.h:11,
-| from include/linux/atomic.h:7,
-| from include/linux/rwbase_rt.h:6,
-| from include/linux/rwlock_types.h:55,
-| from include/linux/spinlock_types.h:74,
-| from include/linux/ratelimit_types.h:7,
-| from include/linux/printk.h:10,
-| from include/asm-generic/bug.h:22,
-| from arch/powerpc/include/asm/bug.h:109,
-| from include/linux/bug.h:5,
-| from include/linux/page-flags.h:10,
-| from kernel/bounds.c:10:
-|include/linux/thread_info.h: In function ‘copy_overflow’:
-|include/linux/thread_info.h:210:2: error: implicit declaration of function ‘WARN’ [-Werror=implicit-function-declaration]
-| 210 | WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-| | ^~~~
-
-The WARN / BUG include pulls in printk.h and then ptrace.h expects WARN
-(from bug.h) which is not yet complete. Even hw_irq.h has WARN_ON()
-statements.
-
-On POWERPC64 there are missing atomic64 defines while building 32bit
-VDSO:
-| VDSO32C arch/powerpc/kernel/vdso32/vgettimeofday.o
-|In file included from include/linux/atomic.h:80,
-| from include/linux/rwbase_rt.h:6,
-| from include/linux/rwlock_types.h:55,
-| from include/linux/spinlock_types.h:74,
-| from include/linux/ratelimit_types.h:7,
-| from include/linux/printk.h:10,
-| from include/linux/kernel.h:19,
-| from arch/powerpc/include/asm/page.h:11,
-| from arch/powerpc/include/asm/vdso/gettimeofday.h:5,
-| from include/vdso/datapage.h:137,
-| from lib/vdso/gettimeofday.c:5,
-| from <command-line>:
-|include/linux/atomic-arch-fallback.h: In function ‘arch_atomic64_inc’:
-|include/linux/atomic-arch-fallback.h:1447:2: error: implicit declaration of function ‘arch_atomic64_add’; did you mean ‘arch_atomic_add’? [-Werror=impl
-|icit-function-declaration]
-| 1447 | arch_atomic64_add(1, v);
-| | ^~~~~~~~~~~~~~~~~
-| | arch_atomic_add
-
-The generic fallback is not included, and the atomics themselves are not
-used. If kernel.h does not include printk.h then it comes in later via
-the bug.h include.
-
-Allow asm/spinlock_types.h to be included from
-linux/spinlock_types_raw.h.
-
-Cc: Albert Ou <aou@eecs.berkeley.edu>
-Cc: Alexander Gordeev <agordeev@linux.ibm.com>
-Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-Cc: Brian Cain <bcain@codeaurora.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Chris Zankel <chris@zankel.net>
-Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
-Cc: Guo Ren <guoren@kernel.org>
-Cc: Heiko Carstens <hca@linux.ibm.com>
-Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
-Cc: Matt Turner <mattst88@gmail.com>
-Cc: Max Filippov <jcmvbkbc@gmail.com>
-Cc: Michael Ellerman <mpe@ellerman.id.au>
-Cc: Palmer Dabbelt <palmer@dabbelt.com>
-Cc: Paul Mackerras <paulus@samba.org>
-Cc: Paul Walmsley <paul.walmsley@sifive.com>
-Cc: Rich Felker <dalias@libc.org>
-Cc: Richard Henderson <rth@twiddle.net>
-Cc: Russell King <linux@armlinux.org.uk>
-Cc: Vasily Gorbik <gor@linux.ibm.com>
-Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
-Cc: linux-alpha@vger.kernel.org
-Cc: linux-arm-kernel@lists.infradead.org
-Cc: linux-csky@vger.kernel.org
-Cc: linux-hexagon@vger.kernel.org
-Cc: linux-ia64@vger.kernel.org
-Cc: linux-riscv@lists.infradead.org
-Cc: linux-s390@vger.kernel.org
-Cc: linux-sh@vger.kernel.org
-Cc: linux-xtensa@linux-xtensa.org
-Cc: linuxppc-dev@lists.ozlabs.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211129174654.668506-12-bigeasy@linutronix.de
----
- arch/alpha/include/asm/spinlock_types.h | 2 +-
- arch/arm/include/asm/spinlock_types.h | 2 +-
- arch/arm64/include/asm/spinlock_types.h | 2 +-
- arch/csky/include/asm/spinlock_types.h | 2 +-
- arch/hexagon/include/asm/spinlock_types.h | 2 +-
- arch/ia64/include/asm/spinlock_types.h | 2 +-
- arch/powerpc/include/asm/simple_spinlock_types.h | 2 +-
- arch/powerpc/include/asm/spinlock_types.h | 2 +-
- arch/riscv/include/asm/spinlock_types.h | 2 +-
- arch/s390/include/asm/spinlock_types.h | 2 +-
- arch/sh/include/asm/spinlock_types.h | 2 +-
- arch/xtensa/include/asm/spinlock_types.h | 2 +-
- include/linux/ratelimit_types.h | 2 +-
- include/linux/spinlock_types_up.h | 2 +-
- 14 files changed, 14 insertions(+), 14 deletions(-)
-
---- a/arch/alpha/include/asm/spinlock_types.h
-+++ b/arch/alpha/include/asm/spinlock_types.h
-@@ -2,7 +2,7 @@
- #ifndef _ALPHA_SPINLOCK_TYPES_H
- #define _ALPHA_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
---- a/arch/arm/include/asm/spinlock_types.h
-+++ b/arch/arm/include/asm/spinlock_types.h
-@@ -2,7 +2,7 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
---- a/arch/arm64/include/asm/spinlock_types.h
-+++ b/arch/arm64/include/asm/spinlock_types.h
-@@ -5,7 +5,7 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
-+#if !defined(__LINUX_SPINLOCK_TYPES_RAW_H) && !defined(__ASM_SPINLOCK_H)
- # error "please don't include this file directly"
- #endif
-
---- a/arch/csky/include/asm/spinlock_types.h
-+++ b/arch/csky/include/asm/spinlock_types.h
-@@ -3,7 +3,7 @@
- #ifndef __ASM_CSKY_SPINLOCK_TYPES_H
- #define __ASM_CSKY_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
---- a/arch/hexagon/include/asm/spinlock_types.h
-+++ b/arch/hexagon/include/asm/spinlock_types.h
-@@ -8,7 +8,7 @@
- #ifndef _ASM_SPINLOCK_TYPES_H
- #define _ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
---- a/arch/ia64/include/asm/spinlock_types.h
-+++ b/arch/ia64/include/asm/spinlock_types.h
-@@ -2,7 +2,7 @@
- #ifndef _ASM_IA64_SPINLOCK_TYPES_H
- #define _ASM_IA64_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
---- a/arch/powerpc/include/asm/simple_spinlock_types.h
-+++ b/arch/powerpc/include/asm/simple_spinlock_types.h
-@@ -2,7 +2,7 @@
- #ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
- #define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
---- a/arch/powerpc/include/asm/spinlock_types.h
-+++ b/arch/powerpc/include/asm/spinlock_types.h
-@@ -2,7 +2,7 @@
- #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
- #define _ASM_POWERPC_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
---- a/arch/riscv/include/asm/spinlock_types.h
-+++ b/arch/riscv/include/asm/spinlock_types.h
-@@ -6,7 +6,7 @@
- #ifndef _ASM_RISCV_SPINLOCK_TYPES_H
- #define _ASM_RISCV_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
---- a/arch/s390/include/asm/spinlock_types.h
-+++ b/arch/s390/include/asm/spinlock_types.h
-@@ -2,7 +2,7 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
---- a/arch/sh/include/asm/spinlock_types.h
-+++ b/arch/sh/include/asm/spinlock_types.h
-@@ -2,7 +2,7 @@
- #ifndef __ASM_SH_SPINLOCK_TYPES_H
- #define __ASM_SH_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
---- a/arch/xtensa/include/asm/spinlock_types.h
-+++ b/arch/xtensa/include/asm/spinlock_types.h
-@@ -2,7 +2,7 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
-+#if !defined(__LINUX_SPINLOCK_TYPES_RAW_H) && !defined(__ASM_SPINLOCK_H)
- # error "please don't include this file directly"
- #endif
-
---- a/include/linux/ratelimit_types.h
-+++ b/include/linux/ratelimit_types.h
-@@ -4,7 +4,7 @@
-
- #include <linux/bits.h>
- #include <linux/param.h>
--#include <linux/spinlock_types.h>
-+#include <linux/spinlock_types_raw.h>
-
- #define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
- #define DEFAULT_RATELIMIT_BURST 10
---- a/include/linux/spinlock_types_up.h
-+++ b/include/linux/spinlock_types_up.h
-@@ -1,7 +1,7 @@
- #ifndef __LINUX_SPINLOCK_TYPES_UP_H
- #define __LINUX_SPINLOCK_TYPES_UP_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- # error "please don't include this file directly"
- #endif
-
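The net effect of this removed patch: a header that only needs raw_spinlock_t can include linux/spinlock_types_raw.h directly and sidestep the rtmutex/atomic include chain on PREEMPT_RT. A minimal sketch, assuming an illustrative struct (not the real ratelimit_state):

#include <linux/spinlock_types_raw.h>

/* raw_spinlock_t is the same type on RT and !RT, so this header
 * pulls in neither rtmutex.h nor atomic.h. */
struct demo_ratelimit_state {
	raw_spinlock_t	lock;
	int		interval;
	int		burst;
};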
diff --git a/patches/0012-printk-add-infrastucture-for-atomic-consoles.patch b/patches/0012-printk-add-infrastucture-for-atomic-consoles.patch
index 445085f828c8..064863c1c7a1 100644
--- a/patches/0012-printk-add-infrastucture-for-atomic-consoles.patch
+++ b/patches/0012-printk-add-infrastucture-for-atomic-consoles.patch
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1542,6 +1542,10 @@ config PRINTK
+@@ -1541,6 +1541,10 @@ config PRINTK
very difficult to diagnose system problems, saying N here is
strongly discouraged.
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
default y
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -215,7 +215,6 @@ void panic(const char *fmt, ...)
+@@ -216,7 +216,6 @@ void panic(const char *fmt, ...)
panic_smp_self_stop();
console_verbose();
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
va_start(args, fmt);
len = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
-@@ -239,6 +238,11 @@ void panic(const char *fmt, ...)
+@@ -240,6 +239,11 @@ void panic(const char *fmt, ...)
*/
kgdb_panic(buf);
@@ -249,12 +249,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void console_lock_spinning_enable(void) { }
static int console_lock_spinning_disable_and_check(void) { return 0; }
static void call_console_driver(struct console *con, const char *text, size_t len,
-- char *dropped_text) {}
-+ char *dropped_text, bool atomic_printing) {}
+- char *dropped_text) { }
++ char *dropped_text, bool atomic_printing) { }
static bool suppress_message_printing(int level) { return false; }
- static void start_printk_kthread(struct console *con) {}
-
-@@ -2609,13 +2691,23 @@ EXPORT_SYMBOL(is_console_locked);
+ static void start_printk_kthread(struct console *con) { }
+ static inline void boot_delay_msec(int level) { }
+@@ -2611,13 +2693,23 @@ EXPORT_SYMBOL(is_console_locked);
*
* Requires the console_lock.
*/
@@ -280,7 +280,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Console drivers may assume that per-cpu resources have been
-@@ -2647,6 +2739,66 @@ static void __console_unlock(void)
+@@ -2649,6 +2741,66 @@ static void __console_unlock(void)
up_console_sem();
}
@@ -347,7 +347,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Print one record for the given console. The record printed is whatever
* record is the next available record for the given console.
-@@ -2659,6 +2811,8 @@ static void __console_unlock(void)
+@@ -2661,6 +2813,8 @@ static void __console_unlock(void)
* If dropped messages should be printed, @dropped_text is a buffer of size
* DROPPED_TEXT_MAX. Otherise @dropped_text must be NULL.
*
@@ -356,7 +356,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Requires the console_lock.
*
* Returns false if the given console has no next record to print, otherwise
-@@ -2670,7 +2824,8 @@ static void __console_unlock(void)
+@@ -2672,7 +2826,8 @@ static void __console_unlock(void)
* the console_lock to be taken over by a printk waiter.
*/
static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
@@ -366,7 +366,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct printk_info info;
struct printk_record r;
-@@ -2678,23 +2833,27 @@ static bool console_emit_next_record(str
+@@ -2680,23 +2835,27 @@ static bool console_emit_next_record(str
bool allow_handover;
char *write_text;
size_t len;
@@ -399,7 +399,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto skip;
}
-@@ -2727,10 +2886,10 @@ static bool console_emit_next_record(str
+@@ -2729,10 +2888,10 @@ static bool console_emit_next_record(str
}
stop_critical_timings(); /* don't trace print latency */
@@ -412,7 +412,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (allow_handover) {
*handover = console_lock_spinning_disable_and_check();
-@@ -2779,7 +2938,7 @@ static bool console_flush_all(bool do_co
+@@ -2781,7 +2940,7 @@ static bool console_flush_all(bool do_co
for_each_console(con) {
bool progress;
@@ -421,7 +421,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if ((con->flags & CON_MIGHT_SLEEP) && !do_cond_resched)
continue;
-@@ -2789,11 +2948,11 @@ static bool console_flush_all(bool do_co
+@@ -2791,11 +2950,11 @@ static bool console_flush_all(bool do_co
/* Extended consoles do not print "dropped messages". */
progress = console_emit_next_record(con, &text[0],
&ext_text[0], NULL,
@@ -435,7 +435,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (*handover)
return true;
-@@ -2814,6 +2973,67 @@ static bool console_flush_all(bool do_co
+@@ -2816,6 +2975,67 @@ static bool console_flush_all(bool do_co
return any_usable;
}
@@ -503,7 +503,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* console_unlock - unlock the console system
*
-@@ -2930,6 +3150,11 @@ void console_unblank(void)
+@@ -2932,6 +3152,11 @@ void console_unblank(void)
*/
void console_flush_on_panic(enum con_flush_mode mode)
{
@@ -515,7 +515,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If someone else is holding the console lock, trylock will fail
* and may_schedule may be set. Ignore and proceed to unlock so
-@@ -2946,7 +3171,7 @@ void console_flush_on_panic(enum con_flu
+@@ -2948,7 +3173,7 @@ void console_flush_on_panic(enum con_flu
seq = prb_first_valid_seq(prb);
for_each_console(c)
@@ -524,7 +524,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
console_unlock();
}
-@@ -3174,16 +3399,19 @@ void register_console(struct console *ne
+@@ -3189,16 +3414,19 @@ void register_console(struct console *ne
if (consoles_paused)
newcon->flags |= CON_PAUSED;
@@ -547,7 +547,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (kthreads_started)
start_printk_kthread(newcon);
-@@ -3265,6 +3493,10 @@ int unregister_console(struct console *c
+@@ -3280,6 +3508,10 @@ int unregister_console(struct console *c
console_unlock();
console_sysfs_notify();
@@ -558,7 +558,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (console->exit)
res = console->exit(console);
-@@ -3398,7 +3630,7 @@ bool pr_flush(int timeout_ms, bool reset
+@@ -3413,7 +3645,7 @@ bool pr_flush(int timeout_ms, bool reset
console_lock();
for_each_console(con) {
@@ -567,7 +567,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
printk_seq = con->seq;
if (printk_seq < seq)
-@@ -3466,6 +3698,11 @@ static int printk_kthread_func(void *dat
+@@ -3481,6 +3713,11 @@ static int printk_kthread_func(void *dat
(con->flags & CON_BOOT) ? "boot" : "",
con->name, con->index);
@@ -579,7 +579,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
if (!text)
goto out;
-@@ -3494,7 +3731,7 @@ static int printk_kthread_func(void *dat
+@@ -3509,7 +3746,7 @@ static int printk_kthread_func(void *dat
if (error)
break;
@@ -588,7 +588,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_unlock(&con->lock);
break;
}
-@@ -3514,7 +3751,7 @@ static int printk_kthread_func(void *dat
+@@ -3529,7 +3766,7 @@ static int printk_kthread_func(void *dat
*/
console_may_schedule = 0;
progress = console_emit_next_record(con, text, ext_text,
diff --git a/patches/0013-serial-8250-implement-write_atomic.patch b/patches/0013-serial-8250-implement-write_atomic.patch
index 11e77e5dd488..04ccb544ac89 100644
--- a/patches/0013-serial-8250-implement-write_atomic.patch
+++ b/patches/0013-serial-8250-implement-write_atomic.patch
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irqrestore(&up->port.lock, flags);
-@@ -568,6 +566,14 @@ serial8250_register_ports(struct uart_dr
+@@ -567,6 +565,14 @@ serial8250_register_ports(struct uart_dr
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void univ8250_console_write(struct console *co, const char *s,
unsigned int count)
{
-@@ -661,6 +667,7 @@ static int univ8250_console_match(struct
+@@ -660,6 +666,7 @@ static int univ8250_console_match(struct
static struct console univ8250_console = {
.name = "ttyS",
@@ -472,7 +472,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
-@@ -125,6 +126,8 @@ struct uart_8250_port {
+@@ -123,6 +124,8 @@ struct uart_8250_port {
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
unsigned char msr_saved_flags;
@@ -481,7 +481,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct uart_8250_dma *dma;
const struct uart_8250_ops *ops;
-@@ -180,6 +183,8 @@ void serial8250_init_port(struct uart_82
+@@ -178,6 +181,8 @@ void serial8250_init_port(struct uart_82
void serial8250_set_defaults(struct uart_8250_port *up);
void serial8250_console_write(struct uart_8250_port *up, const char *s,
unsigned int count);
diff --git a/patches/0014-printk-avoid-preempt_disable-for-PREEMPT_RT.patch b/patches/0014-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
index a74f3e1f6821..c58a2aa742ba 100644
--- a/patches/0014-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
+++ b/patches/0014-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
wake_up_klogd();
-@@ -2867,8 +2881,13 @@ static bool console_emit_next_record(str
+@@ -2869,8 +2883,13 @@ static bool console_emit_next_record(str
len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
}
diff --git a/patches/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch b/patches/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
index 3920e31ded38..086fb4ef9241 100644
--- a/patches/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
+++ b/patches/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
-@@ -407,6 +407,9 @@ do_translation_fault(unsigned long addr,
+@@ -417,6 +417,9 @@ do_translation_fault(unsigned long addr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (user_mode(regs))
goto bad_area;
-@@ -477,6 +480,9 @@ do_translation_fault(unsigned long addr,
+@@ -487,6 +490,9 @@ do_translation_fault(unsigned long addr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index c0ab4419d1ec..0e6dbb0e3c10 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt19
++-rt1
diff --git a/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch b/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch
index e32167ddd734..bf841eb60e43 100644
--- a/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch
+++ b/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
-@@ -820,7 +820,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -846,7 +846,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -844,7 +844,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -870,7 +870,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_timer_sync_user(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
continue;
}
-@@ -916,7 +916,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -943,7 +943,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, ret);
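Only the hunk offsets changed here; the underlying patch, as its name says, swaps preempt_disable()/preempt_enable() for migrate_disable()/migrate_enable() around the vcpu entry path. A hedged sketch of the shape (demo function, not the exact upstream code):

static void demo_vcpu_region(struct kvm_vcpu *vcpu)
{
	/* Stay on this CPU for the GIC/PMU poking, but remain
	 * preemptible so PREEMPT_RT can schedule in this region. */
	migrate_disable();
	kvm_pmu_flush_hwstate(vcpu);
	/* ... enter the guest, then sync timer and vgic state ... */
	migrate_enable();
}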
diff --git a/patches/POWERPC__Allow_to_enable_RT.patch b/patches/POWERPC__Allow_to_enable_RT.patch
index 1b935be772b7..e2e298e764ce 100644
--- a/patches/POWERPC__Allow_to_enable_RT.patch
+++ b/patches/POWERPC__Allow_to_enable_RT.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -153,6 +153,7 @@ config PPC
+@@ -144,6 +144,7 @@ config PPC
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
-@@ -221,6 +222,7 @@ config PPC
+@@ -213,6 +214,7 @@ config PPC
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
diff --git a/patches/Revert-tty-serial-Use-fifo-in-8250-console-driver.patch b/patches/Revert-tty-serial-Use-fifo-in-8250-console-driver.patch
new file mode 100644
index 000000000000..3d073a2fb1d4
--- /dev/null
+++ b/patches/Revert-tty-serial-Use-fifo-in-8250-console-driver.patch
@@ -0,0 +1,122 @@
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Wed, 26 Jan 2022 14:33:58 +0100
+Subject: [PATCH] Revert "tty: serial: Use fifo in 8250 console driver"
+
+This reverts commit 5021d709b31b8a14317998a33cbc78be0de9ab30.
+
+The patch is still a bit buggy, and this breaks some other hardware
+types. It needs to be resubmitted in a non-buggy way, making sure the
+other hardware types also continue to work properly.
+
+Fixes: 5021d709b31b ("tty: serial: Use fifo in 8250 console driver")
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reported-by: Jon Hunter <jonathanh@nvidia.com>
+Link: https://lore.kernel.org/r/Ye/1+Z8mEzbKbrqG@linutronix.de
+Link: https://lore.kernel.org/r/a1ac6254-f79e-d131-fa2a-c7ad714c6d4a@nvidia.com
+Cc: Wander Lairson Costa <wander@redhat.com>
+Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
+Cc: Jiri Slaby <jirislaby@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_port.c | 61 +++---------------------------------
+ 1 file changed, 6 insertions(+), 55 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2056,7 +2056,10 @@ static void serial8250_break_ctl(struct
+ serial8250_rpm_put(up);
+ }
+
+-static void wait_for_lsr(struct uart_8250_port *up, int bits)
++/*
++ * Wait for transmitter & holding register to empty
++ */
++static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+ {
+ unsigned int status, tmout = 10000;
+
+@@ -2073,16 +2076,6 @@ static void wait_for_lsr(struct uart_825
+ udelay(1);
+ touch_nmi_watchdog();
+ }
+-}
+-
+-/*
+- * Wait for transmitter & holding register to empty
+- */
+-static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+-{
+- unsigned int tmout;
+-
+- wait_for_lsr(up, bits);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+@@ -3333,35 +3326,6 @@ static void serial8250_console_restore(s
+ }
+
+ /*
+- * Print a string to the serial port using the device FIFO
+- *
+- * It sends fifosize bytes and then waits for the fifo
+- * to get empty.
+- */
+-static void serial8250_console_fifo_write(struct uart_8250_port *up,
+- const char *s, unsigned int count)
+-{
+- int i;
+- const char *end = s + count;
+- unsigned int fifosize = up->port.fifosize;
+- bool cr_sent = false;
+-
+- while (s != end) {
+- wait_for_lsr(up, UART_LSR_THRE);
+-
+- for (i = 0; i < fifosize && s != end; ++i) {
+- if (*s == '\n' && !cr_sent) {
+- serial_out(up, UART_TX, '\r');
+- cr_sent = true;
+- } else {
+- serial_out(up, UART_TX, *s++);
+- cr_sent = false;
+- }
+- }
+- }
+-}
+-
+-/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+@@ -3376,7 +3340,7 @@ void serial8250_console_write(struct uar
+ struct uart_8250_em485 *em485 = up->em485;
+ struct uart_port *port = &up->port;
+ unsigned long flags;
+- unsigned int ier, use_fifo;
++ unsigned int ier;
+ int locked = 1;
+
+ touch_nmi_watchdog();
+@@ -3408,20 +3372,7 @@ void serial8250_console_write(struct uar
+ mdelay(port->rs485.delay_rts_before_send);
+ }
+
+- use_fifo = (up->capabilities & UART_CAP_FIFO) &&
+- port->fifosize > 1 &&
+- (serial_port_in(port, UART_FCR) & UART_FCR_ENABLE_FIFO) &&
+- /*
+- * After we put a data in the fifo, the controller will send
+- * it regardless of the CTS state. Therefore, only use fifo
+- * if we don't use control flow.
+- */
+- !(up->port.flags & UPF_CONS_FLOW);
+-
+- if (likely(use_fifo))
+- serial8250_console_fifo_write(up, s, count);
+- else
+- uart_console_write(port, s, count, serial8250_console_putchar);
++ uart_console_write(port, s, count, serial8250_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
diff --git a/patches/Use-generic_handle_irq_safe-where-it-makes-sense.patch b/patches/Use-generic_handle_irq_safe-where-it-makes-sense.patch
index c4ebc27f8a1c..4d0518e18cad 100644
--- a/patches/Use-generic_handle_irq_safe-where-it-makes-sense.patch
+++ b/patches/Use-generic_handle_irq_safe-where-it-makes-sense.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
-@@ -1423,7 +1423,7 @@ int i2c_handle_smbus_host_notify(struct
+@@ -1424,7 +1424,7 @@ int i2c_handle_smbus_host_notify(struct
if (irq <= 0)
return -ENXIO;
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
-@@ -1367,11 +1367,8 @@ static void lan78xx_status(struct lan78x
+@@ -1537,11 +1537,8 @@ static void lan78xx_status(struct lan78x
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
diff --git a/patches/arch_arm64__Add_lazy_preempt_support.patch b/patches/arch_arm64__Add_lazy_preempt_support.patch
index 59588a92b9ce..37250a72d887 100644
--- a/patches/arch_arm64__Add_lazy_preempt_support.patch
+++ b/patches/arch_arm64__Add_lazy_preempt_support.patch
@@ -25,14 +25,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -192,6 +192,7 @@ config ARM64
+@@ -193,6 +193,7 @@ config ARM64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_PREEMPT_LAZY
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_FUNCTION_ARG_ACCESS_API
- select HAVE_FUTEX_CMPXCHG if FUTEX
+ select MMU_GATHER_RCU_TABLE_FREE
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -70,13 +70,36 @@ static inline bool __preempt_count_dec_a
diff --git a/patches/arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch b/patches/arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch
index 4b2cda86a1b7..e5fddb7cfa22 100644
--- a/patches/arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch
+++ b/patches/arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
-@@ -1125,6 +1125,8 @@ static void fpsimd_flush_thread_vl(enum
+@@ -1135,6 +1135,8 @@ static void fpsimd_flush_thread_vl(enum
void fpsimd_flush_thread(void)
{
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!system_supports_fpsimd())
return;
-@@ -1136,11 +1138,16 @@ void fpsimd_flush_thread(void)
+@@ -1146,11 +1148,16 @@ void fpsimd_flush_thread(void)
if (system_supports_sve()) {
clear_thread_flag(TIF_SVE);
diff --git a/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch b/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch
index 89da78d382d3..4fa8df86d47a 100644
--- a/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch
+++ b/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
-@@ -201,10 +201,19 @@ static void __get_cpu_fpsimd_context(voi
+@@ -206,10 +206,19 @@ static void __get_cpu_fpsimd_context(voi
*
* The double-underscore version must only be called if you know the task
* can't be preempted.
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__get_cpu_fpsimd_context();
}
-@@ -225,7 +234,10 @@ static void __put_cpu_fpsimd_context(voi
+@@ -230,7 +239,10 @@ static void __put_cpu_fpsimd_context(voi
static void put_cpu_fpsimd_context(void)
{
__put_cpu_fpsimd_context();
diff --git a/patches/block_mq__do_not_invoke_preempt_disable.patch b/patches/block_mq__do_not_invoke_preempt_disable.patch
index 788cdc45b7b8..c3817338cddc 100644
--- a/patches/block_mq__do_not_invoke_preempt_disable.patch
+++ b/patches/block_mq__do_not_invoke_preempt_disable.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -1857,14 +1857,14 @@ static void __blk_mq_delay_run_hw_queue(
+@@ -2040,14 +2040,14 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
diff --git a/patches/cgroup__use_irqsave_in_cgroup_rstat_flush_locked.patch b/patches/cgroup__use_irqsave_in_cgroup_rstat_flush_locked.patch
index 366fbe763a12..62402d218f3f 100644
--- a/patches/cgroup__use_irqsave_in_cgroup_rstat_flush_locked.patch
+++ b/patches/cgroup__use_irqsave_in_cgroup_rstat_flush_locked.patch
@@ -27,7 +27,7 @@ Link: https://www.spinics.net/lists/cgroups/msg23051.html
---
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
-@@ -156,8 +156,9 @@ static void cgroup_rstat_flush_locked(st
+@@ -153,8 +153,9 @@ static void cgroup_rstat_flush_locked(st
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
cpu);
struct cgroup *pos = NULL;
@@ -38,7 +38,7 @@ Link: https://www.spinics.net/lists/cgroups/msg23051.html
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
struct cgroup_subsys_state *css;
-@@ -169,7 +170,7 @@ static void cgroup_rstat_flush_locked(st
+@@ -166,7 +167,7 @@ static void cgroup_rstat_flush_locked(st
css->ss->css_rstat_flush(css, cpu);
rcu_read_unlock();
}
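The hunks above amount to the following locking change; a sketch reusing the names visible in the context lines:

raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
unsigned long flags;

/* was raw_spin_lock(cpu_lock); irqsave makes the flush safe
 * regardless of the caller's interrupt state */
raw_spin_lock_irqsave(cpu_lock, flags);
/* ... pop updated cgroups and flush their per-CPU stats ... */
raw_spin_unlock_irqrestore(cpu_lock, flags);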
diff --git a/patches/drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch b/patches/drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch
deleted file mode 100644
index 7d70f071b1db..000000000000
--- a/patches/drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch
+++ /dev/null
@@ -1,142 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 10 Dec 2021 21:44:17 +0100
-Subject: [PATCH] drm/i915: Don't disable interrupts and pretend a lock as been
- acquired in __timeline_mark_lock().
-
-This is a revert of commits
- d67739268cf0e ("drm/i915/gt: Mark up the nested engine-pm timeline lock as irqsafe")
- 6c69a45445af9 ("drm/i915/gt: Mark context->active_count as protected by timeline->mutex")
- 6dcb85a0ad990 ("drm/i915: Hold irq-off for the entire fake lock period")
-
-The existing code leads to a different behaviour depending on whether
-lockdep is enabled or not. Any following lock that is acquired without
-disabling interrupts (but needs to) will not be noticed by lockdep.
-
-This is not just a lockdep annotation: there is an actual mutex_t that
-is properly used as a lock, but in the case of __timeline_mark_lock()
-lockdep is only told that it is acquired while no lock has been taken.
-
-It appears that its purpose is just to satisfy the
-lockdep_assert_held() check in intel_context_mark_active(). The other
-problem with disabling interrupts is that on PREEMPT_RT it leads to
-problems, for instance later during memory allocation.
-
-Add a CONTEXT_IS_PARKING bit to intel_engine_cs and set_bit/clear_bit it
-instead of mutex_acquire/mutex_release. Use test_bit in the two
-identified spots which relied on the lockdep annotation.
-
-Cc: Peter Zijlstra <peterz@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
-Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
-Link: https://patchwork.freedesktop.org/patch/msgid/YbO8Ie1Nj7XcQPNQ@linutronix.de
----
- drivers/gpu/drm/i915/gt/intel_context.h | 3 +-
- drivers/gpu/drm/i915/gt/intel_context_types.h | 1
- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 38 +-------------------------
- drivers/gpu/drm/i915/i915_request.h | 3 +-
- 4 files changed, 7 insertions(+), 38 deletions(-)
-
---- a/drivers/gpu/drm/i915/gt/intel_context.h
-+++ b/drivers/gpu/drm/i915/gt/intel_context.h
-@@ -211,7 +211,8 @@ static inline void intel_context_enter(s
-
- static inline void intel_context_mark_active(struct intel_context *ce)
- {
-- lockdep_assert_held(&ce->timeline->mutex);
-+ lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
-+ test_bit(CONTEXT_IS_PARKING, &ce->flags));
- ++ce->active_count;
- }
-
---- a/drivers/gpu/drm/i915/gt/intel_context_types.h
-+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
-@@ -118,6 +118,7 @@ struct intel_context {
- #define CONTEXT_LRCA_DIRTY 9
- #define CONTEXT_GUC_INIT 10
- #define CONTEXT_PERMA_PIN 11
-+#define CONTEXT_IS_PARKING 12
-
- struct {
- u64 timeout_us;
---- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
-+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
-@@ -80,39 +80,6 @@ static int __engine_unpark(struct intel_
- return 0;
- }
-
--#if IS_ENABLED(CONFIG_LOCKDEP)
--
--static unsigned long __timeline_mark_lock(struct intel_context *ce)
--{
-- unsigned long flags;
--
-- local_irq_save(flags);
-- mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
--
-- return flags;
--}
--
--static void __timeline_mark_unlock(struct intel_context *ce,
-- unsigned long flags)
--{
-- mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
-- local_irq_restore(flags);
--}
--
--#else
--
--static unsigned long __timeline_mark_lock(struct intel_context *ce)
--{
-- return 0;
--}
--
--static void __timeline_mark_unlock(struct intel_context *ce,
-- unsigned long flags)
--{
--}
--
--#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
--
- static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
- {
- struct i915_request *rq = to_request(fence);
-@@ -159,7 +126,6 @@ static bool switch_to_kernel_context(str
- {
- struct intel_context *ce = engine->kernel_context;
- struct i915_request *rq;
-- unsigned long flags;
- bool result = true;
-
- /*
-@@ -214,7 +180,7 @@ static bool switch_to_kernel_context(str
- * engine->wakeref.count, we may see the request completion and retire
- * it causing an underflow of the engine->wakeref.
- */
-- flags = __timeline_mark_lock(ce);
-+ set_bit(CONTEXT_IS_PARKING, &ce->flags);
- GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
-
- rq = __i915_request_create(ce, GFP_NOWAIT);
-@@ -246,7 +212,7 @@ static bool switch_to_kernel_context(str
-
- result = false;
- out_unlock:
-- __timeline_mark_unlock(ce, flags);
-+ clear_bit(CONTEXT_IS_PARKING, &ce->flags);
- return result;
- }
-
---- a/drivers/gpu/drm/i915/i915_request.h
-+++ b/drivers/gpu/drm/i915/i915_request.h
-@@ -642,7 +642,8 @@ i915_request_timeline(const struct i915_
- {
- /* Valid only while the request is being constructed (or retired). */
- return rcu_dereference_protected(rq->timeline,
-- lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
-+ lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
-+ test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
- }
-
- static inline struct i915_gem_context *
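
For readers unfamiliar with the idiom the removed patch established: rather
than faking a lock acquisition for lockdep while interrupts are disabled, the
special state is recorded in a flag bit and the assertion accepts either the
real lock or that bit. A minimal sketch of the idiom with hypothetical names
(obj and OBJ_IS_PARKING are placeholders, not i915 code):

    #include <linux/bitops.h>
    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    #define OBJ_IS_PARKING 0

    struct obj {
            struct mutex lock;
            unsigned long flags;
            unsigned int active;
    };

    static inline void obj_mark_active(struct obj *o)
    {
            /* Either the real lock is held or we are in the parking path. */
            lockdep_assert(lockdep_is_held(&o->lock) ||
                           test_bit(OBJ_IS_PARKING, &o->flags));
            o->active++;
    }

    static void obj_park(struct obj *o)
    {
            set_bit(OBJ_IS_PARKING, &o->flags);
            obj_mark_active(o);     /* passes without faking a lock */
            clear_bit(OBJ_IS_PARKING, &o->flags);
    }
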
diff --git a/patches/fs-namespace-Boost-the-mount_lock.lock-owner-instead.patch b/patches/fs-namespace-Boost-the-mount_lock.lock-owner-instead.patch
index 42ae4096d22b..80eeb49697aa 100644
--- a/patches/fs-namespace-Boost-the-mount_lock.lock-owner-instead.patch
+++ b/patches/fs-namespace-Boost-the-mount_lock.lock-owner-instead.patch
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/namespace.c
+++ b/fs/namespace.c
-@@ -343,8 +343,24 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -344,8 +344,24 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
diff --git a/patches/fs_dcache__disable_preemption_on_i_dir_seqs_write_side.patch b/patches/fs_dcache__disable_preemption_on_i_dir_seqs_write_side.patch
index e31b2b834f29..2064e56012fa 100644
--- a/patches/fs_dcache__disable_preemption_on_i_dir_seqs_write_side.patch
+++ b/patches/fs_dcache__disable_preemption_on_i_dir_seqs_write_side.patch
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -2537,7 +2537,13 @@ EXPORT_SYMBOL(d_rehash);
+@@ -2562,7 +2562,13 @@ EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (;;) {
unsigned n = dir->i_dir_seq;
if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
-@@ -2549,6 +2555,8 @@ static inline unsigned start_dir_add(str
+@@ -2574,6 +2580,8 @@ static inline unsigned start_dir_add(str
static inline void end_dir_add(struct inode *dir, unsigned n)
{
smp_store_release(&dir->i_dir_seq, n + 2);
diff --git a/patches/fs_dcache__use_swait_queue_instead_of_waitqueue.patch b/patches/fs_dcache__use_swait_queue_instead_of_waitqueue.patch
index 02bc269c806b..7566db2b8304 100644
--- a/patches/fs_dcache__use_swait_queue_instead_of_waitqueue.patch
+++ b/patches/fs_dcache__use_swait_queue_instead_of_waitqueue.patch
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -2553,21 +2553,24 @@ static inline void end_dir_add(struct in
+@@ -2578,21 +2578,24 @@ static inline void end_dir_add(struct in
static void d_wait_lookup(struct dentry *dentry)
{
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
-@@ -2682,7 +2685,7 @@ void __d_lookup_done(struct dentry *dent
+@@ -2707,7 +2710,7 @@ void __d_lookup_done(struct dentry *dent
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/fs/namei.c
+++ b/fs/namei.c
-@@ -1633,7 +1633,7 @@ static struct dentry *__lookup_slow(cons
+@@ -1683,7 +1683,7 @@ static struct dentry *__lookup_slow(cons
{
struct dentry *dentry, *old;
struct inode *inode = dir->d_inode;
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Don't go there if it's already dead */
if (unlikely(IS_DEADDIR(inode)))
-@@ -3192,7 +3192,7 @@ static struct dentry *lookup_open(struct
+@@ -3242,7 +3242,7 @@ static struct dentry *lookup_open(struct
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ERR_PTR(-ENOENT);
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
-@@ -638,7 +638,7 @@ void nfs_prime_dcache(struct dentry *par
+@@ -639,7 +639,7 @@ void nfs_prime_dcache(struct dentry *par
unsigned long dir_verifier)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
@@ -137,7 +137,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct dentry *dentry;
struct dentry *alias;
struct inode *inode;
-@@ -1860,7 +1860,7 @@ int nfs_atomic_open(struct inode *dir, s
+@@ -1861,7 +1861,7 @@ int nfs_atomic_open(struct inode *dir, s
struct file *file, unsigned open_flags,
umode_t mode)
{
@@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto end_instantiate;
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
-@@ -678,7 +678,7 @@ static bool proc_sys_fill_cache(struct f
+@@ -696,7 +696,7 @@ static bool proc_sys_fill_cache(struct f
child = d_lookup(dir, &qname);
if (!child) {
@@ -198,7 +198,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return false;
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
-@@ -108,7 +108,7 @@ struct dentry {
+@@ -98,7 +98,7 @@ struct dentry {
union {
struct list_head d_lru; /* LRU list */
@@ -207,7 +207,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
struct list_head d_child; /* child of parent list */
struct list_head d_subdirs; /* our children */
-@@ -240,7 +240,7 @@ extern void d_set_d_op(struct dentry *de
+@@ -230,7 +230,7 @@ extern void d_set_d_op(struct dentry *de
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
diff --git a/patches/fscache-Use-only-one-fscache_object_cong_wait.patch b/patches/fscache-Use-only-one-fscache_object_cong_wait.patch
deleted file mode 100644
index 7ed718fb88fe..000000000000
--- a/patches/fscache-Use-only-one-fscache_object_cong_wait.patch
+++ /dev/null
@@ -1,122 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 28 Oct 2021 17:30:50 +0200
-Subject: [PATCH] fscache: Use only one fscache_object_cong_wait.
-
-In the commit mentioned below, fscache was converted from slow-work to
-workqueue. slow_work_enqueue() and slow_work_sleep_till_thread_needed()
-did not use a per-CPU workqueue. They chose from two global waitqueues
-depending on the SLOW_WORK_VERY_SLOW bit, which was not set, so it was
-always the same waitqueue.
-
-I can't find out how it is ensured that a waiter on a certain CPU is
-woken up by the other side. My guess is that the timeout in
-schedule_timeout() ensures that it does not wait forever (or relies on a random wake up).
-
-fscache_object_sleep_till_congested() must be invoked from preemptible
-context in order for schedule() to work. In this case this_cpu_ptr()
-should complain with CONFIG_DEBUG_PREEMPT enabled unless the thread is
-bound to one CPU.
-
-wake_up() wakes only one waiter and I'm not sure if it is guaranteed
-that only one waiter exists.
-
-Replace the per-CPU waitqueue with one global waitqueue.
-
-Fixes: 8b8edefa2fffb ("fscache: convert object to use workqueue instead of slow-work")
-Reported-by: Gregor Beck <gregor.beck@gmail.com>
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lkml.kernel.org/r/20211029083839.xwwt7jgzru3kcpii@linutronix.de
----
- fs/fscache/internal.h | 1 -
- fs/fscache/main.c | 6 ------
- fs/fscache/object.c | 13 +++++--------
- 3 files changed, 5 insertions(+), 15 deletions(-)
-
---- a/fs/fscache/internal.h
-+++ b/fs/fscache/internal.h
-@@ -81,7 +81,6 @@ extern unsigned fscache_debug;
- extern struct kobject *fscache_root;
- extern struct workqueue_struct *fscache_object_wq;
- extern struct workqueue_struct *fscache_op_wq;
--DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
-
- extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
-
---- a/fs/fscache/main.c
-+++ b/fs/fscache/main.c
-@@ -41,8 +41,6 @@ struct kobject *fscache_root;
- struct workqueue_struct *fscache_object_wq;
- struct workqueue_struct *fscache_op_wq;
-
--DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
--
- /* these values serve as lower bounds, will be adjusted in fscache_init() */
- static unsigned fscache_object_max_active = 4;
- static unsigned fscache_op_max_active = 2;
-@@ -138,7 +136,6 @@ unsigned int fscache_hash(unsigned int s
- static int __init fscache_init(void)
- {
- unsigned int nr_cpus = num_possible_cpus();
-- unsigned int cpu;
- int ret;
-
- fscache_object_max_active =
-@@ -161,9 +158,6 @@ static int __init fscache_init(void)
- if (!fscache_op_wq)
- goto error_op_wq;
-
-- for_each_possible_cpu(cpu)
-- init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));
--
- ret = fscache_proc_init();
- if (ret < 0)
- goto error_proc;
---- a/fs/fscache/object.c
-+++ b/fs/fscache/object.c
-@@ -798,6 +798,8 @@ void fscache_object_destroy(struct fscac
- }
- EXPORT_SYMBOL(fscache_object_destroy);
-
-+static DECLARE_WAIT_QUEUE_HEAD(fscache_object_cong_wait);
-+
- /*
- * enqueue an object for metadata-type processing
- */
-@@ -806,16 +808,12 @@ void fscache_enqueue_object(struct fscac
- _enter("{OBJ%x}", object->debug_id);
-
- if (fscache_get_object(object, fscache_obj_get_queue) >= 0) {
-- wait_queue_head_t *cong_wq =
-- &get_cpu_var(fscache_object_cong_wait);
-
- if (queue_work(fscache_object_wq, &object->work)) {
- if (fscache_object_congested())
-- wake_up(cong_wq);
-+ wake_up(&fscache_object_cong_wait);
- } else
- fscache_put_object(object, fscache_obj_put_queue);
--
-- put_cpu_var(fscache_object_cong_wait);
- }
- }
-
-@@ -833,16 +831,15 @@ void fscache_enqueue_object(struct fscac
- */
- bool fscache_object_sleep_till_congested(signed long *timeoutp)
- {
-- wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
- DEFINE_WAIT(wait);
-
- if (fscache_object_congested())
- return true;
-
-- add_wait_queue_exclusive(cong_wq, &wait);
-+ add_wait_queue_exclusive(&fscache_object_cong_wait, &wait);
- if (!fscache_object_congested())
- *timeoutp = schedule_timeout(*timeoutp);
-- finish_wait(cong_wq, &wait);
-+ finish_wait(&fscache_object_cong_wait, &wait);
-
- return fscache_object_congested();
- }
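
The wait/wake shape the removed patch converges on — one global waitqueue,
exclusive waiters, timeout-bounded sleep — can be sketched on its own. Names
here are hypothetical and the task-state handling simply mirrors the fscache
code quoted above:

    #include <linux/wait.h>
    #include <linux/sched.h>

    static DECLARE_WAIT_QUEUE_HEAD(cong_wait);

    /* Producer side: wake exactly one exclusive waiter. */
    static void cong_signal(void)
    {
            wake_up(&cong_wait);
    }

    /* Sleep until the condition holds or the timeout expires. Must run
     * in preemptible context because it may schedule(). */
    static bool cong_sleep(bool (*cond)(void), signed long *timeoutp)
    {
            DEFINE_WAIT(wait);

            if (cond())
                    return true;

            add_wait_queue_exclusive(&cong_wait, &wait);
            if (!cond())
                    *timeoutp = schedule_timeout(*timeoutp);
            finish_wait(&cong_wait, &wait);

            return cond();
    }
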
diff --git a/patches/jump-label__disable_if_stop_machine_is_used.patch b/patches/jump-label__disable_if_stop_machine_is_used.patch
index 0a7558543fdd..894c5d3538e4 100644
--- a/patches/jump-label__disable_if_stop_machine_is_used.patch
+++ b/patches/jump-label__disable_if_stop_machine_is_used.patch
@@ -35,6 +35,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT
+ select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
- select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/patches/kernel-locking-Use-a-pointer-in-ww_mutex_trylock.patch b/patches/kernel-locking-Use-a-pointer-in-ww_mutex_trylock.patch
deleted file mode 100644
index d0596d868185..000000000000
--- a/patches/kernel-locking-Use-a-pointer-in-ww_mutex_trylock.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 4 Nov 2021 13:12:13 +0100
-Subject: [PATCH] kernel/locking: Use a pointer in ww_mutex_trylock().
-
-mutex_acquire_nest() expects a pointer, pass the pointer.
-
-Fixes: 12235da8c80a1 ("kernel/locking: Add context to ww_mutex_trylock()")
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lkml.kernel.org/r/20211104122706.frk52zxbjorso2kv@linutronix.de
----
- kernel/locking/ww_rt_mutex.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/locking/ww_rt_mutex.c
-+++ b/kernel/locking/ww_rt_mutex.c
-@@ -26,7 +26,7 @@ int ww_mutex_trylock(struct ww_mutex *lo
-
- if (__rt_mutex_trylock(&rtm->rtmutex)) {
- ww_mutex_set_context_fastpath(lock, ww_ctx);
-- mutex_acquire_nest(&rtm->dep_map, 0, 1, ww_ctx->dep_map, _RET_IP_);
-+ mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
- return 1;
- }
-
diff --git a/patches/locking-Enable-RT_MUTEXES-by-default-on-PREEMPT_RT.patch b/patches/locking-Enable-RT_MUTEXES-by-default-on-PREEMPT_RT.patch
new file mode 100644
index 000000000000..6db72b56436f
--- /dev/null
+++ b/patches/locking-Enable-RT_MUTEXES-by-default-on-PREEMPT_RT.patch
@@ -0,0 +1,26 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 26 Jan 2022 15:31:20 +0100
+Subject: [PATCH] locking: Enable RT_MUTEXES by default on PREEMPT_RT.
+
+The CONFIG_RT_MUTEXES option is enabled by CONFIG_FUTEX and CONFIG_I2C.
+If both are disabled then CONFIG_PREEMPT_RT builds fail to compile.
+It is not possible to have a PREEMPT_RT kernel without RT_MUTEX support
+because RT_MUTEX based locking is always used.
+
+Enable CONFIG_RT_MUTEXES by default on PREEMPT_RT builds.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ init/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -2058,6 +2058,7 @@ source "arch/Kconfig"
+
+ config RT_MUTEXES
+ bool
++ default y if PREEMPT_RT
+
+ config BASE_SMALL
+ int
diff --git a/patches/md__raid5__Make_raid5_percpu_handling_RT_aware.patch b/patches/md__raid5__Make_raid5_percpu_handling_RT_aware.patch
deleted file mode 100644
index 23502f22f88b..000000000000
--- a/patches/md__raid5__Make_raid5_percpu_handling_RT_aware.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-Subject: md: raid5: Make raid5_percpu handling RT aware
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue Apr 6 16:51:31 2010 +0200
-
-From: Thomas Gleixner <tglx@linutronix.de>
-
-__raid_run_ops() disables preemption with get_cpu() around the access
-to the raid5_percpu variables. That causes scheduling while atomic
-spews on RT.
-
-Serialize the access to the percpu data with a lock and keep the code
-preemptible.
-
-Reported-by: Udo van den Heuvel <udovdh@xs4all.nl>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
-
-
-
----
- drivers/md/raid5.c | 7 +++++--
- drivers/md/raid5.h | 1 +
- 2 files changed, 6 insertions(+), 2 deletions(-)
----
---- a/drivers/md/raid5.c
-+++ b/drivers/md/raid5.c
-@@ -2217,8 +2217,9 @@ static void raid_run_ops(struct stripe_h
- struct raid5_percpu *percpu;
- unsigned long cpu;
-
-- cpu = get_cpu();
-+ cpu = get_cpu_light();
- percpu = per_cpu_ptr(conf->percpu, cpu);
-+ spin_lock(&percpu->lock);
- if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
- ops_run_biofill(sh);
- overlap_clear++;
-@@ -2277,7 +2278,8 @@ static void raid_run_ops(struct stripe_h
- if (test_and_clear_bit(R5_Overlap, &dev->flags))
- wake_up(&sh->raid_conf->wait_for_overlap);
- }
-- put_cpu();
-+ spin_unlock(&percpu->lock);
-+ put_cpu_light();
- }
-
- static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
-@@ -7102,6 +7104,7 @@ static int raid456_cpu_up_prepare(unsign
- __func__, cpu);
- return -ENOMEM;
- }
-+ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
- return 0;
- }
-
---- a/drivers/md/raid5.h
-+++ b/drivers/md/raid5.h
-@@ -635,6 +635,7 @@ struct r5conf {
- int recovery_disabled;
- /* per cpu variables */
- struct raid5_percpu {
-+ spinlock_t lock; /* Protection for -RT */
- struct page *spare_page; /* Used when checking P/Q in raid6 */
- void *scribble; /* space for constructing buffer
- * lists and performing address
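
The general RT idiom the removed patch applied — keep per-CPU data but guard
it with a per-CPU spinlock instead of relying on preemption being disabled —
looks roughly as follows. Names are hypothetical; get_cpu_light() and
put_cpu_light() come from this queue's kernel_sched__add_putget_cpu_light.patch,
and the lock is assumed to be initialized in a hotplug callback as above:

    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    struct pcpu_state {
            spinlock_t lock;        /* serializes users on PREEMPT_RT */
            void *scratch;
    };

    static DEFINE_PER_CPU(struct pcpu_state, pcpu_state);

    static void pcpu_state_use(void)
    {
            struct pcpu_state *ps;
            int cpu;

            cpu = get_cpu_light();          /* migrate_disable() on RT */
            ps = per_cpu_ptr(&pcpu_state, cpu);
            spin_lock(&ps->lock);           /* another task may preempt us */
            /* ... operate on ps->scratch while staying preemptible ... */
            spin_unlock(&ps->lock);
            put_cpu_light();
    }
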
diff --git a/patches/mm_vmalloc__Another_preempt_disable_region_which_sucks.patch b/patches/mm_vmalloc__Another_preempt_disable_region_which_sucks.patch
index 5bd3fc524a99..e303949d2625 100644
--- a/patches/mm_vmalloc__Another_preempt_disable_region_which_sucks.patch
+++ b/patches/mm_vmalloc__Another_preempt_disable_region_which_sucks.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -1922,11 +1922,12 @@ static void *new_vmap_block(unsigned int
+@@ -1924,11 +1924,12 @@ static void *new_vmap_block(unsigned int
return ERR_PTR(err);
}
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -2005,7 +2006,8 @@ static void *vb_alloc(unsigned long size
+@@ -2007,7 +2008,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -2028,7 +2030,7 @@ static void *vb_alloc(unsigned long size
+@@ -2030,7 +2032,7 @@ static void *vb_alloc(unsigned long size
break;
}
diff --git a/patches/net-Write-lock-dev_base_lock-without-disabling-botto.patch b/patches/net-Write-lock-dev_base_lock-without-disabling-botto.patch
deleted file mode 100644
index 995f88e6716c..000000000000
--- a/patches/net-Write-lock-dev_base_lock-without-disabling-botto.patch
+++ /dev/null
@@ -1,154 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 26 Nov 2021 17:15:29 +0100
-Subject: [PATCH] net: Write lock dev_base_lock without disabling bottom
- halves.
-
-The writer acquires dev_base_lock with disabled bottom halves.
-The reader can acquire dev_base_lock without disabling bottom halves
-because there is no writer in softirq context.
-
-On PREEMPT_RT the softirqs are preemptible and local_bh_disable() acts
-as a lock to ensure that resources, that are protected by disabling
-bottom halves, remain protected.
-This leads to a circular locking dependency if the lock is acquired
-with disabled bottom halves (as in write_lock_bh()) in one place and
-elsewhere with enabled bottom halves (as by read_lock() in
-netstat_show()) followed by disabling bottom halves (cxgb_get_stats()
--> t4_wr_mbox_meat_timeout() -> spin_lock_bh()), the reverse locking order.
-
-All read_lock() invocations are from sysfs callbacks which are not invoked
-from softirq context. Therefore there is no need to disable bottom
-halves while acquiring a write lock.
-
-Acquire the write lock of dev_base_lock without disabling bottom halves.
-
-Cc: stable-rt@vger.kernel.org
-Reported-by: Pei Zhang <pezhang@redhat.com>
-Reported-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211126161529.hwqbkv6z2svox3zs@linutronix.de
----
- net/core/dev.c | 16 ++++++++--------
- net/core/link_watch.c | 4 ++--
- net/core/rtnetlink.c | 8 ++++----
- net/hsr/hsr_device.c | 6 +++---
- 4 files changed, 17 insertions(+), 17 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -371,12 +371,12 @@ static void list_netdevice(struct net_de
-
- ASSERT_RTNL();
-
-- write_lock_bh(&dev_base_lock);
-+ write_lock(&dev_base_lock);
- list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
- netdev_name_node_add(net, dev->name_node);
- hlist_add_head_rcu(&dev->index_hlist,
- dev_index_hash(net, dev->ifindex));
-- write_unlock_bh(&dev_base_lock);
-+ write_unlock(&dev_base_lock);
-
- dev_base_seq_inc(net);
- }
-@@ -389,11 +389,11 @@ static void unlist_netdevice(struct net_
- ASSERT_RTNL();
-
- /* Unlink dev from the device chain */
-- write_lock_bh(&dev_base_lock);
-+ write_lock(&dev_base_lock);
- list_del_rcu(&dev->dev_list);
- netdev_name_node_del(dev->name_node);
- hlist_del_rcu(&dev->index_hlist);
-- write_unlock_bh(&dev_base_lock);
-+ write_unlock(&dev_base_lock);
-
- dev_base_seq_inc(dev_net(dev));
- }
-@@ -1272,15 +1272,15 @@ int dev_change_name(struct net_device *d
-
- netdev_adjacent_rename_links(dev, oldname);
-
-- write_lock_bh(&dev_base_lock);
-+ write_lock(&dev_base_lock);
- netdev_name_node_del(dev->name_node);
-- write_unlock_bh(&dev_base_lock);
-+ write_unlock(&dev_base_lock);
-
- synchronize_rcu();
-
-- write_lock_bh(&dev_base_lock);
-+ write_lock(&dev_base_lock);
- netdev_name_node_add(net, dev->name_node);
-- write_unlock_bh(&dev_base_lock);
-+ write_unlock(&dev_base_lock);
-
- ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
- ret = notifier_to_errno(ret);
---- a/net/core/link_watch.c
-+++ b/net/core/link_watch.c
-@@ -55,7 +55,7 @@ static void rfc2863_policy(struct net_de
- if (operstate == dev->operstate)
- return;
-
-- write_lock_bh(&dev_base_lock);
-+ write_lock(&dev_base_lock);
-
- switch(dev->link_mode) {
- case IF_LINK_MODE_TESTING:
-@@ -74,7 +74,7 @@ static void rfc2863_policy(struct net_de
-
- dev->operstate = operstate;
-
-- write_unlock_bh(&dev_base_lock);
-+ write_unlock(&dev_base_lock);
- }
-
-
---- a/net/core/rtnetlink.c
-+++ b/net/core/rtnetlink.c
-@@ -842,9 +842,9 @@ static void set_operstate(struct net_dev
- }
-
- if (dev->operstate != operstate) {
-- write_lock_bh(&dev_base_lock);
-+ write_lock(&dev_base_lock);
- dev->operstate = operstate;
-- write_unlock_bh(&dev_base_lock);
-+ write_unlock(&dev_base_lock);
- netdev_state_change(dev);
- }
- }
-@@ -2779,11 +2779,11 @@ static int do_setlink(const struct sk_bu
- if (tb[IFLA_LINKMODE]) {
- unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
-
-- write_lock_bh(&dev_base_lock);
-+ write_lock(&dev_base_lock);
- if (dev->link_mode ^ value)
- status |= DO_SETLINK_NOTIFY;
- dev->link_mode = value;
-- write_unlock_bh(&dev_base_lock);
-+ write_unlock(&dev_base_lock);
- }
-
- if (tb[IFLA_VFINFO_LIST]) {
---- a/net/hsr/hsr_device.c
-+++ b/net/hsr/hsr_device.c
-@@ -30,13 +30,13 @@ static bool is_slave_up(struct net_devic
-
- static void __hsr_set_operstate(struct net_device *dev, int transition)
- {
-- write_lock_bh(&dev_base_lock);
-+ write_lock(&dev_base_lock);
- if (dev->operstate != transition) {
- dev->operstate = transition;
-- write_unlock_bh(&dev_base_lock);
-+ write_unlock(&dev_base_lock);
- netdev_state_change(dev);
- } else {
-- write_unlock_bh(&dev_base_lock);
-+ write_unlock(&dev_base_lock);
- }
- }
-
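
The resulting rule is simple: writers that can never race with softirq-context
readers may take the rwlock without touching bottom halves. A condensed sketch
of both sides, with hypothetical names:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(base_lock);

    /* Reader: sysfs callback, process context, never softirq. */
    static void base_reader(void)
    {
            read_lock(&base_lock);
            /* ... walk the protected list ... */
            read_unlock(&base_lock);
    }

    /* Writer: since no reader runs in softirq context, plain write_lock()
     * suffices; write_lock_bh() would only create the reverse ordering
     * described in the patch above. */
    static void base_writer(void)
    {
            write_lock(&base_lock);
            /* ... mutate the protected list ... */
            write_unlock(&base_lock);
    }
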
diff --git a/patches/net-dev-Always-serialize-on-Qdisc-busylock-in-__dev_.patch b/patches/net-dev-Always-serialize-on-Qdisc-busylock-in-__dev_.patch
deleted file mode 100644
index 1c1e12aa4fd8..000000000000
--- a/patches/net-dev-Always-serialize-on-Qdisc-busylock-in-__dev_.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 30 Mar 2016 13:36:29 +0200
-Subject: [PATCH] net: dev: Always serialize on Qdisc::busylock in
- __dev_xmit_skb() on PREEMPT_RT.
-
-The root-lock is dropped before dev_hard_start_xmit() is invoked and after
-setting the __QDISC___STATE_RUNNING bit. If the Qdisc owner is preempted
-by another sender/task with a higher priority, then this new sender won't
-be able to submit packets to the NIC directly; instead they will be
-enqueued into the Qdisc. The NIC will remain idle until the Qdisc owner
-is scheduled again and finishes the job.
-
-By serializing every task on the ->busylock, the task will be
-preempted by a sender only after the Qdisc has no owner.
-
-Always serialize on the busylock on PREEMPT_RT.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lkml.kernel.org/r/YbcmKeLngWW/pb1V@linutronix.de
----
- net/core/dev.c | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -3836,8 +3836,12 @@ static inline int __dev_xmit_skb(struct
- * separate lock before trying to get qdisc main lock.
- * This permits qdisc->running owner to get the lock more
- * often and dequeue packets faster.
-+ * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
-+ * and then other tasks will only enqueue packets. The packets will be
-+ * sent after the qdisc owner is scheduled again. To prevent this
-+	 * scenario the task always serializes on the lock.
- */
-- contended = qdisc_is_running(q);
-+ contended = IS_ENABLED(CONFIG_PREEMPT_RT) || qdisc_is_running(q);
- if (unlikely(contended))
- spin_lock(&q->busylock);
-
diff --git a/patches/panic_remove_oops_id.patch b/patches/panic_remove_oops_id.patch
deleted file mode 100644
index 3813a0b370cb..000000000000
--- a/patches/panic_remove_oops_id.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Subject: panic: Remove oops_id.
-Date: Thu, 02 Dec 2021 15:27:13 +0100
-
-The oops id has been added as part of the end of trace marker for the
-kerneloops.org project. The id is used to automatically identify duplicate
-submissions of the same report. Identical looking reports with a
-different id can be considered as the same oops occurring again.
-
-The early initialisation of the oops_id can create a warning if the
-random core is not yet fully initialized. On PREEMPT_RT it is
-problematic if the id is initialized on demand from non preemptible
-context.
-
-The kernel oops project has not been available since 2017.
-Remove the oops_id and use 0 in the output in case parsers rely on it.
-
-Link: https://bugs.debian.org/953172
-Cc: Arjan van de Ven <arjan@linux.intel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/20211202142713.ofadr43tawengfw4@linutronix.de
-Link: https://lore.kernel.org/r/Ybdi16aP2NEugWHq@linutronix.de>
----
- kernel/panic.c | 19 +------------------
- 1 file changed, 1 insertion(+), 18 deletions(-)
-
---- a/kernel/panic.c
-+++ b/kernel/panic.c
-@@ -537,26 +537,9 @@ void oops_enter(void)
- trigger_all_cpu_backtrace();
- }
-
--/*
-- * 64-bit random ID for oopses:
-- */
--static u64 oops_id;
--
--static int init_oops_id(void)
--{
-- if (!oops_id)
-- get_random_bytes(&oops_id, sizeof(oops_id));
-- else
-- oops_id++;
--
-- return 0;
--}
--late_initcall(init_oops_id);
--
- static void print_oops_end_marker(void)
- {
-- init_oops_id();
-- pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
-+ pr_warn("---[ end trace %016llx ]---\n", 0ULL);
- }
-
- /*
diff --git a/patches/powerpc__Add_support_for_lazy_preemption.patch b/patches/powerpc__Add_support_for_lazy_preemption.patch
index ac61cbce9437..30e15bde0cf4 100644
--- a/patches/powerpc__Add_support_for_lazy_preemption.patch
+++ b/patches/powerpc__Add_support_for_lazy_preemption.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -237,6 +237,7 @@ config PPC
+@@ -229,6 +229,7 @@ config PPC
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -346,7 +346,7 @@ interrupt_exit_user_prepare_main(unsigne
- ti_flags = READ_ONCE(current_thread_info()->flags);
+ ti_flags = read_thread_flags();
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable();
- if (ti_flags & _TIF_NEED_RESCHED) {
@@ -92,14 +92,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
schedule();
} else {
/*
-@@ -552,11 +552,15 @@ notrace unsigned long interrupt_exit_ker
+@@ -550,11 +550,15 @@ notrace unsigned long interrupt_exit_ker
/* Returning to a kernel context with local irqs enabled. */
WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
- if (IS_ENABLED(CONFIG_PREEMPT)) {
+ if (IS_ENABLED(CONFIG_PREEMPTION)) {
/* Return to preemptible kernel context */
- if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
+ if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
if (preempt_count() == 0)
preempt_schedule_irq();
+ } else if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED_LAZY)) {
diff --git a/patches/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch b/patches/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
index 4d4d7e2cf5f9..6d52dda6a7bb 100644
--- a/patches/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
+++ b/patches/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
-@@ -178,6 +178,7 @@ config KVM_E500MC
+@@ -195,6 +195,7 @@ config KVM_E500MC
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
depends on KVM && E500
diff --git a/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch b/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
index 2777572ac4dc..839abf0c8750 100644
--- a/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
+++ b/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Special states are those that do not use the normal wait-loop pattern. See
* the comment with set_special_state().
-@@ -2009,6 +2005,81 @@ static inline int test_tsk_need_resched(
+@@ -2011,6 +2007,81 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3226,7 +3226,7 @@ unsigned long wait_task_inactive(struct
+@@ -3239,7 +3239,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -204,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
cpu_relax();
}
-@@ -3241,7 +3241,7 @@ unsigned long wait_task_inactive(struct
+@@ -3254,7 +3254,7 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/rcu-tree-Protect-rcu_rdp_is_offloaded-invocations-on.patch b/patches/rcu-tree-Protect-rcu_rdp_is_offloaded-invocations-on.patch
deleted file mode 100644
index 447f173fa18d..000000000000
--- a/patches/rcu-tree-Protect-rcu_rdp_is_offloaded-invocations-on.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue, 21 Sep 2021 23:12:50 +0200
-Subject: [PATCH] rcu/tree: Protect rcu_rdp_is_offloaded() invocations on RT
-
-Valentin reported warnings about suspicious RCU usage on RT kernels. Those
-happen when offloading of RCU callbacks is enabled:
-
- WARNING: suspicious RCU usage
- 5.13.0-rt1 #20 Not tainted
- -----------------------------
- kernel/rcu/tree_plugin.h:69 Unsafe read of RCU_NOCB offloaded state!
-
- rcu_rdp_is_offloaded (kernel/rcu/tree_plugin.h:69 kernel/rcu/tree_plugin.h:58)
- rcu_core (kernel/rcu/tree.c:2332 kernel/rcu/tree.c:2398 kernel/rcu/tree.c:2777)
- rcu_cpu_kthread (./include/linux/bottom_half.h:32 kernel/rcu/tree.c:2876)
-
-The reason is that rcu_rdp_is_offloaded() is invoked without one of the
-required protections on RT enabled kernels because local_bh_disable() does
-not disable preemption on RT.
-
-Valentin proposed to add a local lock to the code in question, but that's
-suboptimal in several aspects:
-
- 1) local locks add extra code to !RT kernels for no value.
-
- 2) All possible callsites have to be audited and amended; when affected
-    this is possibly at an outer function level due to lock nesting issues.
-
- 3) As the local lock has to be taken in the outer functions it's required
-    to release and reacquire it in the inner code sections which might
-    voluntarily schedule, e.g. rcu_do_batch().
-
-Both callsites of rcu_rdp_is_offloaded() which trigger this check invoke
-rcu_rdp_is_offloaded() in the variable declaration section right at the top
-of the functions. But the actual usage of the result is either within a
-section which provides the required protections or after such a section.
-
-So the obvious solution is to move the invocation into the code sections
-which provide the proper protections, which solves the problem for RT and
-does not have any impact on !RT kernels.
-
-Reported-by: Valentin Schneider <valentin.schneider@arm.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/rcu/tree.c | 7 ++++---
- 1 file changed, 4 insertions(+), 3 deletions(-)
-
---- a/kernel/rcu/tree.c
-+++ b/kernel/rcu/tree.c
-@@ -2276,13 +2276,13 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
- {
- unsigned long flags;
- unsigned long mask;
-- bool needwake = false;
-- const bool offloaded = rcu_rdp_is_offloaded(rdp);
-+ bool offloaded, needwake = false;
- struct rcu_node *rnp;
-
- WARN_ON_ONCE(rdp->cpu != smp_processor_id());
- rnp = rdp->mynode;
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
-+ offloaded = rcu_rdp_is_offloaded(rdp);
- if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
- rdp->gpwrap) {
-
-@@ -2444,7 +2444,7 @@ static void rcu_do_batch(struct rcu_data
- int div;
- bool __maybe_unused empty;
- unsigned long flags;
-- const bool offloaded = rcu_rdp_is_offloaded(rdp);
-+ bool offloaded;
- struct rcu_head *rhp;
- struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
- long bl, count = 0;
-@@ -2470,6 +2470,7 @@ static void rcu_do_batch(struct rcu_data
- rcu_nocb_lock(rdp);
- WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
- pending = rcu_segcblist_n_cbs(&rdp->cblist);
-+ offloaded = rcu_rdp_is_offloaded(rdp);
- div = READ_ONCE(rcu_divisor);
- div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
- bl = max(rdp->blimit, pending >> div);
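
Distilled to its shape, the fix moves the state read out of the declaration
block and under the protection it requires — no new lock and no !RT overhead.
A sketch with hypothetical names:

    #include <linux/spinlock.h>

    struct rdp_like {
            raw_spinlock_t lock;
            /* ... */
    };

    static bool offloaded_state(struct rdp_like *d);  /* needs d->lock held */

    static void report_qs(struct rdp_like *d)
    {
            unsigned long flags;
            bool offloaded;

            raw_spin_lock_irqsave(&d->lock, flags);
            /* Sample the state only while the required protection is
             * held, not in the declaration block before the lock. */
            offloaded = offloaded_state(d);
            /* ... act on 'offloaded' ... */
            raw_spin_unlock_irqrestore(&d->lock, flags);
    }
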
diff --git a/patches/rcu__Delay_RCU-selftests.patch b/patches/rcu__Delay_RCU-selftests.patch
index 0ccc1b75ec4e..c865f28b93de 100644
--- a/patches/rcu__Delay_RCU-selftests.patch
+++ b/patches/rcu__Delay_RCU-selftests.patch
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_sysrq_end(void);
--- a/init/main.c
+++ b/init/main.c
-@@ -1597,6 +1597,7 @@ static noinline void __init kernel_init_
+@@ -1600,6 +1600,7 @@ static noinline void __init kernel_init_
rcu_init_tasks_generic();
do_pre_smp_initcalls();
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
smp_init();
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
-@@ -1345,7 +1345,7 @@ static void test_rcu_tasks_callback(stru
+@@ -1657,7 +1657,7 @@ static void test_rcu_tasks_callback(stru
rttd->notrun = true;
}
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
pr_info("Running RCU-tasks wait API self tests\n");
#ifdef CONFIG_TASKS_RCU
-@@ -1382,9 +1382,7 @@ static int rcu_tasks_verify_self_tests(v
+@@ -1694,9 +1694,7 @@ static int rcu_tasks_verify_self_tests(v
return ret;
}
late_initcall(rcu_tasks_verify_self_tests);
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __init rcu_init_tasks_generic(void)
{
-@@ -1399,9 +1397,6 @@ void __init rcu_init_tasks_generic(void)
+@@ -1711,9 +1709,6 @@ void __init rcu_init_tasks_generic(void)
#ifdef CONFIG_TASKS_TRACE_RCU
rcu_spawn_tasks_trace_kthread();
#endif
diff --git a/patches/sched__Add_support_for_lazy_preemption.patch b/patches/sched__Add_support_for_lazy_preemption.patch
index a321c56578fd..646026aca3fa 100644
--- a/patches/sched__Add_support_for_lazy_preemption.patch
+++ b/patches/sched__Add_support_for_lazy_preemption.patch
@@ -59,16 +59,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/preempt.h | 54 +++++++++++++++++++++++++++--
include/linux/sched.h | 37 +++++++++++++++++++
include/linux/thread_info.h | 12 +++++-
- include/linux/trace_events.h | 5 ++
+ include/linux/trace_events.h | 10 ++++-
kernel/Kconfig.preempt | 6 +++
kernel/sched/core.c | 80 +++++++++++++++++++++++++++++++++++++++++--
kernel/sched/fair.c | 16 ++++----
kernel/sched/features.h | 3 +
kernel/sched/sched.h | 9 ++++
- kernel/trace/trace.c | 46 +++++++++++++++---------
+ kernel/trace/trace.c | 50 ++++++++++++++++----------
kernel/trace/trace_events.c | 1
- kernel/trace/trace_output.c | 14 ++++++-
- 12 files changed, 248 insertions(+), 35 deletions(-)
+ kernel/trace/trace_output.c | 16 +++++++-
+ 12 files changed, 258 insertions(+), 36 deletions(-)
---
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2009,6 +2009,43 @@ static inline int test_tsk_need_resched(
+@@ -2011,6 +2011,43 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -223,7 +223,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
-@@ -163,7 +163,17 @@ static inline int test_ti_thread_flag(st
+@@ -177,7 +177,17 @@ static __always_inline unsigned long rea
clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */
@@ -264,14 +264,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
-@@ -172,6 +174,7 @@ enum trace_flag_type {
+@@ -170,7 +172,13 @@ enum trace_flag_type {
+ TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
++#ifdef CONFIG_PREEMPT_LAZY
++ TRACE_FLAG_PREEMPT_RESCHED = 0x00,
++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x20,
++#else
++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x00,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
++#endif
TRACE_FLAG_NMI = 0x40,
-+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
+ TRACE_FLAG_BH_OFF = 0x80,
};
-
- #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -1,5 +1,11 @@
@@ -288,7 +294,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -986,6 +986,46 @@ void resched_curr(struct rq *rq)
+@@ -997,6 +997,46 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -335,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2160,6 +2200,7 @@ void migrate_disable(void)
+@@ -2171,6 +2211,7 @@ void migrate_disable(void)
preempt_disable();
this_rq()->nr_pinned++;
p->migration_disabled = 1;
@@ -343,7 +349,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);
-@@ -2190,6 +2231,7 @@ void migrate_enable(void)
+@@ -2202,6 +2243,7 @@ void migrate_enable(void)
barrier();
p->migration_disabled = 0;
this_rq()->nr_pinned--;
@@ -351,7 +357,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
-@@ -4419,6 +4461,9 @@ int sched_fork(unsigned long clone_flags
+@@ -4432,6 +4474,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -361,7 +367,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -6220,6 +6265,7 @@ static void __sched notrace __schedule(u
+@@ -6262,6 +6307,7 @@ static void __sched notrace __schedule(u
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -369,7 +375,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
-@@ -6431,6 +6477,30 @@ static void __sched notrace preempt_sche
+@@ -6473,6 +6519,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -400,7 +406,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -6444,7 +6514,8 @@ asmlinkage __visible void __sched notrac
+@@ -6486,7 +6556,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -410,7 +416,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -6477,6 +6548,9 @@ asmlinkage __visible void __sched notrac
+@@ -6519,6 +6590,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -420,7 +426,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -8657,7 +8731,9 @@ void __init init_idle(struct task_struct
+@@ -8691,7 +8765,9 @@ void __init init_idle(struct task_struct
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -478,7 +484,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -7172,7 +7172,7 @@ static void check_preempt_wakeup(struct
+@@ -7125,7 +7125,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -487,7 +493,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -11207,7 +11207,7 @@ static void task_fork_fair(struct task_s
+@@ -11160,7 +11160,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -496,7 +502,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -11234,7 +11234,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -11187,7 +11187,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (task_current(rq, p)) {
if (p->prio > oldprio)
@@ -519,7 +525,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -2300,6 +2300,15 @@ extern void reweight_task(struct task_st
+@@ -2328,6 +2328,15 @@ extern void reweight_task(struct task_st
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -537,14 +543,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2606,7 +2606,13 @@ unsigned int tracing_gen_ctx_irq_test(un
+@@ -2606,11 +2606,19 @@ unsigned int tracing_gen_ctx_irq_test(un
+ if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
+ trace_flags |= TRACE_FLAG_BH_OFF;
+
+- if (tif_need_resched())
++ if (tif_need_resched_now())
trace_flags |= TRACE_FLAG_NEED_RESCHED;
- if (test_preempt_need_resched())
- trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
-- return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
+#ifdef CONFIG_PREEMPT_LAZY
++ /* Run out of bits. Share the LAZY and PREEMPT_RESCHED */
+ if (need_resched_lazy())
+ trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
++#else
+ if (test_preempt_need_resched())
+ trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+- return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
+#endif
+
+ return (trace_flags << 24) | (min_t(unsigned int, pc & 0xff, 0xf)) |
@@ -552,12 +565,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}
-@@ -4182,15 +4188,17 @@ unsigned long trace_total_entries(struct
+@@ -4191,15 +4199,17 @@ unsigned long trace_total_entries(struct
static void print_lat_help_header(struct seq_file *m)
{
- seq_puts(m, "# _------=> CPU# \n"
-- "# / _-----=> irqs-off \n"
+- "# / _-----=> irqs-off/BH-disabled\n"
- "# | / _----=> need-resched \n"
- "# || / _---=> hardirq/softirq \n"
- "# ||| / _--=> preempt-depth \n"
@@ -566,7 +579,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- "# cmd pid |||||| time | caller \n"
- "# \\ / |||||| \\ | / \n");
+ seq_puts(m, "# _--------=> CPU# \n"
-+ "# / _-------=> irqs-off \n"
++ "# / _-------=> irqs-off/BH-disabled\n"
+ "# | / _------=> need-resched \n"
+ "# || / _-----=> need-resched-lazy\n"
+ "# ||| / _----=> hardirq/softirq \n"
@@ -579,11 +592,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -4224,14 +4232,16 @@ static void print_func_help_header_irq(s
+@@ -4233,14 +4243,16 @@ static void print_func_help_header_irq(s
print_event_info(buf, m);
-- seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
+- seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
- seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
- seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
- seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
@@ -591,7 +604,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- seq_printf(m, "# %.*s|||| / delay\n", prec, space);
- seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
- seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
-+ seq_printf(m, "# %.*s _-------=> irqs-off\n", prec, space);
++ seq_printf(m, "# %.*s _-------=> irqs-off/BH-disabled\n", prec, space);
+ seq_printf(m, "# %.*s / _------=> need-resched\n", prec, space);
+ seq_printf(m, "# %.*s| / _-----=> need-resched-lazy\n", prec, space);
+ seq_printf(m, "# %.*s|| / _----=> hardirq/softirq\n", prec, space);
@@ -624,7 +637,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
char irqs_off;
int hardirq;
int softirq;
-@@ -472,6 +473,9 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -465,9 +466,11 @@ int trace_print_lat_fmt(struct trace_seq
+ case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
+ need_resched = 'N';
+ break;
++#ifndef CONFIG_PREEMPT_LAZY
+ case TRACE_FLAG_NEED_RESCHED:
+ need_resched = 'n';
+ break;
++#endif
+ case TRACE_FLAG_PREEMPT_RESCHED:
+ need_resched = 'p';
+ break;
+@@ -476,6 +479,9 @@ int trace_print_lat_fmt(struct trace_seq
break;
}
@@ -634,7 +659,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
hardsoft_irq =
(nmi && hardirq) ? 'Z' :
nmi ? 'z' :
-@@ -480,14 +484,20 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -484,14 +490,20 @@ int trace_print_lat_fmt(struct trace_seq
softirq ? 's' :
'.' ;
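
A note on the refreshed trace_events.h hunk above: the flag byte is full, so
under CONFIG_PREEMPT_LAZY the 0x20 bit is repurposed from PREEMPT_RESCHED to
NEED_RESCHED_LAZY and the two can no longer be reported together. A
hypothetical decoder sketch (the lazy-case letter is illustrative only, not
the patch's exact output):

    #define FLAG_NEED_RESCHED	0x04
    #define FLAG_BIT5		0x20	/* PREEMPT_RESCHED or LAZY */

    static char need_resched_char(unsigned int flags, bool lazy_build)
    {
            bool need   = flags & FLAG_NEED_RESCHED;
            bool shared = flags & FLAG_BIT5;

            if (need && shared)
                    return 'N';	/* resched plus the shared bit */
            if (need)
                    return 'n';
            if (shared)
                    return lazy_build ? 'L' : 'p';	/* 'L' illustrative */
            return '.';
    }
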
diff --git a/patches/sched_introduce_migratable.patch b/patches/sched_introduce_migratable.patch
index 479cbefd7ab4..f9535b8944c4 100644
--- a/patches/sched_introduce_migratable.patch
+++ b/patches/sched_introduce_migratable.patch
@@ -26,7 +26,7 @@ Link: https://lore.kernel.org/r/20210811201354.1976839-3-valentin.schneider@arm.
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1727,6 +1727,16 @@ static __always_inline bool is_percpu_th
+@@ -1739,6 +1739,16 @@ static __always_inline bool is_percpu_th
#endif
}
diff --git a/patches/series b/patches/series
index 35a3f0dbc205..00804151d299 100644
--- a/patches/series
+++ b/patches/series
@@ -1,12 +1,7 @@
# Applied upstream
-
-###########################################################################
-# Valentin's PCP fixes
-###########################################################################
-# Temp RCU patch, Frederick is working on something, too.
-rcu-tree-Protect-rcu_rdp_is_offloaded-invocations-on.patch
-sched_introduce_migratable.patch
-arm64_mm_make_arch_faults_on_old_pte_check_for_migratability.patch
+tcp-Add-a-stub-for-sk_defer_free_flush.patch
+tcp-add-a-missing-sk_defer_free_flush-in-tcp_splice_.patch
+Revert-tty-serial-Use-fifo-in-8250-console-driver.patch
###########################################################################
# John's printk queue
@@ -29,36 +24,14 @@ arm64_mm_make_arch_faults_on_old_pte_check_for_migratability.patch
###########################################################################
# Posted and applied
###########################################################################
-net-Write-lock-dev_base_lock-without-disabling-botto.patch
fs-namespace-Boost-the-mount_lock.lock-owner-instead.patch
-0001-blk-mq-Add-blk_mq_complete_request_direct.patch
-0002-mmc-core-Use-blk_mq_complete_request_direct.patch
-drm-i915-Don-t-disable-interrupts-and-pretend-a-lock.patch
-kernel-locking-Use-a-pointer-in-ww_mutex_trylock.patch
-net-dev-Always-serialize-on-Qdisc-busylock-in-__dev_.patch
-u64_stats-Disable-preemption-on-32bit-UP-SMP-PREEMPT.patch
-
-# lockdep
-0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
-0002-locking-Remove-rt_rwlock_is_contended.patch
-0003-locking-rtmutex-Squash-self-deadlock-check-for-ww_rt.patch
-0004-locking-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_m.patch
-0005-lockdep-Remove-softirq-accounting-on-PREEMPT_RT.patch
-0006-lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch
-0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch
-0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch
-0009-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch
-0010-x86-mm-Include-spinlock_t-definition-in-pgtable.patch
-0011-locking-Allow-to-include-asm-spinlock_types.h-from-l.patch
###########################################################################
# Posted
###########################################################################
irq_poll-Use-raise_softirq_irqoff-in-cpu_dead-notifi.patch
smp_wake_ksoftirqd_on_preempt_rt_instead_do_softirq.patch
-fscache-Use-only-one-fscache_object_cong_wait.patch
tcp-Don-t-acquire-inet_listen_hashbucket-lock-with-d.patch
-panic_remove_oops_id.patch
locking-local_lock-Make-the-empty-local_lock_-functi.patch
# sched
@@ -72,8 +45,6 @@ locking-local_lock-Make-the-empty-local_lock_-functi.patch
0008_kernel_fork_use_is_enabled_in_account_kernel_stack.patch
# random
-0001_random_remove_unused_irq_flags_argument_from_add_interrupt_randomness.patch
-0002_irq_remove_unsued_flags_argument_from___handle_irq_event_percpu.patch
0003_random_split_add_interrupt_randomness.patch
0004_random_move_the_fast_pool_reset_into_the_caller.patch
0005_random_defer_processing_of_randomness_on_preempt_rt.patch
@@ -90,6 +61,7 @@ locking-local_lock-Make-the-empty-local_lock_-functi.patch
cgroup__use_irqsave_in_cgroup_rstat_flush_locked.patch
mm__workingset__replace_IRQ-off_check_with_a_lockdep_assert..patch
softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
+locking-Enable-RT_MUTEXES-by-default-on-PREEMPT_RT.patch
# These two need some feedback.
genirq-Provide-generic_handle_irq_safe.patch
@@ -117,7 +89,6 @@ sched-Make-preempt_enable_no_resched-behave-like-pre.patch
# cpu-light
kernel_sched__add_putget_cpu_light.patch
block_mq__do_not_invoke_preempt_disable.patch
-md__raid5__Make_raid5_percpu_handling_RT_aware.patch
scsi_fcoe__Make_RT_aware..patch
mm_vmalloc__Another_preempt_disable_region_which_sucks.patch
sunrpc__Make_svc_xprt_do_enqueue_use_get_cpu_light.patch
@@ -174,17 +145,6 @@ drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch
generic-softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch
softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch
-# zsmalloc, applied, akpm
-0001_zsmalloc_introduce_some_helper_functions.patch
-0002_zsmalloc_rename_zs_stat_type_to_class_stat_type.patch
-0003_zsmalloc_decouple_class_actions_from_zspage_works.patch
-0004_zsmalloc_introduce_obj_allocated.patch
-0005_zsmalloc_move_huge_compressed_obj_from_page_to_zspage.patch
-0006_zsmalloc_remove_zspage_isolation_for_migration.patch
-0007_locking_rwlocks_introduce_write_lock_nested.patch
-0008_zsmalloc_replace_per_zpage_lock_with_pool_migrate_lock.patch
-0009_zsmalloc_replace_get_cpu_var_with_local_lock.patch
-
###########################################################################
# Lazy preemption
###########################################################################
@@ -199,6 +159,12 @@ arch_arm64__Add_lazy_preempt_support.patch
###########################################################################
# ARM/ARM64
###########################################################################
+# Valentin's fixes
+###########################################################################
+sched_introduce_migratable.patch
+arm64_mm_make_arch_faults_on_old_pte_check_for_migratability.patch
+
+###########################################################################
ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch
arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch
diff --git a/patches/signal__Revert_ptrace_preempt_magic.patch b/patches/signal__Revert_ptrace_preempt_magic.patch
index 74fa095b3fca..2e08ecc7e403 100644
--- a/patches/signal__Revert_ptrace_preempt_magic.patch
+++ b/patches/signal__Revert_ptrace_preempt_magic.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -2271,16 +2271,8 @@ static void ptrace_stop(int exit_code, i
+@@ -2275,16 +2275,8 @@ static void ptrace_stop(int exit_code, i
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
diff --git a/patches/signal_x86__Delay_calling_signals_in_atomic.patch b/patches/signal_x86__Delay_calling_signals_in_atomic.patch
index 81f9b8fb5fa7..7d0f64da6850 100644
--- a/patches/signal_x86__Delay_calling_signals_in_atomic.patch
+++ b/patches/signal_x86__Delay_calling_signals_in_atomic.patch
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
typedef sigset_t compat_sigset_t;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1078,6 +1078,10 @@ struct task_struct {
+@@ -1087,6 +1087,10 @@ struct task_struct {
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -1324,6 +1324,34 @@ force_sig_info_to_task(struct kernel_sig
+@@ -1327,6 +1327,34 @@ force_sig_info_to_task(struct kernel_sig
struct k_sigaction *action;
int sig = info->si_signo;
diff --git a/patches/softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch b/patches/softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch
index a3360bb4dde5..e5614969ebcc 100644
--- a/patches/softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch
+++ b/patches/softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
{
-@@ -820,10 +822,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most
+@@ -821,10 +823,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most
void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;
diff --git a/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch b/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
index 61d14b4cbf41..9a8405ae4dea 100644
--- a/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
+++ b/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -554,6 +554,22 @@ extern void __raise_softirq_irqoff(unsig
+@@ -605,6 +605,22 @@ extern void __raise_softirq_irqoff(unsig
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
diff --git a/patches/softirq__Check_preemption_after_reenabling_interrupts.patch b/patches/softirq__Check_preemption_after_reenabling_interrupts.patch
index 1171e8581d01..ce29ba9ec4a4 100644
--- a/patches/softirq__Check_preemption_after_reenabling_interrupts.patch
+++ b/patches/softirq__Check_preemption_after_reenabling_interrupts.patch
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* CONFIG_PREEMPT_COUNT */
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3068,6 +3068,7 @@ static void __netif_reschedule(struct Qd
+@@ -2985,6 +2985,7 @@ static void __netif_reschedule(struct Qd
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __netif_schedule(struct Qdisc *q)
-@@ -3130,6 +3131,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -3047,6 +3048,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -6404,12 +6406,14 @@ static void net_rps_action_and_irq_enabl
+@@ -5742,12 +5744,14 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -6485,6 +6489,7 @@ void __napi_schedule(struct napi_struct
+@@ -5823,6 +5827,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -11316,6 +11321,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -10647,6 +10652,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
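The lines this patch adds are elided in the hunks above; judging by the patch title and the touched call sites, each local_irq_restore()/local_irq_enable() is followed by the RT tree's preempt_check_resched_rt() helper. A sketch of the pattern (helper name assumed from the RT series, not shown in this diff):

    local_irq_restore(flags);
    /* On RT, reenabling interrupts does not fold a preemption request
     * raised while they were off, so check for one explicitly: */
    preempt_check_resched_rt();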
diff --git a/patches/sunrpc__Make_svc_xprt_do_enqueue_use_get_cpu_light.patch b/patches/sunrpc__Make_svc_xprt_do_enqueue_use_get_cpu_light.patch
index 324a7565e2ff..e6718bb2b85f 100644
--- a/patches/sunrpc__Make_svc_xprt_do_enqueue_use_get_cpu_light.patch
+++ b/patches/sunrpc__Make_svc_xprt_do_enqueue_use_get_cpu_light.patch
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
-@@ -441,7 +441,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -440,7 +440,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
return;
@@ -47,12 +47,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
atomic_long_inc(&pool->sp_stats.packets);
-@@ -465,7 +465,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -464,7 +464,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
rqstp = NULL;
out_unlock:
rcu_read_unlock();
- put_cpu();
+ put_cpu_light();
- trace_svc_xprt_do_enqueue(xprt, rqstp);
+ trace_svc_xprt_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
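get_cpu_light()/put_cpu_light() are RT-tree helpers rather than upstream API. Their assumed definitions (from memory of the RT series, not part of this diff): they pin the task to its CPU with migrate_disable() instead of disabling preemption, so the code between them may still take sleeping spinlocks on PREEMPT_RT while keeping a stable CPU id for svc_pool_for_cpu().

    /* Assumed RT-tree definitions (hypothetical reconstruction): */
    #define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
    #define put_cpu_light()		migrate_enable()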
diff --git a/patches/tcp-Add-a-stub-for-sk_defer_free_flush.patch b/patches/tcp-Add-a-stub-for-sk_defer_free_flush.patch
new file mode 100644
index 000000000000..f5ac7fa6c913
--- /dev/null
+++ b/patches/tcp-Add-a-stub-for-sk_defer_free_flush.patch
@@ -0,0 +1,43 @@
+From: Gal Pressman <gal@nvidia.com>
+Date: Thu, 20 Jan 2022 14:34:40 +0200
+Subject: [PATCH] tcp: Add a stub for sk_defer_free_flush()
+
+When compiling the kernel with CONFIG_INET disabled,
+sk_defer_free_flush() should be defined as a nop.
+
+This resolves the following compilation error:
+ ld: net/core/sock.o: in function `sk_defer_free_flush':
+ ./include/net/tcp.h:1378: undefined reference to `__sk_defer_free_flush'
+
+Fixes: 79074a72d335 ("net: Flush deferred skb free on socket destroy")
+Reported-by: kernel test robot <lkp@intel.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Gal Pressman <gal@nvidia.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20220120123440.9088-1-gal@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/tcp.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1369,6 +1369,7 @@ static inline bool tcp_checksum_complete
+
+ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
++#ifdef CONFIG_INET
+ void __sk_defer_free_flush(struct sock *sk);
+
+ static inline void sk_defer_free_flush(struct sock *sk)
+@@ -1377,6 +1378,9 @@ static inline void sk_defer_free_flush(s
+ return;
+ __sk_defer_free_flush(sk);
+ }
++#else
++static inline void sk_defer_free_flush(struct sock *sk) {}
++#endif
+
+ int tcp_filter(struct sock *sk, struct sk_buff *skb);
+ void tcp_set_state(struct sock *sk, int state);
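The resulting guard in include/net/tcp.h follows the usual shape for config-dependent helpers: the real declaration plus an inline fast path under CONFIG_INET, and an empty static inline otherwise so that callers compile unchanged. A sketch of the whole construct (the llist_empty() test inside the inline is elided context above and assumed from the surrounding tree):

    #ifdef CONFIG_INET
    void __sk_defer_free_flush(struct sock *sk);

    static inline void sk_defer_free_flush(struct sock *sk)
    {
    	if (llist_empty(&sk->defer_list))	/* assumed elided line */
    		return;
    	__sk_defer_free_flush(sk);
    }
    #else
    static inline void sk_defer_free_flush(struct sock *sk) {}
    #endif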
diff --git a/patches/tcp-add-a-missing-sk_defer_free_flush-in-tcp_splice_.patch b/patches/tcp-add-a-missing-sk_defer_free_flush-in-tcp_splice_.patch
new file mode 100644
index 000000000000..40691bbaf231
--- /dev/null
+++ b/patches/tcp-add-a-missing-sk_defer_free_flush-in-tcp_splice_.patch
@@ -0,0 +1,29 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 20 Jan 2022 04:45:30 -0800
+Subject: [PATCH] tcp: add a missing sk_defer_free_flush() in tcp_splice_read()
+
+Without it, splice users can hit the warning
+added in commit 79074a72d335 ("net: Flush deferred skb free on socket destroy")
+
+Fixes: f35f821935d8 ("tcp: defer skb freeing after socket lock is released")
+Fixes: 79074a72d335 ("net: Flush deferred skb free on socket destroy")
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Gal Pressman <gal@nvidia.com>
+Link: https://lore.kernel.org/r/20220120124530.925607-1-eric.dumazet@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/ipv4/tcp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -842,6 +842,7 @@ ssize_t tcp_splice_read(struct socket *s
+ }
+
+ release_sock(sk);
++ sk_defer_free_flush(sk);
+
+ if (spliced)
+ return spliced;
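The fix is the single added line; the pattern it completes, as a sketch with the surrounding logic abbreviated: skbs freed while the socket lock is held are deferred to a per-socket list, so every exit path that drops the lock has to flush that list before returning to the caller.

    release_sock(sk);
    sk_defer_free_flush(sk);	/* free skbs deferred while the lock was held */

    if (spliced)
    	return spliced;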
diff --git a/patches/tty_serial_pl011__Make_the_locking_work_on_RT.patch b/patches/tty_serial_pl011__Make_the_locking_work_on_RT.patch
index c3e2b3f9186c..49a0d9ab9779 100644
--- a/patches/tty_serial_pl011__Make_the_locking_work_on_RT.patch
+++ b/patches/tty_serial_pl011__Make_the_locking_work_on_RT.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
-@@ -2336,18 +2336,24 @@ pl011_console_write(struct console *co,
+@@ -2279,18 +2279,24 @@ pl011_console_write(struct console *co,
{
struct uart_amba_port *uap = amba_ports[co->index];
unsigned int old_cr = 0, new_cr;
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First save the CR then disable the interrupts
-@@ -2373,8 +2379,7 @@ pl011_console_write(struct console *co,
+@@ -2316,8 +2322,7 @@ pl011_console_write(struct console *co,
pl011_write(old_cr, uap, REG_CR);
if (locked)
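The hunk bodies above only show fragments; the locking change itself is, as far as memory of the RT tree serves (assumed, not quoted), to take the port lock with the irqsave variants instead of a bare local_irq_save(), falling back to a trylock when called from sysrq or oops context so the console path cannot block there:

    /* Assumed RT variant of the locking in pl011_console_write(): */
    if (uap->port.sysrq || oops_in_progress)
    	locked = spin_trylock_irqsave(&uap->port.lock, flags);
    else
    	spin_lock_irqsave(&uap->port.lock, flags);
    /* ... console output ... */
    if (locked)
    	spin_unlock_irqrestore(&uap->port.lock, flags);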
diff --git a/patches/u64_stats-Disable-preemption-on-32bit-UP-SMP-PREEMPT.patch b/patches/u64_stats-Disable-preemption-on-32bit-UP-SMP-PREEMPT.patch
deleted file mode 100644
index c89279153052..000000000000
--- a/patches/u64_stats-Disable-preemption-on-32bit-UP-SMP-PREEMPT.patch
+++ /dev/null
@@ -1,152 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 17 Aug 2020 12:28:10 +0200
-Subject: [PATCH] u64_stats: Disable preemption on 32bit UP+SMP PREEMPT_RT
- during updates.
-
-On PREEMPT_RT the seqcount_t for synchronisation is required on 32bit
-architectures even on UP because the softirq (and the threaded IRQ handler) can
-be preempted.
-
-With the seqcount_t for synchronisation, a reader with higher priority can
-preempt the writer and then spin endlessly in read_seqcount_begin() while the
-writer can't make progress.
-
-To avoid such a lock up on PREEMPT_RT the writer must disable preemption during
-the update. There is no need to disable interrupts because no writer is using
-this API in hard-IRQ context on PREEMPT_RT.
-
-Disable preemption on 32bit-RT within the u64_stats write section.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/YbO4x7vRoDGUWxrv@linutronix.de
----
- include/linux/u64_stats_sync.h | 42 +++++++++++++++++++++++++++--------------
- 1 file changed, 28 insertions(+), 14 deletions(-)
-
---- a/include/linux/u64_stats_sync.h
-+++ b/include/linux/u64_stats_sync.h
-@@ -66,7 +66,7 @@
- #include <linux/seqlock.h>
-
- struct u64_stats_sync {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- seqcount_t seq;
- #endif
- };
-@@ -125,7 +125,7 @@ static inline void u64_stats_inc(u64_sta
- }
- #endif
-
--#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- #define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
- #else
- static inline void u64_stats_init(struct u64_stats_sync *syncp)
-@@ -135,15 +135,19 @@ static inline void u64_stats_init(struct
-
- static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+ preempt_disable();
- write_seqcount_begin(&syncp->seq);
- #endif
- }
-
- static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- write_seqcount_end(&syncp->seq);
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+ preempt_enable();
- #endif
- }
-
-@@ -152,8 +156,11 @@ u64_stats_update_begin_irqsave(struct u6
- {
- unsigned long flags = 0;
-
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-- local_irq_save(flags);
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+ preempt_disable();
-+ else
-+ local_irq_save(flags);
- write_seqcount_begin(&syncp->seq);
- #endif
- return flags;
-@@ -163,15 +170,18 @@ static inline void
- u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
- unsigned long flags)
- {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- write_seqcount_end(&syncp->seq);
-- local_irq_restore(flags);
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+ preempt_enable();
-+ else
-+ local_irq_restore(flags);
- #endif
- }
-
- static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- return read_seqcount_begin(&syncp->seq);
- #else
- return 0;
-@@ -180,7 +190,7 @@ static inline unsigned int __u64_stats_f
-
- static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
- preempt_disable();
- #endif
- return __u64_stats_fetch_begin(syncp);
-@@ -189,7 +199,7 @@ static inline unsigned int u64_stats_fet
- static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- unsigned int start)
- {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- return read_seqcount_retry(&syncp->seq, start);
- #else
- return false;
-@@ -199,7 +209,7 @@ static inline bool __u64_stats_fetch_ret
- static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- unsigned int start)
- {
--#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
- preempt_enable();
- #endif
- return __u64_stats_fetch_retry(syncp, start);
-@@ -213,7 +223,9 @@ static inline bool u64_stats_fetch_retry
- */
- static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
-+ preempt_disable();
-+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
- local_irq_disable();
- #endif
- return __u64_stats_fetch_begin(syncp);
-@@ -222,7 +234,9 @@ static inline unsigned int u64_stats_fet
- static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
- unsigned int start)
- {
--#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
-+ preempt_enable();
-+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
- local_irq_enable();
- #endif
- return __u64_stats_fetch_retry(syncp, start);
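The removed patch text above already spells out the write-side rule; restated as a compact sketch taken directly from the deleted hunks: on 32bit PREEMPT_RT the seqcount write section runs with preemption disabled, so a higher-priority reader can no longer preempt the writer and spin forever in read_seqcount_begin().

    static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
    {
    #if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
    	if (IS_ENABLED(CONFIG_PREEMPT_RT))
    		preempt_disable();	/* keep a higher-prio reader from starving the writer */
    	write_seqcount_begin(&syncp->seq);
    #endif
    }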
diff --git a/patches/x86__Enable_RT_also_on_32bit.patch b/patches/x86__Enable_RT_also_on_32bit.patch
index cacc949ad4fa..f52dde183193 100644
--- a/patches/x86__Enable_RT_also_on_32bit.patch
+++ b/patches/x86__Enable_RT_also_on_32bit.patch
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
-@@ -109,6 +108,7 @@ config X86
+@@ -110,6 +109,7 @@ config X86
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
diff --git a/patches/x86__kvm_Require_const_tsc_for_RT.patch b/patches/x86__kvm_Require_const_tsc_for_RT.patch
index 1d473b6265e4..be11d3e3139f 100644
--- a/patches/x86__kvm_Require_const_tsc_for_RT.patch
+++ b/patches/x86__kvm_Require_const_tsc_for_RT.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -8655,6 +8655,12 @@ int kvm_arch_init(void *opaque)
+@@ -8742,6 +8742,12 @@ int kvm_arch_init(void *opaque)
goto out;
}