author    | Ard Biesheuvel <ardb@kernel.org> | 2022-01-10 09:54:22 +0100
committer | Ard Biesheuvel <ardb@kernel.org> | 2022-01-25 09:53:52 +0100
commit    | d31e23aff011d96278f4dbc22f2ec5db433eabaf (patch)
tree      | 7da6315ac0b3016db684d3be4caac44f45ebd802 /arch/arm/include/asm/mmu_context.h
parent    | aa0a20f521516ba83ea29b510fcc12fb35920b48 (diff)
download  | linux-next-d31e23aff011d96278f4dbc22f2ec5db433eabaf.tar.gz
ARM: mm: make vmalloc_seq handling SMP safe
Rework the vmalloc_seq handling so it can be used safely under SMP, as
we started using it to ensure that vmap'ed stacks are guaranteed to be
mapped by the active mm before switching to a task, and here we need to
ensure that changes to the page tables are visible to other CPUs when
they observe a change in the sequence count.
Since LPAE needs none of this, fold a check against it into the
vmalloc_seq counter check after breaking it out into a separate static
inline helper.
Given that vmap'ed stacks are now also supported on !SMP configurations,
let's drop the WARN() that could potentially now fire spuriously.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
-rw-r--r-- | arch/arm/include/asm/mmu_context.h | 22
1 file changed, 20 insertions, 2 deletions
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 84e58956fcab..db2cb06aa8cf 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -23,6 +23,16 @@
 
 void __check_vmalloc_seq(struct mm_struct *mm);
 
+#ifdef CONFIG_MMU
+static inline void check_vmalloc_seq(struct mm_struct *mm)
+{
+	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
+	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
+		     atomic_read(&init_mm.context.vmalloc_seq)))
+		__check_vmalloc_seq(mm);
+}
+#endif
+
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
@@ -52,8 +62,7 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
-		__check_vmalloc_seq(mm);
+	check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
@@ -129,6 +138,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #endif
 }
 
+#ifdef CONFIG_VMAP_STACK
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+	if (mm != &init_mm)
+		check_vmalloc_seq(mm);
+}
+#define enter_lazy_tlb enter_lazy_tlb
+#endif
+
 #include <asm-generic/mmu_context.h>
 
 #endif
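The ordering requirement spelled out in the commit message (a CPU that observes a new vmalloc_seq value must also observe the page-table updates published before it) amounts to a release store on the writer side paired with an acquire load on the reader side. The sketch below models that pairing with plain C11 atomics. It is an illustration of the pattern only, not the kernel code: the real reader, __check_vmalloc_seq(), is defined outside this header and is not part of the hunk above, and every identifier in the sketch is invented for the example.

```c
/*
 * Minimal user-space model of the ordering contract described above, using
 * C11 atomics rather than the kernel's atomic_t API.  All names here
 * (master_table, master_seq, struct fake_mm, publish_mapping, sync_mm) are
 * made up for illustration.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define NR_ENTRIES 4

static long master_table[NR_ENTRIES];	/* stand-in for init_mm's top-level entries */
static atomic_int master_seq;		/* stand-in for init_mm.context.vmalloc_seq */

struct fake_mm {
	long table[NR_ENTRIES];		/* stand-in for this mm's copy of those entries */
	atomic_int seq;			/* stand-in for mm->context.vmalloc_seq */
};

/*
 * Writer side: publish a new mapping, then bump the counter with release
 * semantics so that anyone who sees the new count also sees the new entry.
 */
static void publish_mapping(int idx, long entry)
{
	master_table[idx] = entry;
	atomic_fetch_add_explicit(&master_seq, 1, memory_order_release);
}

/*
 * Reader side: the moral equivalent of check_vmalloc_seq() followed by the
 * copy step.  The acquire load pairs with the release increment above.
 */
static void sync_mm(struct fake_mm *mm)
{
	int seq;

	do {
		seq = atomic_load_explicit(&master_seq, memory_order_acquire);
		if (seq == atomic_load_explicit(&mm->seq, memory_order_relaxed))
			return;		/* fast path: nothing changed */

		memcpy(mm->table, master_table, sizeof(master_table));
		atomic_store_explicit(&mm->seq, seq, memory_order_release);

		/*
		 * If a writer raced with the copy, the counts no longer
		 * match and we go around again.
		 */
	} while (seq != atomic_load_explicit(&master_seq, memory_order_acquire));
}

int main(void)
{
	struct fake_mm mm = { .seq = 0 };

	publish_mapping(2, 0xabcd);	/* e.g. mappings backing a vmap'ed stack */
	sync_mm(&mm);			/* e.g. what enter_lazy_tlb() triggers */

	printf("entry 2 = %#lx, seq = %d\n", mm.table[2], atomic_load(&mm.seq));
	return 0;
}
```

Note that the hunk above only adds the fast-path comparison (now using atomic_read() on both counters) plus the LPAE and lazy-TLB wrappers around it; the copy-and-resynchronize step that the sketch folds into sync_mm() is left to __check_vmalloc_seq().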