When an active_mm is shot down, we switch it to the init_mm and evict it from the percpu active mm array. Signed-off-by: Xu Lu --- arch/riscv/include/asm/mmu_context.h | 5 ++++ arch/riscv/include/asm/tlbflush.h | 11 +++++++++ arch/riscv/mm/context.c | 19 ++++++++++++++++ arch/riscv/mm/tlbflush.c | 34 ++++++++++++++++++++++++---- 4 files changed, 64 insertions(+), 5 deletions(-) diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h index 8c4bc49a3a0f5..bc73cc3262ae6 100644 --- a/arch/riscv/include/asm/mmu_context.h +++ b/arch/riscv/include/asm/mmu_context.h @@ -16,6 +16,11 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *task); +#ifdef CONFIG_RISCV_LAZY_TLB_FLUSH +#define arch_do_shoot_lazy_tlb arch_do_shoot_lazy_tlb +void arch_do_shoot_lazy_tlb(void *arg); +#endif + #define activate_mm activate_mm static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 3f83fd5ef36db..e7365a53265a6 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -15,6 +15,11 @@ #define FLUSH_TLB_NO_ASID ((unsigned long)-1) #ifdef CONFIG_MMU +static inline unsigned long get_mm_asid(struct mm_struct *mm) +{ + return mm ? 
cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID; +} + static inline void local_flush_tlb_all(void) { __asm__ __volatile__ ("sfence.vma" : : : "memory"); @@ -86,11 +91,17 @@ struct tlb_info { DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_info, tlbinfo); void local_load_tlb_mm(struct mm_struct *mm); +void local_flush_tlb_mm(struct mm_struct *mm); #else /* CONFIG_RISCV_LAZY_TLB_FLUSH */ static inline void local_load_tlb_mm(struct mm_struct *mm) {} +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ + local_flush_tlb_all_asid(get_mm_asid(mm)); +} + #endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */ #else /* CONFIG_MMU */ diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index a7cf36ad34678..3335080e5f720 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -274,6 +274,25 @@ static int __init asids_init(void) return 0; } early_initcall(asids_init); + +#ifdef CONFIG_RISCV_LAZY_TLB_FLUSH +void arch_do_shoot_lazy_tlb(void *arg) +{ + struct mm_struct *mm = arg; + + if (current->active_mm == mm) { + WARN_ON_ONCE(current->mm); + current->active_mm = &init_mm; + switch_mm(mm, &init_mm, current); + } + + if (!static_branch_unlikely(&use_asid_allocator) || !mm) + return; + + local_flush_tlb_mm(mm); +} +#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */ + #else static inline void set_mm(struct mm_struct *prev, struct mm_struct *next, unsigned int cpu) diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 4b2ce06cbe6bd..a47bacf5801ab 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -164,11 +164,6 @@ static void __ipi_flush_tlb_range_asid(void *info) local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid); } -static inline unsigned long get_mm_asid(struct mm_struct *mm) -{ - return mm ? 
cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID; -} - static void __flush_tlb_range(struct mm_struct *mm, const struct cpumask *cmask, unsigned long start, unsigned long size, @@ -352,4 +347,33 @@ void local_load_tlb_mm(struct mm_struct *mm) } } +void local_flush_tlb_mm(struct mm_struct *mm) +{ + struct tlb_info *info = this_cpu_ptr(&tlbinfo); + struct tlb_context *contexts = info->contexts; + unsigned long asid = get_mm_asid(mm); + unsigned int i; + + if (!mm || mm == info->active_mm) { + local_flush_tlb_all_asid(asid); + return; + } + + for (i = 0; i < MAX_LOADED_MM; i++) { + if (contexts[i].mm != mm) + continue; + + write_lock(&info->rwlock); + contexts[i].mm = NULL; + contexts[i].gen = 0; + write_unlock(&info->rwlock); + + cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(mm)); + mmdrop_lazy_mm(mm); + break; + } + + local_flush_tlb_all_asid(asid); +} + #endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */ -- 2.20.1