From: Lance Yang

Add a flush_tlb_multi_implies_ipi_broadcast() callback to pv_mmu_ops to
explicitly track whether flush_tlb_multi() IPIs provide sufficient
synchronization for GUP-fast when freeing or unsharing page tables.

Pass freed_tables || unshared_tables to flush_tlb_mm_range(), so that
lazy-TLB CPUs receive IPIs and flush their paging-structure caches when
page tables are either freed or unshared:

	flush_tlb_mm_range(..., freed_tables || unshared_tables);
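For illustration, generic MM code could consult the new helper roughly as
follows (a minimal sketch, not part of this patch: the function name
tlb_sync_gup_fast() is hypothetical, while tlb_remove_table_sync_one() is
the existing broadcast-IPI fallback used to synchronize against GUP-fast):

	/*
	 * Hypothetical helper: skip the extra synchronization IPI when
	 * the TLB flush for freed/unshared page tables already sent one.
	 */
	static void tlb_sync_gup_fast(void)
	{
		/*
		 * GUP-fast runs with IRQs disabled, so any CPU that has
		 * taken the flush IPI can no longer be walking the old
		 * page tables.
		 */
		if (tlb_table_flush_implies_ipi_broadcast())
			return;

		/* No IPI was implied by the flush; broadcast one now. */
		tlb_remove_table_sync_one();
	}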
Suggested-by: David Hildenbrand (Red Hat)
Signed-off-by: Lance Yang
---
 arch/x86/include/asm/paravirt_types.h |  6 ++++++
 arch/x86/include/asm/tlb.h            | 19 ++++++++++++++++++-
 arch/x86/kernel/paravirt.c            | 10 ++++++++++
 3 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 3502939415ad..a5bd0983da1f 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -133,6 +133,12 @@ struct pv_mmu_ops {
 	void (*flush_tlb_multi)(const struct cpumask *cpus,
 				const struct flush_tlb_info *info);
 
+	/*
+	 * Indicates whether flush_tlb_multi IPIs provide sufficient
+	 * synchronization for GUP-fast when freeing or unsharing page tables.
+	 */
+	bool (*flush_tlb_multi_implies_ipi_broadcast)(void);
+
 	/* Hook for intercepting the destruction of an mm_struct. */
 	void (*exit_mmap)(struct mm_struct *mm);
 	void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 866ea78ba156..3a7cdfdcea8e 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -5,10 +5,26 @@
 #define tlb_flush tlb_flush
 static inline void tlb_flush(struct mmu_gather *tlb);
 
+#define tlb_table_flush_implies_ipi_broadcast tlb_table_flush_implies_ipi_broadcast
+static inline bool tlb_table_flush_implies_ipi_broadcast(void);
+
 #include <asm-generic/tlb.h>
 #include <linux/kernel.h>
 #include <vdso/bits.h>
 #include <vdso/page.h>
+#include <asm/paravirt.h>
+
+static inline bool tlb_table_flush_implies_ipi_broadcast(void)
+{
+#ifdef CONFIG_PARAVIRT
+	if (pv_ops.mmu.flush_tlb_multi_implies_ipi_broadcast)
+		return pv_ops.mmu.flush_tlb_multi_implies_ipi_broadcast();
+
+	return false;
+#else
+	return !cpu_feature_enabled(X86_FEATURE_INVLPGB);
+#endif
+}
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
@@ -20,7 +36,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 		end = tlb->end;
 	}
 
-	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
+	flush_tlb_mm_range(tlb->mm, start, end, stride_shift,
+			   tlb->freed_tables || tlb->unshared_tables);
 }
 
 static inline void invlpg(unsigned long addr)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ab3e172dcc69..4eaa44800b39 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -60,6 +60,15 @@ void __init native_pv_lock_init(void)
 		static_branch_enable(&virt_spin_lock_key);
 }
 
+static bool native_flush_tlb_multi_implies_ipi_broadcast(void)
+{
+	/* Paravirt may use hypercalls that don't send real IPIs. */
+	if (pv_ops.mmu.flush_tlb_multi != native_flush_tlb_multi)
+		return false;
+
+	return !cpu_feature_enabled(X86_FEATURE_INVLPGB);
+}
+
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
@@ -173,6 +182,7 @@ struct paravirt_patch_template pv_ops = {
 	.mmu.flush_tlb_kernel	= native_flush_tlb_global,
 	.mmu.flush_tlb_one_user	= native_flush_tlb_one_user,
 	.mmu.flush_tlb_multi	= native_flush_tlb_multi,
+	.mmu.flush_tlb_multi_implies_ipi_broadcast = native_flush_tlb_multi_implies_ipi_broadcast,
 	.mmu.exit_mmap		= paravirt_nop,
 	.mmu.notify_page_enc_status_changed	= paravirt_nop,
 
-- 
2.49.0