From: Lance Yang

Pass both freed_tables and unshared_tables to flush_tlb_mm_range() to
ensure lazy-TLB CPUs receive IPIs and flush their paging-structure
caches:

	flush_tlb_mm_range(..., freed_tables || unshared_tables);

Implement tlb_table_flush_implies_ipi_broadcast() for x86: on native
x86 without paravirt or INVLPGB, the TLB flush IPI already provides
the necessary synchronization, allowing the second IPI to be skipped.
For paravirt with a non-native flush_tlb_multi, and for INVLPGB,
conservatively keep both IPIs.

Suggested-by: David Hildenbrand (Red Hat)
Signed-off-by: Lance Yang
---
 arch/x86/include/asm/tlb.h | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 866ea78ba156..96602b7b7210 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -5,10 +5,24 @@
 #define tlb_flush tlb_flush
 static inline void tlb_flush(struct mmu_gather *tlb);
 
+#define tlb_table_flush_implies_ipi_broadcast tlb_table_flush_implies_ipi_broadcast
+static inline bool tlb_table_flush_implies_ipi_broadcast(void);
+
 #include <asm-generic/tlb.h>
 #include <linux/kernel.h>
 #include <vdso/bits.h>
 #include <vdso/page.h>
+#include <asm/paravirt.h>
+
+static inline bool tlb_table_flush_implies_ipi_broadcast(void)
+{
+#ifdef CONFIG_PARAVIRT
+	/* Paravirt may use hypercalls that don't send real IPIs. */
+	if (pv_ops.mmu.flush_tlb_multi != native_flush_tlb_multi)
+		return false;
+#endif
+	return !cpu_feature_enabled(X86_FEATURE_INVLPGB);
+}
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
@@ -20,7 +34,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 		end = tlb->end;
 	}
 
-	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
+	flush_tlb_mm_range(tlb->mm, start, end, stride_shift,
+			   tlb->freed_tables || tlb->unshared_tables);
 }
 
 static inline void invlpg(unsigned long addr)
-- 
2.49.0
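
For context, a minimal sketch (not part of this patch) of how a generic
caller might consume the new hook. The caller name tlb_table_sync() is
hypothetical; tlb_remove_table_sync_one() is the existing IPI-broadcast
helper in mm/mmu_gather.c:

	/* Hypothetical caller, for illustration only. */
	static void tlb_table_sync(void)
	{
		/*
		 * If the preceding TLB flush already IPI'd every CPU
		 * that could hold a stale paging-structure-cache entry
		 * (native x86 without INVLPGB), the second
		 * synchronization IPI is redundant and can be skipped.
		 */
		if (tlb_table_flush_implies_ipi_broadcast())
			return;

		/* Otherwise, conservatively send the second IPI. */
		tlb_remove_table_sync_one();
	}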