From: Lance Yang

Currently, tlb_remove_table_sync_one() broadcasts IPIs to all CPUs to wait
for any concurrent lockless page table walkers (e.g., GUP-fast). This is
inefficient on systems with many CPUs, especially for RT workloads[1].

This patch introduces a per-CPU tracking mechanism that records which CPUs
are actively performing lockless page table walks for a specific mm_struct.
When freeing/unsharing page tables, we can then send IPIs only to the CPUs
that are actually walking that mm, instead of broadcasting to all CPUs.

This patch only adds the infrastructure, in preparation for targeted IPIs;
a follow-up will switch callers over to tlb_remove_table_sync_mm().

Note that the tracking adds ~3% latency to GUP-fast, as measured on a
64-core system.

[1] https://lore.kernel.org/linux-mm/1b27a3fa-359a-43d0-bdeb-c31341749367@kernel.org/

Suggested-by: David Hildenbrand (Red Hat)
Signed-off-by: Lance Yang
---
 include/asm-generic/tlb.h |  2 ++
 include/linux/mm.h        | 34 ++++++++++++++++++++++++++
 kernel/events/core.c      |  2 ++
 mm/gup.c                  |  2 ++
 mm/mmu_gather.c           | 50 +++++++++++++++++++++++++++++++++++++++
 5 files changed, 90 insertions(+)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 4aeac0c3d3f0..b6b06e6b879f 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -250,6 +250,7 @@ static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
 #endif
 
 void tlb_remove_table_sync_one(void);
+void tlb_remove_table_sync_mm(struct mm_struct *mm);
 
 #else
 
@@ -258,6 +259,7 @@ void tlb_remove_table_sync_one(void);
 #endif
 
 static inline void tlb_remove_table_sync_one(void) { }
+static inline void tlb_remove_table_sync_mm(struct mm_struct *mm) { }
 
 #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f8a8fd47399c..d92df995fcd1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2995,6 +2995,40 @@ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
                      pgoff_t *offset);
 int folio_add_pins(struct folio *folio, unsigned int pins);
 
+/*
+ * Track CPUs doing lockless page table walks to avoid broadcast IPIs
+ * during TLB flushes.
+ */
+DECLARE_PER_CPU(struct mm_struct *, active_lockless_pt_walk_mm);
+
+static inline void pt_walk_lockless_start(struct mm_struct *mm)
+{
+        lockdep_assert_irqs_disabled();
+
+        /*
+         * Tell other CPUs that we're doing a lockless page table walk.
+         *
+         * A full barrier is needed to prevent page table reads from being
+         * reordered before this write.
+         *
+         * Pairs with smp_rmb() in tlb_remove_table_sync_mm().
+         */
+        this_cpu_write(active_lockless_pt_walk_mm, mm);
+        smp_mb();
+}
+
+static inline void pt_walk_lockless_end(void)
+{
+        lockdep_assert_irqs_disabled();
+
+        /*
+         * Clear the pointer so other CPUs no longer see this CPU as walking
+         * the mm. Use smp_store_release() to ensure page table reads complete
+         * before the clear is visible to other CPUs.
+         */
+        smp_store_release(this_cpu_ptr(&active_lockless_pt_walk_mm), NULL);
+}
+
 int get_user_pages_fast(unsigned long start, int nr_pages,
                         unsigned int gup_flags, struct page **pages);
 int pin_user_pages_fast(unsigned long start, int nr_pages,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5b5cb620499e..6539112c28ff 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8190,7 +8190,9 @@ static u64 perf_get_page_size(unsigned long addr)
                mm = &init_mm;
        }
 
+       pt_walk_lockless_start(mm);
        size = perf_get_pgtable_size(mm, addr);
+       pt_walk_lockless_end();
 
        local_irq_restore(flags);
 
diff --git a/mm/gup.c b/mm/gup.c
index 8e7dc2c6ee73..6748e28b27f2 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -3154,7 +3154,9 @@ static unsigned long gup_fast(unsigned long start, unsigned long end,
         * that come from callers of tlb_remove_table_sync_one().
         */
        local_irq_save(flags);
+       pt_walk_lockless_start(current->mm);
        gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned);
+       pt_walk_lockless_end();
        local_irq_restore(flags);
 
        /*
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 2faa23d7f8d4..35c89e4b6230 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -285,6 +285,56 @@ void tlb_remove_table_sync_one(void)
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
 }
 
+DEFINE_PER_CPU(struct mm_struct *, active_lockless_pt_walk_mm);
+EXPORT_PER_CPU_SYMBOL_GPL(active_lockless_pt_walk_mm);
+
+/**
+ * tlb_remove_table_sync_mm - send IPIs to CPUs doing a lockless page table
+ * walk for @mm
+ *
+ * @mm: target mm; only CPUs walking this mm get an IPI.
+ *
+ * Like tlb_remove_table_sync_one(), but only targets CPUs recorded in
+ * active_lockless_pt_walk_mm.
+ */
+void tlb_remove_table_sync_mm(struct mm_struct *mm)
+{
+        cpumask_var_t target_cpus;
+        bool found_any = false;
+        int cpu;
+
+        if (WARN_ONCE(!mm, "NULL mm in %s\n", __func__)) {
+                tlb_remove_table_sync_one();
+                return;
+        }
+
+        /* If we can't allocate the cpumask, fall back to broadcast. */
+        if (!alloc_cpumask_var(&target_cpus, GFP_ATOMIC)) {
+                tlb_remove_table_sync_one();
+                return;
+        }
+
+        cpumask_clear(target_cpus);
+
+        /* Pairs with smp_mb() in pt_walk_lockless_start(). */
+        smp_rmb();
+
+        /* Find CPUs doing lockless page table walks for this mm. */
+        for_each_online_cpu(cpu) {
+                if (per_cpu(active_lockless_pt_walk_mm, cpu) == mm) {
+                        cpumask_set_cpu(cpu, target_cpus);
+                        found_any = true;
+                }
+        }
+
+        /* Only send IPIs to CPUs actually doing lockless walks. */
+        if (found_any)
+                smp_call_function_many(target_cpus, tlb_remove_table_smp_sync,
+                                       NULL, 1);
+
+        free_cpumask_var(target_cpus);
+}
+
 static void tlb_remove_table_rcu(struct rcu_head *head)
 {
        __tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
-- 
2.49.0

From: Lance Yang

Now that we have tlb_remove_table_sync_mm(), convert callers from
tlb_remove_table_sync_one() so they issue targeted IPIs instead of a
broadcast.

Three callers are updated:

1) collapse_huge_page() - after flushing the old PMD, IPI only the CPUs
   walking this mm instead of all CPUs.
2) tlb_flush_unshared_tables() - when unsharing hugetlb page tables, use
   tlb->mm for targeted IPIs.
3) __tlb_remove_table_one() - now takes an mmu_gather parameter so it can
   use tlb->mm when batch allocation fails.

Note that pmdp_get_lockless_sync() (PAE only) also calls
tlb_remove_table_sync_one() under the PTL to ensure all ongoing PMD
split-reads complete between pmdp_get_lockless_{start,end}; the critical
section is very short. I'm inclined not to convert it, since PAE systems
typically don't have many cores.
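For reference, every converted caller relies on the same pairing between
the lockless walker and the freeing/unsharing side. The snippet below is an
illustrative sketch only, not code from any single call site;
walk_mm_lockless() and free_table_for() are made-up names, and only the
helpers introduced in the previous patch are real:

  /* Walker side (e.g. GUP-fast): record the walk with IRQs disabled. */
  static void walk_mm_lockless(struct mm_struct *mm)
  {
          unsigned long flags;

          local_irq_save(flags);
          pt_walk_lockless_start(mm);
          /* ... read page tables without holding the PTL ... */
          pt_walk_lockless_end();
          local_irq_restore(flags);
  }

  /* Freeing/unsharing side: wait only for CPUs that are walking @mm. */
  static void free_table_for(struct mm_struct *mm)
  {
          /* IPIs only CPUs whose active_lockless_pt_walk_mm == mm. */
          tlb_remove_table_sync_mm(mm);
          /* ... now it is safe to free or reuse the page table ... */
  }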
Suggested-by: David Hildenbrand (Red Hat)
Signed-off-by: Lance Yang
---
 include/asm-generic/tlb.h | 11 ++++++-----
 mm/khugepaged.c           |  2 +-
 mm/mmu_gather.c           | 12 ++++++------
 3 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b6b06e6b879f..40eb74b28f9d 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -831,17 +831,18 @@ static inline void tlb_flush_unshared_tables(struct mmu_gather *tlb)
        /*
         * Similarly, we must make sure that concurrent GUP-fast will not
         * walk previously-shared page tables that are getting modified+reused
-        * elsewhere. So broadcast an IPI to wait for any concurrent GUP-fast.
+        * elsewhere. So send an IPI to wait for any concurrent GUP-fast.
         *
-        * We only perform this when we are the last sharer of a page table,
-        * as the IPI will reach all CPUs: any GUP-fast.
+        * We only perform this when we are the last sharer of a page table.
+        * Send a targeted IPI to the CPUs actively walking this mm instead
+        * of broadcasting.
         *
-        * Note that on configs where tlb_remove_table_sync_one() is a NOP,
+        * Note that on configs where tlb_remove_table_sync_mm() is a NOP,
         * the expectation is that the tlb_flush_mmu_tlbonly() would have issued
         * required IPIs already for us.
         */
        if (tlb->fully_unshared_tables) {
-               tlb_remove_table_sync_one();
+               tlb_remove_table_sync_mm(tlb->mm);
                tlb->fully_unshared_tables = false;
        }
 }
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index fa1e57fd2c46..7781d6628649 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1173,7 +1173,7 @@ static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long a
        _pmd = pmdp_collapse_flush(vma, address, pmd);
        spin_unlock(pmd_ptl);
        mmu_notifier_invalidate_range_end(&range);
-       tlb_remove_table_sync_one();
+       tlb_remove_table_sync_mm(mm);
 
        pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
        if (pte) {
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 35c89e4b6230..76573ec454e5 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -378,7 +378,7 @@ static inline void __tlb_remove_table_one_rcu(struct rcu_head *head)
        __tlb_remove_table(ptdesc);
 }
 
-static inline void __tlb_remove_table_one(void *table)
+static inline void __tlb_remove_table_one(struct mmu_gather *tlb, void *table)
 {
        struct ptdesc *ptdesc;
 
@@ -386,16 +386,16 @@ static inline void __tlb_remove_table_one(void *table)
        call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu);
 }
 #else
-static inline void __tlb_remove_table_one(void *table)
+static inline void __tlb_remove_table_one(struct mmu_gather *tlb, void *table)
 {
-       tlb_remove_table_sync_one();
+       tlb_remove_table_sync_mm(tlb->mm);
        __tlb_remove_table(table);
 }
 #endif /* CONFIG_PT_RECLAIM */
 
-static void tlb_remove_table_one(void *table)
+static void tlb_remove_table_one(struct mmu_gather *tlb, void *table)
 {
-       __tlb_remove_table_one(table);
+       __tlb_remove_table_one(tlb, table);
 }
 
 static void tlb_table_flush(struct mmu_gather *tlb)
@@ -417,7 +417,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
                *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT);
                if (*batch == NULL) {
                        tlb_table_invalidate(tlb);
-                       tlb_remove_table_one(table);
+                       tlb_remove_table_one(tlb, table);
                        return;
                }
                (*batch)->nr = 0;
-- 
2.49.0

From: Lance Yang

When the TLB flush path already sends IPIs (e.g. native without INVLPGB, or
KVM), tlb_remove_table_sync_mm() does not need to send another round.
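The reason the flush IPIs are already sufficient: lockless walkers run with
IRQs disabled, so a CPU cannot acknowledge the flush IPI until its walk has
finished. Concretely, the change boils down to an early return in
tlb_remove_table_sync_mm() (shown here as a sketch for context; the real
hunk in mm/mmu_gather.c is below):

  void tlb_remove_table_sync_mm(struct mm_struct *mm)
  {
          /*
           * If the TLB flush was delivered by IPI, every lockless walker
           * had already finished by the time the flush returned.
           */
          if (tlb_table_flush_implies_ipi_broadcast())
                  return;

          /* ... otherwise fall back to the targeted IPIs added earlier ... */
  }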
Add a property to pv_mmu_ops so each paravirt backend can indicate whether
its flush_tlb_multi implementation sends real IPIs; if so,
tlb_remove_table_sync_mm() becomes a no-op. The native path sets it in
native_pv_tlb_init() when flush_tlb_multi is still native_flush_tlb_multi()
and INVLPGB is disabled. KVM sets it when INVLPGB is not available; Xen and
Hyper-V leave it false because they flush via hypercalls.

Also pass both freed_tables and unshared_tables from tlb_flush() into
flush_tlb_mm_range() so that lazy-TLB CPUs get IPIs during hugetlb unshare.

Suggested-by: David Hildenbrand (Red Hat)
Signed-off-by: Lance Yang
---
 arch/x86/hyperv/mmu.c                 |  5 +++++
 arch/x86/include/asm/paravirt.h       |  5 +++++
 arch/x86/include/asm/paravirt_types.h |  6 ++++++
 arch/x86/include/asm/tlb.h            | 20 +++++++++++++++++++-
 arch/x86/kernel/kvm.c                 |  6 ++++++
 arch/x86/kernel/paravirt.c            | 18 ++++++++++++++++++
 arch/x86/kernel/smpboot.c             |  1 +
 arch/x86/xen/mmu_pv.c                 |  2 ++
 include/asm-generic/tlb.h             | 15 +++++++++++++++
 mm/mmu_gather.c                       |  7 +++++++
 10 files changed, 84 insertions(+), 1 deletion(-)

diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index cfcb60468b01..fc8fb275f295 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -243,4 +243,9 @@ void hyperv_setup_mmu_ops(void)
 
        pr_info("Using hypercall for remote TLB flush\n");
        pv_ops.mmu.flush_tlb_multi = hyperv_flush_tlb_multi;
+       /*
+        * Hyper-V uses hypercalls for TLB flush, not real IPIs.
+        * Keep the property as false.
+        */
+       pv_ops.mmu.flush_tlb_multi_implies_ipi_broadcast = false;
 }
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 13f9cd31c8f8..1fdbe3736f41 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -698,6 +698,7 @@ static __always_inline unsigned long arch_local_irq_save(void)
 extern void default_banner(void);
 
 void native_pv_lock_init(void) __init;
+void native_pv_tlb_init(void) __init;
 
 #else /* __ASSEMBLER__ */
 
@@ -727,6 +728,10 @@ void native_pv_lock_init(void) __init;
 static inline void native_pv_lock_init(void)
 {
 }
+
+static inline void native_pv_tlb_init(void)
+{
+}
 #endif
 #endif /* !CONFIG_PARAVIRT */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 3502939415ad..d8aa519ef5e3 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -133,6 +133,12 @@ struct pv_mmu_ops {
        void (*flush_tlb_multi)(const struct cpumask *cpus,
                                const struct flush_tlb_info *info);
 
+       /*
+        * Indicates whether flush_tlb_multi IPIs provide sufficient
+        * synchronization during TLB flush when freeing or unsharing page tables.
+        */
+       bool flush_tlb_multi_implies_ipi_broadcast;
+
        /* Hook for intercepting the destruction of an mm_struct. */
        void (*exit_mmap)(struct mm_struct *mm);
        void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 866ea78ba156..1e524d8e260a 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -5,10 +5,23 @@
 #define tlb_flush tlb_flush
 static inline void tlb_flush(struct mmu_gather *tlb);
 
+#define tlb_table_flush_implies_ipi_broadcast tlb_table_flush_implies_ipi_broadcast
+static inline bool tlb_table_flush_implies_ipi_broadcast(void);
+
 #include
 #include
 #include
 #include
+#include
+
+static inline bool tlb_table_flush_implies_ipi_broadcast(void)
+{
+#ifdef CONFIG_PARAVIRT
+       return pv_ops.mmu.flush_tlb_multi_implies_ipi_broadcast;
+#else
+       return !cpu_feature_enabled(X86_FEATURE_INVLPGB);
+#endif
+}
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
@@ -20,7 +33,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
                end = tlb->end;
        }
 
-       flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
+       /*
+        * During TLB flushes, pass both freed_tables and unshared_tables
+        * so lazy-TLB CPUs receive IPIs.
+        */
+       flush_tlb_mm_range(tlb->mm, start, end, stride_shift,
+                          tlb->freed_tables || tlb->unshared_tables);
 }
 
 static inline void invlpg(unsigned long addr)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 37dc8465e0f5..6a5e47ee4eb6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -856,6 +856,12 @@ static void __init kvm_guest_init(void)
 #ifdef CONFIG_SMP
        if (pv_tlb_flush_supported()) {
                pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
+               /*
+                * KVM's flush implementation calls native_flush_tlb_multi(),
+                * which sends real IPIs when INVLPGB is not available.
+                */
+               if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+                       pv_ops.mmu.flush_tlb_multi_implies_ipi_broadcast = true;
                pr_info("KVM setup pv remote TLB flush\n");
        }
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ab3e172dcc69..1af253c9f51d 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -60,6 +60,23 @@ void __init native_pv_lock_init(void)
                static_branch_enable(&virt_spin_lock_key);
 }
 
+void __init native_pv_tlb_init(void)
+{
+       /*
+        * Check if we're still using the native TLB flush (not overridden
+        * by a PV backend) and don't have INVLPGB support.
+        *
+        * In this case, the native IPI-based TLB flush provides sufficient
+        * synchronization for GUP-fast.
+        *
+        * PV backends (KVM, Xen, Hyper-V) should set this property in their
+        * own initialization code if their flush implementation sends IPIs.
+        */
+       if (pv_ops.mmu.flush_tlb_multi == native_flush_tlb_multi &&
+           !cpu_feature_enabled(X86_FEATURE_INVLPGB))
+               pv_ops.mmu.flush_tlb_multi_implies_ipi_broadcast = true;
+}
+
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
@@ -173,6 +190,7 @@ struct paravirt_patch_template pv_ops = {
        .mmu.flush_tlb_kernel = native_flush_tlb_global,
        .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
        .mmu.flush_tlb_multi = native_flush_tlb_multi,
+       .mmu.flush_tlb_multi_implies_ipi_broadcast = false,
 
        .mmu.exit_mmap = paravirt_nop,
        .mmu.notify_page_enc_status_changed = paravirt_nop,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5cd6950ab672..3cdb04162843 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1167,6 +1167,7 @@ void __init native_smp_prepare_boot_cpu(void)
        switch_gdt_and_percpu_base(me);
 
        native_pv_lock_init();
+       native_pv_tlb_init();
 }
 
 void __init native_smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 7a35c3393df4..b6d86299cf10 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2185,6 +2185,8 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = {
        .flush_tlb_kernel = xen_flush_tlb,
        .flush_tlb_one_user = xen_flush_tlb_one_user,
        .flush_tlb_multi = xen_flush_tlb_multi,
+       /* Xen uses hypercalls for TLB flush, not real IPIs */
+       .flush_tlb_multi_implies_ipi_broadcast = false,
 
        .pgd_alloc = xen_pgd_alloc,
        .pgd_free = xen_pgd_free,
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 40eb74b28f9d..fae97c8bcceb 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -240,6 +240,21 @@ static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
 }
 #endif /* CONFIG_MMU_GATHER_TABLE_FREE */
 
+/*
+ * Architectures can override this to indicate whether TLB flush operations
+ * send IPIs that are sufficient to synchronize with lockless page table
+ * walkers (e.g., GUP-fast). If true, tlb_remove_table_sync_mm() becomes
+ * a no-op as the TLB flush already provided the necessary IPI.
+ *
+ * Default is false, meaning we need explicit IPIs via tlb_remove_table_sync_mm().
+ */
+#ifndef tlb_table_flush_implies_ipi_broadcast
+static inline bool tlb_table_flush_implies_ipi_broadcast(void)
+{
+       return false;
+}
+#endif
+
 #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
 /*
  * This allows an architecture that does not use the linux page-tables for
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 76573ec454e5..9620480c11ce 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -303,6 +303,13 @@ void tlb_remove_table_sync_mm(struct mm_struct *mm)
        bool found_any = false;
        int cpu;
 
+       /*
+        * If the architecture's TLB flush already sent IPIs that are sufficient
+        * for synchronization, we don't need to send additional IPIs.
+        */
+       if (tlb_table_flush_implies_ipi_broadcast())
+               return;
+
        if (WARN_ONCE(!mm, "NULL mm in %s\n", __func__)) {
                tlb_remove_table_sync_one();
                return;
        }
-- 
2.49.0
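Taken together, the series leaves the freeing/unsharing side with the
following decision flow. This is a condensed sketch, not literal kernel
code; sync_against_lockless_walkers() is a made-up wrapper name, while the
two helpers it calls are the ones introduced above:

  static void sync_against_lockless_walkers(struct mmu_gather *tlb)
  {
          /*
           * x86: true when flush_tlb_multi sends real IPIs (native without
           * INVLPGB, or KVM); Xen and Hyper-V flush via hypercalls.
           */
          if (tlb_table_flush_implies_ipi_broadcast())
                  return;         /* the TLB flush IPIs already synchronized us */

          /* Otherwise, IPI only the CPUs doing a lockless walk of tlb->mm. */
          tlb_remove_table_sync_mm(tlb->mm);
  }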