From: Lance Yang

tlb_remove_table_sync_one() sends IPIs to all CPUs and waits for them,
which we really don't want to do while holding PTL. Just move the call
to after we release PTL, and drop the macro wrapper while we're at it.

Signed-off-by: Lance Yang
---
 include/linux/pgtable.h | 4 ----
 mm/khugepaged.c         | 5 +++--
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index eb8aacba3698..fb04ed22052c 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -755,7 +755,6 @@ static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
 	return pmd;
 }
 #define pmdp_get_lockless pmdp_get_lockless
-#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
 #endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
 
@@ -774,9 +773,6 @@ static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
 {
 	return pmdp_get(pmdp);
 }
-static inline void pmdp_get_lockless_sync(void)
-{
-}
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9f790ec34400..0a6cebf880e0 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1664,10 +1664,10 @@ static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsign
 		}
 	}
 	pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
-	pmdp_get_lockless_sync();
 	pte_unmap_unlock(start_pte, ptl);
 	if (ptl != pml)
 		spin_unlock(pml);
+	tlb_remove_table_sync_one();
 
 	mmu_notifier_invalidate_range_end(&range);
 
@@ -1818,7 +1818,6 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 		 */
 		if (likely(file_backed_vma_is_retractable(vma))) {
 			pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
-			pmdp_get_lockless_sync();
 			success = true;
 		}
 
@@ -1826,6 +1825,8 @@
 		spin_unlock(ptl);
 drop_pml:
 		spin_unlock(pml);
+		if (success)
+			tlb_remove_table_sync_one();
 
 		mmu_notifier_invalidate_range_end(&range);
 
-- 
2.49.0