PMD 'non-swap' swap entries are currently used for PMD-level migration
entries and device private entries.

To add to the confusion around this terminology, we also use
is_swap_pmd() inconsistently, much as is_swap_pte() was used - sometimes
adopting the convention that !pmd_none() && !pmd_present() implies a PMD
'swap' entry, sometimes not.

This patch handles the low-hanging fruit: the cases where we can simply
substitute other predicates for is_swap_pmd().

No functional change intended.

Signed-off-by: Lorenzo Stoakes
---
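Note for reviewers: is_swap_pmd() is defined as !pmd_none() &&
!pmd_present(), so in any branch that has already ruled out both
pmd_none() and pmd_present(), the is_swap_pmd() call is redundant. The
toy userspace model below illustrates the convention these
substitutions rely on; toy_pmd and its helpers are invented purely for
illustration and are not kernel API:

/*
 * Toy userspace model of the PMD predicate convention relied on
 * above. toy_pmd and its helpers are invented for illustration --
 * they are NOT the kernel's pmd_t / pmd_none() / pmd_present().
 */
#include <assert.h>
#include <stdbool.h>

enum toy_pmd_state { TOY_NONE, TOY_PRESENT, TOY_SWAP };

struct toy_pmd { enum toy_pmd_state state; };

static bool toy_pmd_none(struct toy_pmd pmd)
{
	return pmd.state == TOY_NONE;
}

static bool toy_pmd_present(struct toy_pmd pmd)
{
	return pmd.state == TOY_PRESENT;
}

/* Mirrors is_swap_pmd(): !pmd_none() && !pmd_present(). */
static bool toy_is_swap_pmd(struct toy_pmd pmd)
{
	return !toy_pmd_none(pmd) && !toy_pmd_present(pmd);
}

int main(void)
{
	const enum toy_pmd_state states[] = { TOY_NONE, TOY_PRESENT, TOY_SWAP };

	for (int i = 0; i < 3; i++) {
		struct toy_pmd pmd = { .state = states[i] };

		/*
		 * Once pmd_none() and pmd_present() are both ruled out,
		 * is_swap_pmd() is necessarily true -- so branches that
		 * already made those checks can drop it entirely.
		 */
		if (!toy_pmd_none(pmd) && !toy_pmd_present(pmd))
			assert(toy_is_swap_pmd(pmd));
	}
	return 0;
}

This compiles with any C99 compiler (e.g. cc toy_pmd.c && ./a.out) and
exits silently, confirming the invariant for all three states.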
 fs/proc/task_mmu.c      | 15 ++++++++++---
 include/linux/swapops.h | 16 +++++++++++--
 mm/huge_memory.c        |  4 +++-
 mm/memory.c             | 50 +++++++++++++++++++++++------------------
 mm/page_table_check.c   | 12 ++++++----
 5 files changed, 65 insertions(+), 32 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5ca18bd3b2d0..b68eabb26f29 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1059,10 +1059,12 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	bool present = false;
 	struct folio *folio;
 
+	if (pmd_none(*pmd))
+		return;
 	if (pmd_present(*pmd)) {
 		page = vm_normal_page_pmd(vma, addr, *pmd);
 		present = true;
-	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
+	} else if (unlikely(thp_migration_supported())) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 
 		if (is_pfn_swap_entry(entry))
@@ -1999,6 +2001,9 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
 	if (vma->vm_flags & VM_SOFTDIRTY)
 		flags |= PM_SOFT_DIRTY;
 
+	if (pmd_none(pmd))
+		goto populate_pagemap;
+
 	if (pmd_present(pmd)) {
 		page = pmd_page(pmd);
 
@@ -2009,7 +2014,7 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
 			flags |= PM_UFFD_WP;
 		if (pm->show_pfn)
 			frame = pmd_pfn(pmd) + idx;
-	} else if (thp_migration_supported() && is_swap_pmd(pmd)) {
+	} else if (thp_migration_supported()) {
 		swp_entry_t entry = pmd_to_swp_entry(pmd);
 		unsigned long offset;
 
@@ -2036,6 +2041,7 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
 		flags |= PM_FILE;
 	}
 
+populate_pagemap:
 	for (; addr != end; addr += PAGE_SIZE, idx++) {
 		u64 cur_flags = flags;
 		pagemap_entry_t pme;
@@ -2398,6 +2404,9 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
 {
 	unsigned long categories = PAGE_IS_HUGE;
 
+	if (pmd_none(pmd))
+		return categories;
+
 	if (pmd_present(pmd)) {
 		struct page *page;
 
@@ -2415,7 +2424,7 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
 			categories |= PAGE_IS_PFNZERO;
 		if (pmd_soft_dirty(pmd))
 			categories |= PAGE_IS_SOFT_DIRTY;
-	} else if (is_swap_pmd(pmd)) {
+	} else {
 		swp_entry_t swp;
 
 		categories |= PAGE_IS_SWAPPED;
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index a66ac4f2105c..3e8dd6ea94dd 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -509,7 +509,13 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
 
 static inline int is_pmd_migration_entry(pmd_t pmd)
 {
-	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
+	swp_entry_t entry;
+
+	if (pmd_present(pmd))
+		return 0;
+
+	entry = pmd_to_swp_entry(pmd);
+	return is_migration_entry(entry);
 }
 #else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
 static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
@@ -557,7 +563,13 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
  */
 static inline int is_pmd_device_private_entry(pmd_t pmd)
 {
-	return is_swap_pmd(pmd) && is_device_private_entry(pmd_to_swp_entry(pmd));
+	swp_entry_t entry;
+
+	if (pmd_present(pmd))
+		return 0;
+
+	entry = pmd_to_swp_entry(pmd);
+	return is_device_private_entry(entry);
 }
 
 #else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f6c353a8d7bd..2e5196a68f14 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2429,9 +2429,11 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
 
 static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
 {
+	if (pmd_none(pmd))
+		return pmd;
 	if (pmd_present(pmd))
 		pmd = pmd_clear_uffd_wp(pmd);
-	else if (is_swap_pmd(pmd))
+	else
 		pmd = pmd_swp_clear_uffd_wp(pmd);
 
 	return pmd;
diff --git a/mm/memory.c b/mm/memory.c
index 7493ed084b99..fea079e5fb90 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1376,6 +1376,7 @@ copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		next = pmd_addr_end(addr, end);
 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)) {
 			int err;
+
 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
 					    addr, dst_vma, src_vma);
@@ -6350,35 +6351,40 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	if (pmd_none(*vmf.pmd) &&
 	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) {
 		ret = create_huge_pmd(&vmf);
-		if (!(ret & VM_FAULT_FALLBACK))
+		if (ret & VM_FAULT_FALLBACK)
+			goto fallback;
+		else
 			return ret;
-	} else {
-		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
+	}
 
-		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
-			if (is_pmd_device_private_entry(vmf.orig_pmd))
-				return do_huge_pmd_device_private(&vmf);
+	vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
+	if (pmd_none(vmf.orig_pmd))
+		goto fallback;
 
-			if (is_pmd_migration_entry(vmf.orig_pmd))
-				pmd_migration_entry_wait(mm, vmf.pmd);
-			return 0;
-		}
-		if (pmd_trans_huge(vmf.orig_pmd)) {
-			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
-				return do_huge_pmd_numa_page(&vmf);
+	if (unlikely(!pmd_present(vmf.orig_pmd))) {
+		if (is_pmd_device_private_entry(vmf.orig_pmd))
+			return do_huge_pmd_device_private(&vmf);
 
-			if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
-			    !pmd_write(vmf.orig_pmd)) {
-				ret = wp_huge_pmd(&vmf);
-				if (!(ret & VM_FAULT_FALLBACK))
-					return ret;
-			} else {
-				huge_pmd_set_accessed(&vmf);
-				return 0;
-			}
+		if (is_pmd_migration_entry(vmf.orig_pmd))
+			pmd_migration_entry_wait(mm, vmf.pmd);
+		return 0;
+	}
+	if (pmd_trans_huge(vmf.orig_pmd)) {
+		if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
+			return do_huge_pmd_numa_page(&vmf);
+
+		if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
+		    !pmd_write(vmf.orig_pmd)) {
+			ret = wp_huge_pmd(&vmf);
+			if (!(ret & VM_FAULT_FALLBACK))
+				return ret;
+		} else {
+			huge_pmd_set_accessed(&vmf);
+			return 0;
 		}
 	}
 
+fallback:
 	return handle_pte_fault(&vmf);
 }
 
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 43f75d2f7c36..f5f25e120f69 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -215,10 +215,14 @@ EXPORT_SYMBOL(__page_table_check_ptes_set);
 
 static inline void page_table_check_pmd_flags(pmd_t pmd)
 {
-	if (pmd_present(pmd) && pmd_uffd_wp(pmd))
-		WARN_ON_ONCE(pmd_write(pmd));
-	else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
-		WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
+	if (pmd_present(pmd)) {
+		if (pmd_uffd_wp(pmd))
+			WARN_ON_ONCE(pmd_write(pmd));
+	} else if (pmd_swp_uffd_wp(pmd)) {
+		swp_entry_t entry = pmd_to_swp_entry(pmd);
+
+		WARN_ON_ONCE(swap_cached_writable(entry));
+	}
 }
 
 void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
-- 
2.51.0