Having converted so much of the code base to leaf entries, we can mop up
some remaining cases.

We replace is_pfn_swap_entry(), pfn_swap_entry_to_page(),
is_writable_device_private_entry(), is_device_exclusive_entry(),
is_migration_entry(), is_writable_migration_entry(),
is_readable_migration_entry(), swp_offset_pfn() and
pfn_swap_entry_folio() with leafent equivalents.

No functional change intended.

Signed-off-by: Lorenzo Stoakes
---
 fs/proc/task_mmu.c      |  14 ++---
 include/linux/leafops.h |  25 +++++++--
 include/linux/swapops.h | 121 +---------------------------------------
 mm/debug_vm_pgtable.c   |  20 +++----
 mm/hmm.c                |   2 +-
 mm/hugetlb.c            |   2 +-
 mm/ksm.c                |   6 +-
 mm/memory-failure.c     |   6 +-
 mm/memory.c             |   3 +-
 mm/mempolicy.c          |   4 +-
 mm/migrate.c            |   6 +-
 mm/migrate_device.c     |  10 ++--
 mm/mprotect.c           |   8 +--
 mm/page_vma_mapped.c    |   8 +--
 mm/pagewalk.c           |   7 +--
 mm/rmap.c               |   9 ++-
 16 files changed, 75 insertions(+), 176 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 82532c069039..8a9894aefbca 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1940,13 +1940,13 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 		if (pte_uffd_wp(pte))
 			flags |= PM_UFFD_WP;
 	} else {
-		swp_entry_t entry;
+		leaf_entry_t entry;
 
 		if (pte_swp_soft_dirty(pte))
 			flags |= PM_SOFT_DIRTY;
 		if (pte_swp_uffd_wp(pte))
 			flags |= PM_UFFD_WP;
-		entry = pte_to_swp_entry(pte);
+		entry = leafent_from_pte(pte);
 		if (pm->show_pfn) {
 			pgoff_t offset;
 
@@ -1954,16 +1954,16 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 			 * For PFN swap offsets, keeping the offset field
 			 * to be PFN only to be compatible with old smaps.
 			 */
-			if (is_pfn_swap_entry(entry))
-				offset = swp_offset_pfn(entry);
+			if (leafent_has_pfn(entry))
+				offset = leafent_to_pfn(entry);
 			else
 				offset = swp_offset(entry);
 			frame = swp_type(entry) |
 				(offset << MAX_SWAPFILES_SHIFT);
 		}
 		flags |= PM_SWAP;
-		if (is_pfn_swap_entry(entry))
-			page = pfn_swap_entry_to_page(entry);
+		if (leafent_has_pfn(entry))
+			page = leafent_to_page(entry);
 		if (leafent_is_uffd_wp_marker(entry))
 			flags |= PM_UFFD_WP;
 		if (leafent_is_guard_marker(entry))
@@ -2032,7 +2032,7 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
 		if (pmd_swp_uffd_wp(pmd))
 			flags |= PM_UFFD_WP;
 		VM_WARN_ON_ONCE(!pmd_is_migration_entry(pmd));
-		page = pfn_swap_entry_to_page(entry);
+		page = leafent_to_page(entry);
 	}
 
 	if (page) {
diff --git a/include/linux/leafops.h b/include/linux/leafops.h
index 2d3bc4c866bd..b74d406ba648 100644
--- a/include/linux/leafops.h
+++ b/include/linux/leafops.h
@@ -357,7 +357,7 @@ static inline unsigned long leafent_to_pfn(leaf_entry_t entry)
 	VM_WARN_ON_ONCE(!leafent_has_pfn(entry));
 
 	/* Temporary until swp_entry_t eliminated. */
-	return swp_offset_pfn(entry);
+	return swp_offset(entry) & SWP_PFN_MASK;
 }
 
 /**
@@ -368,10 +368,16 @@ static inline unsigned long leafent_to_pfn(leaf_entry_t entry)
  */
 static inline struct page *leafent_to_page(leaf_entry_t entry)
 {
+	struct page *page = pfn_to_page(leafent_to_pfn(entry));
+
 	VM_WARN_ON_ONCE(!leafent_has_pfn(entry));
+	/*
+	 * Any use of migration entries may only occur while the
+	 * corresponding page is locked
+	 */
+	VM_WARN_ON_ONCE(leafent_is_migration(entry) && !PageLocked(page));
 
-	/* Temporary until swp_entry_t eliminated. */
-	return pfn_swap_entry_to_page(entry);
+	return page;
 }
 
 /**
@@ -383,10 +389,17 @@ static inline struct page *leafent_to_page(leaf_entry_t entry)
  */
 static inline struct folio *leafent_to_folio(leaf_entry_t entry)
 {
-	VM_WARN_ON_ONCE(!leafent_has_pfn(entry));
+	struct folio *folio = pfn_folio(leafent_to_pfn(entry));
 
-	/* Temporary until swp_entry_t eliminated. */
-	return pfn_swap_entry_folio(entry);
+	VM_WARN_ON_ONCE(!leafent_has_pfn(entry));
+	/*
+	 * Any use of migration entries may only occur while the
+	 * corresponding folio is locked.
+	 */
+	VM_WARN_ON_ONCE(leafent_is_migration(entry) &&
+			!folio_test_locked(folio));
+
+	return folio;
 }
 
 /**
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c8e6f927da48..3d02b288c15e 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -28,7 +28,7 @@
 #define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
 
 /*
- * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
+ * Definitions only for PFN swap entries (see leafent_has_pfn()). To
  * store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
  * can use the extra bits to store other information besides PFN.
  */
@@ -66,8 +66,6 @@
 #define SWP_MIG_YOUNG			BIT(SWP_MIG_YOUNG_BIT)
 #define SWP_MIG_DIRTY			BIT(SWP_MIG_DIRTY_BIT)
 
-static inline bool is_pfn_swap_entry(swp_entry_t entry);
-
 /* Clear all flags but only keep swp_entry_t related information */
 static inline pte_t pte_swp_clear_flags(pte_t pte)
 {
@@ -109,17 +107,6 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
 	return entry.val & SWP_OFFSET_MASK;
 }
 
-/*
- * This should only be called upon a pfn swap entry to get the PFN stored
- * in the swap entry. Please refers to is_pfn_swap_entry() for definition
- * of pfn swap entry.
- */
-static inline unsigned long swp_offset_pfn(swp_entry_t entry)
-{
-	VM_BUG_ON(!is_pfn_swap_entry(entry));
-	return swp_offset(entry) & SWP_PFN_MASK;
-}
-
 /*
  * Convert the arch-dependent pte representation of a swp_entry_t into an
  * arch-independent swp_entry_t.
@@ -169,27 +156,11 @@ static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
 	return swp_entry(SWP_DEVICE_WRITE, offset);
 }
 
-static inline bool is_device_private_entry(swp_entry_t entry)
-{
-	int type = swp_type(entry);
-	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
-}
-
-static inline bool is_writable_device_private_entry(swp_entry_t entry)
-{
-	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
-}
-
 static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
 {
 	return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
 }
 
-static inline bool is_device_exclusive_entry(swp_entry_t entry)
-{
-	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE;
-}
-
 #else /* CONFIG_DEVICE_PRIVATE */
 static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
 {
@@ -201,50 +172,14 @@ static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
 	return swp_entry(0, 0);
 }
 
-static inline bool is_device_private_entry(swp_entry_t entry)
-{
-	return false;
-}
-
-static inline bool is_writable_device_private_entry(swp_entry_t entry)
-{
-	return false;
-}
-
 static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
 {
 	return swp_entry(0, 0);
 }
 
-static inline bool is_device_exclusive_entry(swp_entry_t entry)
-{
-	return false;
-}
-
 #endif /* CONFIG_DEVICE_PRIVATE */
 
 #ifdef CONFIG_MIGRATION
-static inline int is_migration_entry(swp_entry_t entry)
-{
-	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
-			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
-			swp_type(entry) == SWP_MIGRATION_WRITE);
-}
-
-static inline int is_writable_migration_entry(swp_entry_t entry)
-{
-	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
-}
-
-static inline int is_readable_migration_entry(swp_entry_t entry)
-{
-	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
-}
-
-static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
-{
-	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
-}
 
 static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
 {
@@ -310,23 +245,10 @@ static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
 	return swp_entry(0, 0);
 }
 
-static inline int is_migration_entry(swp_entry_t swp)
-{
-	return 0;
-}
-
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					unsigned long address) { }
 static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
 					unsigned long addr, pte_t *pte) { }
-static inline int is_writable_migration_entry(swp_entry_t entry)
-{
-	return 0;
-}
-static inline int is_readable_migration_entry(swp_entry_t entry)
-{
-	return 0;
-}
 
 static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
 {
@@ -410,47 +332,6 @@ static inline swp_entry_t make_guard_swp_entry(void)
 	return make_pte_marker_entry(PTE_MARKER_GUARD);
 }
 
-static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
-{
-	struct page *p = pfn_to_page(swp_offset_pfn(entry));
-
-	/*
-	 * Any use of migration entries may only occur while the
-	 * corresponding page is locked
-	 */
-	BUG_ON(is_migration_entry(entry) && !PageLocked(p));
-
-	return p;
-}
-
-static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
-{
-	struct folio *folio = pfn_folio(swp_offset_pfn(entry));
-
-	/*
-	 * Any use of migration entries may only occur while the
-	 * corresponding folio is locked
-	 */
-	BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));
-
-	return folio;
-}
-
-/*
- * A pfn swap entry is a special type of swap entry that always has a pfn stored
- * in the swap offset. They can either be used to represent unaddressable device
- * memory, to restrict access to a page undergoing migration or to represent a
- * pfn which has been hwpoisoned and unmapped.
- */
-static inline bool is_pfn_swap_entry(swp_entry_t entry)
-{
-	/* Make sure the swp offset can always store the needed fields */
-	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
-
-	return is_migration_entry(entry) || is_device_private_entry(entry) ||
-		is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
-}
-
 struct page_vma_mapped_walk;
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 8f247fcf1865..181fa2b25625 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -844,7 +844,7 @@ static void __init pmd_leafent_tests(struct pgtable_debug_args *args) { }
 static void __init swap_migration_tests(struct pgtable_debug_args *args)
 {
 	struct page *page;
-	swp_entry_t swp;
+	leaf_entry_t entry;
 
 	if (!IS_ENABLED(CONFIG_MIGRATION))
 		return;
@@ -867,17 +867,17 @@ static void __init swap_migration_tests(struct pgtable_debug_args *args)
 	 * be locked, otherwise it stumbles upon a BUG_ON().
 	 */
 	__SetPageLocked(page);
-	swp = make_writable_migration_entry(page_to_pfn(page));
-	WARN_ON(!is_migration_entry(swp));
-	WARN_ON(!is_writable_migration_entry(swp));
+	entry = make_writable_migration_entry(page_to_pfn(page));
+	WARN_ON(!leafent_is_migration(entry));
+	WARN_ON(!leafent_is_migration_write(entry));
 
-	swp = make_readable_migration_entry(swp_offset(swp));
-	WARN_ON(!is_migration_entry(swp));
-	WARN_ON(is_writable_migration_entry(swp));
+	entry = make_readable_migration_entry(swp_offset(entry));
+	WARN_ON(!leafent_is_migration(entry));
+	WARN_ON(leafent_is_migration_write(entry));
 
-	swp = make_readable_migration_entry(page_to_pfn(page));
-	WARN_ON(!is_migration_entry(swp));
-	WARN_ON(is_writable_migration_entry(swp));
+	entry = make_readable_migration_entry(page_to_pfn(page));
+	WARN_ON(!leafent_is_migration(entry));
+	WARN_ON(leafent_is_migration_write(entry));
 
 	__ClearPageLocked(page);
 }
diff --git a/mm/hmm.c b/mm/hmm.c
index 831ef855a55a..618503c8fd1c 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -265,7 +265,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 		cpu_flags = HMM_PFN_VALID;
 		if (leafent_is_device_private_write(entry))
 			cpu_flags |= HMM_PFN_WRITE;
-		new_pfn_flags = swp_offset_pfn(entry) | cpu_flags;
+		new_pfn_flags = leafent_to_pfn(entry) | cpu_flags;
 		goto out;
 	}
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6c483ecd496f..acb0c38c99a8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5640,7 +5640,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		} else if (unlikely(leafent_is_migration(leafent))) {
 			bool uffd_wp = pte_swp_uffd_wp(entry);
 
-			if (!is_readable_migration_entry(leafent) && cow) {
+			if (!leafent_is_migration_read(leafent) && cow) {
 				/*
 				 * COW mappings require pages in both
 				 * parent and child to be set to read.
diff --git a/mm/ksm.c b/mm/ksm.c
index 7cd19a6ce45f..f9dbe93fcffc 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -637,14 +637,14 @@ static int break_ksm_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long en
 	if (pte_present(pte)) {
 		folio = vm_normal_folio(walk->vma, addr, pte);
 	} else if (!pte_none(pte)) {
-		swp_entry_t entry = pte_to_swp_entry(pte);
+		const leaf_entry_t entry = leafent_from_pte(pte);
 
 		/*
 		 * As KSM pages remain KSM pages until freed, no need to wait
 		 * here for migration to end.
 		 */
-		if (is_migration_entry(entry))
-			folio = pfn_swap_entry_folio(entry);
+		if (leafent_is_migration(entry))
+			folio = leafent_to_folio(entry);
 	}
 	/* return 1 if the page is an normal ksm page or KSM-placed zero page */
 	found = (folio && folio_test_ksm(folio)) || is_ksm_zero_pte(pte);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index acc35c881547..42cd4079c660 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -691,10 +691,10 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
 	if (pte_present(pte)) {
 		pfn = pte_pfn(pte);
 	} else {
-		swp_entry_t swp = pte_to_swp_entry(pte);
+		const leaf_entry_t entry = leafent_from_pte(pte);
 
-		if (is_hwpoison_entry(swp))
-			pfn = swp_offset_pfn(swp);
+		if (leafent_is_hwpoison(entry))
+			pfn = leafent_to_pfn(entry);
 	}
 
 	if (!pfn || pfn != poisoned_pfn)
diff --git a/mm/memory.c b/mm/memory.c
index 3d118618bdeb..f7b837c3c4dd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -902,7 +902,8 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
 static int try_restore_exclusive_pte(struct vm_area_struct *vma,
 		unsigned long addr, pte_t *ptep, pte_t orig_pte)
 {
-	struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte));
+	const leaf_entry_t entry = leafent_from_pte(orig_pte);
+	struct page *page = leafent_to_page(entry);
 	struct folio *folio = page_folio(page);
 
 	if (folio_trylock(folio)) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f5b05754e6d5..48c85642fbe2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -705,7 +705,9 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
 		if (pte_none(ptent))
 			continue;
 		if (!pte_present(ptent)) {
-			if (is_migration_entry(pte_to_swp_entry(ptent)))
+			const leaf_entry_t entry = leafent_from_pte(ptent);
+
+			if (leafent_is_migration(entry))
 				qp->nr_failed++;
 			continue;
 		}
diff --git a/mm/migrate.c b/mm/migrate.c
index 8f2c3c7d87ba..22e52e90cb21 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -483,7 +483,7 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	spinlock_t *ptl;
 	pte_t *ptep;
 	pte_t pte;
-	swp_entry_t entry;
+	leaf_entry_t entry;
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!ptep)
@@ -495,8 +495,8 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	if (pte_none(pte) || pte_present(pte))
 		goto out;
 
-	entry = pte_to_swp_entry(pte);
-	if (!is_migration_entry(entry))
+	entry = leafent_from_pte(pte);
+	if (!leafent_is_migration(entry))
 		goto out;
 
 	migration_entry_wait_on_locked(entry, ptl);
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 5cb5ac2f0290..490560245ab6 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -282,7 +282,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		unsigned long mpfn = 0, pfn;
 		struct folio *folio;
 		struct page *page;
-		swp_entry_t entry;
+		leaf_entry_t entry;
 		pte_t pte;
 
 		pte = ptep_get(ptep);
@@ -301,11 +301,11 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 			 * page table entry. Other special swap entries are not
 			 * migratable, and we ignore regular swapped page.
 			 */
-			entry = pte_to_swp_entry(pte);
-			if (!is_device_private_entry(entry))
+			entry = leafent_from_pte(pte);
+			if (!leafent_is_device_private(entry))
 				goto next;
 
-			page = pfn_swap_entry_to_page(entry);
+			page = leafent_to_page(entry);
 			pgmap = page_pgmap(page);
 			if (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
 			    pgmap->owner != migrate->pgmap_owner)
@@ -331,7 +331,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 
 			mpfn = migrate_pfn(page_to_pfn(page)) |
 				MIGRATE_PFN_MIGRATE;
-			if (is_writable_device_private_entry(entry))
+			if (leafent_is_device_private_write(entry))
 				mpfn |= MIGRATE_PFN_WRITE;
 		} else {
 			pfn = pte_pfn(pte);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2134e28257d0..3358a3774db1 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -317,11 +317,11 @@ static long change_pte_range(struct mmu_gather *tlb,
 				pages++;
 			}
 		} else {
-			swp_entry_t entry = pte_to_swp_entry(oldpte);
+			leaf_entry_t entry = leafent_from_pte(oldpte);
 			pte_t newpte;
 
-			if (is_writable_migration_entry(entry)) {
-				struct folio *folio = pfn_swap_entry_folio(entry);
+			if (leafent_is_migration_write(entry)) {
+				const struct folio *folio = leafent_to_folio(entry);
 
 				/*
 				 * A protection check is difficult so
@@ -335,7 +335,7 @@ static long change_pte_range(struct mmu_gather *tlb,
 				newpte = swp_entry_to_pte(entry);
 				if (pte_swp_soft_dirty(oldpte))
 					newpte = pte_swp_mksoft_dirty(newpte);
-			} else if (is_writable_device_private_entry(entry)) {
+			} else if (leafent_is_device_private_write(entry)) {
 				/*
 				 * We do not preserve soft-dirtiness. See
 				 * copy_nonpresent_pte() for explanation.
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index b69b817ad180..52755d58ddc5 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -49,7 +49,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
 		if (is_migration)
 			return false;
 	} else if (!is_migration) {
-		swp_entry_t entry;
+		leaf_entry_t entry;
 
 		/*
 		 * Handle un-addressable ZONE_DEVICE memory.
@@ -67,9 +67,9 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
 		 * For more details on device private memory see HMM
 		 * (include/linux/hmm.h or mm/hmm.c).
 		 */
-		entry = pte_to_swp_entry(ptent);
-		if (!is_device_private_entry(entry) &&
-		    !is_device_exclusive_entry(entry))
+		entry = leafent_from_pte(ptent);
+		if (!leafent_is_device_private(entry) &&
+		    !leafent_is_device_exclusive(entry))
 			return false;
 	}
 	spin_lock(*ptlp);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index fc2576235fde..6cace2c8814a 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -1000,11 +1000,10 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 			goto found;
 		}
 	} else if (!pte_none(pte)) {
-		swp_entry_t entry = pte_to_swp_entry(pte);
+		const leaf_entry_t entry = leafent_from_pte(pte);
 
-		if ((flags & FW_MIGRATION) &&
-		    is_migration_entry(entry)) {
-			page = pfn_swap_entry_to_page(entry);
+		if ((flags & FW_MIGRATION) && leafent_is_migration(entry)) {
+			page = leafent_to_page(entry);
 			expose_page = false;
 			goto found;
 		}
diff --git a/mm/rmap.c b/mm/rmap.c
index 99203bf7d346..061d988b6ddf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1969,7 +1969,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		if (likely(pte_present(pteval))) {
 			pfn = pte_pfn(pteval);
 		} else {
-			pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+			pfn = leafent_to_pfn(leafent_from_pte(pteval));
 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 		}
 
@@ -2368,7 +2368,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		if (likely(pte_present(pteval))) {
 			pfn = pte_pfn(pteval);
 		} else {
-			pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+			pfn = leafent_to_pfn(leafent_from_pte(pteval));
 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 		}
 
@@ -2453,8 +2453,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			folio_mark_dirty(folio);
 			writable = pte_write(pteval);
 		} else {
+			const leaf_entry_t entry = leafent_from_pte(pteval);
+
 			pte_clear(mm, address, pvmw.pte);
-			writable = is_writable_device_private_entry(pte_to_swp_entry(pteval));
+
+			writable = leafent_is_device_private_write(entry);
 		}
 
 		VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) &&
-- 
2.51.0