There is simply no need for the hugely confusing concept of 'non-swap'
swap entries now that we have the concept of leaf entries and the
relevant leafent_xxx() helpers.

Adjust all callers to use these instead and remove non_swap_entry()
altogether.

No functional change intended.

Signed-off-by: Lorenzo Stoakes
---
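[Reviewer note, not part of the patch proper: a minimal sketch of the
conversion pattern applied throughout. The leafent_xxx() helper names are
exactly those used in the hunks below, but the wrapper function itself is
hypothetical, an illustration of the shape rather than code from the tree.]

	/*
	 * Illustrative sketch only. It mirrors the converted s390
	 * ptep_zap_leaf_entry() below: callers obtain a leaf_entry_t from
	 * a non-present PTE and ask positive questions about it, instead
	 * of testing for a "non-swap" swap entry and then discriminating.
	 */
	static void leafent_dispatch_example(struct mm_struct *mm, pte_t pte)
	{
		const leaf_entry_t entry = leafent_from_pte(pte);

		if (leafent_is_swap(entry)) {
			/* was: !non_swap_entry(entry), a genuine swap entry */
			dec_mm_counter(mm, MM_SWAPENTS);
		} else if (leafent_is_migration(entry)) {
			/* was: non_swap_entry(entry) && is_migration_entry(entry) */
			dec_mm_counter(mm, mm_counter(leafent_to_folio(entry)));
		}
	}

The double negatives of the !non_swap_entry() idiom disappear, and each
non-present PTE case is named for what it actually is.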
 arch/s390/mm/gmap_helpers.c | 18 +++++++++---------
 arch/s390/mm/pgtable.c      | 12 ++++++------
 fs/proc/task_mmu.c          | 12 ++++++------
 include/linux/swapops.h     |  5 -----
 mm/filemap.c                |  2 +-
 mm/hmm.c                    | 16 ++++++++--------
 mm/madvise.c                |  2 +-
 mm/memory.c                 | 36 ++++++++++++++++++------------------
 mm/mincore.c                |  2 +-
 mm/userfaultfd.c            | 24 ++++++++++++------------
 10 files changed, 62 insertions(+), 67 deletions(-)

diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
index d4c3c36855e2..0cf856f38ade 100644
--- a/arch/s390/mm/gmap_helpers.c
+++ b/arch/s390/mm/gmap_helpers.c
@@ -11,27 +11,27 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
 #include

 /**
- * ptep_zap_swap_entry() - discard a swap entry.
+ * ptep_zap_leaf_entry() - discard a leaf entry.
  * @mm: the mm
- * @entry: the swap entry that needs to be zapped
+ * @entry: the leaf entry that needs to be zapped
  *
- * Discards the given swap entry. If the swap entry was an actual swap
+ * Discards the given leaf entry. If the leaf entry was an actual swap
  * entry (and not a migration entry, for example), the actual swapped
  * page is also discarded from swap.
  */
-static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
+static void ptep_zap_leaf_entry(struct mm_struct *mm, leaf_entry_t entry)
 {
-	if (!non_swap_entry(entry))
+	if (leafent_is_swap(entry))
 		dec_mm_counter(mm, MM_SWAPENTS);
-	else if (is_migration_entry(entry))
-		dec_mm_counter(mm, mm_counter(pfn_swap_entry_folio(entry)));
+	else if (leafent_is_migration(entry))
+		dec_mm_counter(mm, mm_counter(leafent_to_folio(entry)));
 	free_swap_and_cache(entry);
 }

@@ -66,7 +66,7 @@ void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
 	preempt_disable();
 	pgste = pgste_get_lock(ptep);

-	ptep_zap_swap_entry(mm, pte_to_swp_entry(*ptep));
+	ptep_zap_leaf_entry(mm, leafent_from_pte(*ptep));
 	pte_clear(mm, vmaddr, ptep);

 	pgste_set_unlock(ptep, pgste);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0fde20bbc50b..7805c5a3755e 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -16,7 +16,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -683,12 +683,12 @@ void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
 	pgste_set_unlock(ptep, pgste);
 }

-static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
+static void ptep_zap_leaf_entry(struct mm_struct *mm, leaf_entry_t entry)
 {
-	if (!non_swap_entry(entry))
+	if (leafent_is_swap(entry))
 		dec_mm_counter(mm, MM_SWAPENTS);
-	else if (is_migration_entry(entry)) {
-		struct folio *folio = pfn_swap_entry_folio(entry);
+	else if (leafent_is_migration(entry)) {
+		const struct folio *folio = leafent_to_folio(entry);

 		dec_mm_counter(mm, mm_counter(folio));
 	}
@@ -710,7 +710,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 	if (!reset && pte_swap(pte) &&
 	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
 	     (pgstev & _PGSTE_GPS_ZERO))) {
-		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
+		ptep_zap_leaf_entry(mm, leafent_from_pte(pte));
 		pte_clear(mm, addr, ptep);
 	}
 	if (reset)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0ccdc21e60e0..1df735d6b938 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1020,13 +1020,13 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	} else if (pte_none(ptent)) {
 		smaps_pte_hole_lookup(addr, walk);
 	} else {
-		swp_entry_t swpent = pte_to_swp_entry(ptent);
+		const leaf_entry_t entry = leafent_from_pte(ptent);

-		if (!non_swap_entry(swpent)) {
+		if (leafent_is_swap(entry)) {
 			int mapcount;

 			mss->swap += PAGE_SIZE;
-			mapcount = swp_swapcount(swpent);
+			mapcount = swp_swapcount(entry);
 			if (mapcount >= 2) {
 				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

@@ -1035,10 +1035,10 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 			} else {
 				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 			}
-		} else if (is_pfn_swap_entry(swpent)) {
-			if (is_device_private_entry(swpent))
+		} else if (leafent_has_pfn(entry)) {
+			if (leafent_is_device_private(entry))
 				present = true;
-			page = pfn_swap_entry_to_page(swpent);
+			page = leafent_to_page(entry);
 		}
 	}
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 41cfc6d59054..c8e6f927da48 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -492,10 +492,5 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)

 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

-static inline int non_swap_entry(swp_entry_t entry)
-{
-	return swp_type(entry) >= MAX_SWAPFILES;
-}
-
 #endif /* CONFIG_MMU */
 #endif /* _LINUX_SWAPOPS_H */
diff --git a/mm/filemap.c b/mm/filemap.c
index eb1f994291d8..980ea9f20993 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -4566,7 +4566,7 @@ static void filemap_cachestat(struct address_space *mapping,
 			swp_entry_t swp = radix_to_swp_entry(folio);

 			/* swapin error results in poisoned entry */
-			if (non_swap_entry(swp))
+			if (!leafent_is_swap(swp))
 				goto resched;

 			/*
diff --git a/mm/hmm.c b/mm/hmm.c
index cbcabc48974f..831ef855a55a 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -253,17 +253,17 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 	}

 	if (!pte_present(pte)) {
-		swp_entry_t entry = pte_to_swp_entry(pte);
+		const leaf_entry_t entry = leafent_from_pte(pte);

 		/*
 		 * Don't fault in device private pages owned by the caller,
 		 * just report the PFN.
 		 */
-		if (is_device_private_entry(entry) &&
-		    page_pgmap(pfn_swap_entry_to_page(entry))->owner ==
+		if (leafent_is_device_private(entry) &&
+		    page_pgmap(leafent_to_page(entry))->owner ==
 		    range->dev_private_owner) {
 			cpu_flags = HMM_PFN_VALID;
-			if (is_writable_device_private_entry(entry))
+			if (leafent_is_device_private_write(entry))
 				cpu_flags |= HMM_PFN_WRITE;
 			new_pfn_flags = swp_offset_pfn(entry) | cpu_flags;
 			goto out;
@@ -274,16 +274,16 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 		if (!required_fault)
 			goto out;

-		if (!non_swap_entry(entry))
+		if (leafent_is_swap(entry))
 			goto fault;

-		if (is_device_private_entry(entry))
+		if (leafent_is_device_private(entry))
 			goto fault;

-		if (is_device_exclusive_entry(entry))
+		if (leafent_is_device_exclusive(entry))
 			goto fault;

-		if (is_migration_entry(entry)) {
+		if (leafent_is_migration(entry)) {
 			pte_unmap(ptep);
 			hmm_vma_walk->last = addr;
 			migration_entry_wait(walk->mm, pmdp, addr);
diff --git a/mm/madvise.c b/mm/madvise.c
index 900f0f29e77b..67bdfcb315b3 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -249,7 +249,7 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
 			continue;
 		entry = radix_to_swp_entry(folio);
 		/* There might be swapin error entries in shmem mapping. */
-		if (non_swap_entry(entry))
+		if (!leafent_is_swap(entry))
 			continue;

 		addr = vma->vm_start +
diff --git a/mm/memory.c b/mm/memory.c
index 1412fc84172d..3d118618bdeb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -932,7 +932,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	struct folio *folio;
 	struct page *page;

-	if (likely(!non_swap_entry(entry))) {
+	if (likely(leafent_is_swap(entry))) {
 		if (swap_duplicate(entry) < 0)
 			return -EIO;

@@ -950,12 +950,12 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			set_pte_at(src_mm, addr, src_pte, pte);
 		}
 		rss[MM_SWAPENTS]++;
-	} else if (is_migration_entry(entry)) {
-		folio = pfn_swap_entry_folio(entry);
+	} else if (leafent_is_migration(entry)) {
+		folio = leafent_to_folio(entry);
 		rss[mm_counter(folio)]++;

-		if (!is_readable_migration_entry(entry) &&
+		if (!leafent_is_migration_read(entry) &&
 		    is_cow_mapping(vm_flags)) {
 			/*
 			 * COW mappings require pages in both parent and child
@@ -964,15 +964,15 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			 */
 			entry = make_readable_migration_entry(
 							swp_offset(entry));
-			pte = swp_entry_to_pte(entry);
+			pte = leafent_to_pte(entry);
 			if (pte_swp_soft_dirty(orig_pte))
 				pte = pte_swp_mksoft_dirty(pte);
 			if (pte_swp_uffd_wp(orig_pte))
 				pte = pte_swp_mkuffd_wp(pte);
 			set_pte_at(src_mm, addr, src_pte, pte);
 		}
-	} else if (is_device_private_entry(entry)) {
-		page = pfn_swap_entry_to_page(entry);
+	} else if (leafent_is_device_private(entry)) {
+		page = leafent_to_page(entry);
 		folio = page_folio(page);

 		/*
@@ -996,7 +996,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		 * when a device driver is involved (you cannot easily
 		 * save and restore device driver state).
 		 */
-		if (is_writable_device_private_entry(entry) &&
+		if (leafent_is_device_private_write(entry) &&
 		    is_cow_mapping(vm_flags)) {
 			entry = make_readable_device_private_entry(
 							swp_offset(entry));
@@ -1005,7 +1005,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 				pte = pte_swp_mkuffd_wp(pte);
 			set_pte_at(src_mm, addr, src_pte, pte);
 		}
-	} else if (is_device_exclusive_entry(entry)) {
+	} else if (leafent_is_device_exclusive(entry)) {
 		/*
 		 * Make device exclusive entries present by restoring the
 		 * original entry then copying as for a present pte. Device
@@ -4635,7 +4635,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	rmap_t rmap_flags = RMAP_NONE;
 	bool need_clear_cache = false;
 	bool exclusive = false;
-	swp_entry_t entry;
+	leaf_entry_t entry;
 	pte_t pte;
 	vm_fault_t ret = 0;
 	void *shadow = NULL;
@@ -4647,15 +4647,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (!pte_unmap_same(vmf))
 		goto out;

-	entry = pte_to_swp_entry(vmf->orig_pte);
-	if (unlikely(non_swap_entry(entry))) {
-		if (is_migration_entry(entry)) {
+	entry = leafent_from_pte(vmf->orig_pte);
+	if (unlikely(!leafent_is_swap(entry))) {
+		if (leafent_is_migration(entry)) {
 			migration_entry_wait(vma->vm_mm, vmf->pmd,
 					     vmf->address);
-		} else if (is_device_exclusive_entry(entry)) {
-			vmf->page = pfn_swap_entry_to_page(entry);
+		} else if (leafent_is_device_exclusive(entry)) {
+			vmf->page = leafent_to_page(entry);
 			ret = remove_device_exclusive_entry(vmf);
-		} else if (is_device_private_entry(entry)) {
+		} else if (leafent_is_device_private(entry)) {
 			if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
 				/*
 				 * migrate_to_ram is not yet ready to operate
@@ -4666,7 +4666,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 				goto out;
 			}

-			vmf->page = pfn_swap_entry_to_page(entry);
+			vmf->page = leafent_to_page(entry);
 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
 					vmf->address, &vmf->ptl);
 			if (unlikely(!vmf->pte ||
@@ -4690,7 +4690,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		} else {
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
 		}
-	} else if (is_hwpoison_entry(entry)) {
+	} else if (leafent_is_hwpoison(entry)) {
 		ret = VM_FAULT_HWPOISON;
 	} else if (leafent_is_marker(entry)) {
 		ret = handle_pte_marker(vmf);
diff --git a/mm/mincore.c b/mm/mincore.c
index e77c5bc88fc7..a1f48df5564e 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -74,7 +74,7 @@ static unsigned char mincore_swap(swp_entry_t entry, bool shmem)
 	 * absent. Page table may contain migration or hwpoison
 	 * entries which are always uptodate.
 	 */
-	if (non_swap_entry(entry))
+	if (!leafent_is_swap(entry))
 		return !shmem;

 	/*
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 055ec1050776..d11fa8eeaef2 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1256,7 +1256,6 @@ static long move_pages_ptes(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd
 			    unsigned long dst_addr, unsigned long src_addr,
 			    unsigned long len, __u64 mode)
 {
-	swp_entry_t entry;
 	struct swap_info_struct *si = NULL;
 	pte_t orig_src_pte, orig_dst_pte;
 	pte_t src_folio_pte;
@@ -1430,19 +1429,20 @@ static long move_pages_ptes(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd
 					orig_dst_pte, orig_src_pte, dst_pmd,
 					dst_pmdval, dst_ptl, src_ptl, &src_folio,
 					len);
-	} else {
+	} else { /* !pte_present() */
 		struct folio *folio = NULL;
+		const leaf_entry_t entry = leafent_from_pte(orig_src_pte);

-		entry = pte_to_swp_entry(orig_src_pte);
-		if (non_swap_entry(entry)) {
-			if (is_migration_entry(entry)) {
-				pte_unmap(src_pte);
-				pte_unmap(dst_pte);
-				src_pte = dst_pte = NULL;
-				migration_entry_wait(mm, src_pmd, src_addr);
-				ret = -EAGAIN;
-			} else
-				ret = -EFAULT;
+		if (leafent_is_migration(entry)) {
+			pte_unmap(src_pte);
+			pte_unmap(dst_pte);
+			src_pte = dst_pte = NULL;
+			migration_entry_wait(mm, src_pmd, src_addr);
+
+			ret = -EAGAIN;
+			goto out;
+		} else if (!leafent_is_swap(entry)) {
+			ret = -EFAULT;
 			goto out;
 		}
-- 
2.51.0
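P.S. For anyone auditing the "no functional change" claim, the complete
old-to-new mapping, derived from the hunks above:

	pte_to_swp_entry(pte)                    ->  leafent_from_pte(pte)
	swp_entry_to_pte(entry)                  ->  leafent_to_pte(entry)
	!non_swap_entry(entry)                   ->  leafent_is_swap(entry)
	is_migration_entry(entry)                ->  leafent_is_migration(entry)
	is_readable_migration_entry(entry)       ->  leafent_is_migration_read(entry)
	is_device_private_entry(entry)           ->  leafent_is_device_private(entry)
	is_writable_device_private_entry(entry)  ->  leafent_is_device_private_write(entry)
	is_device_exclusive_entry(entry)         ->  leafent_is_device_exclusive(entry)
	is_hwpoison_entry(entry)                 ->  leafent_is_hwpoison(entry)
	is_pfn_swap_entry(entry)                 ->  leafent_has_pfn(entry)
	pfn_swap_entry_to_page(entry)            ->  leafent_to_page(entry)
	pfn_swap_entry_folio(entry)              ->  leafent_to_folio(entry)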