Add folio_put_swap_pages() to handle a batch of consecutive pages. Note
that folio_put_swap() can already handle a subset of this: nr_pages == 1
and nr_pages == folio_nr_pages(folio). Generalize this to any nr_pages.

Currently we have the not-so-nice convention of passing subpage == NULL
when we want to operate on the entire folio, and subpage != NULL when we
want to operate on only that subpage. Remove this indirection: the caller
invokes folio_put_swap_pages() to operate on a range of pages in the
folio (i.e. nr_pages may be anything from 1 to folio_nr_pages()), and
invokes folio_put_swap() to operate on the entire folio.

Signed-off-by: Dev Jain
---
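Not part of the patch, just to illustrate the resulting calling
convention: a minimal sketch (the helper name example_put_swap() is made
up) of when a caller would pick folio_put_swap() versus
folio_put_swap_pages(). It assumes the folio is locked and in the swap
cache, as folio_put_swap_pages() requires.

	/* Illustrative only; assumes mm-internal context (mm/swap.h). */
	static void example_put_swap(struct folio *folio, struct page *page,
				     unsigned long nr_pages)
	{
		if (page == folio_page(folio, 0) &&
		    nr_pages == folio_nr_pages(folio))
			/* Whole folio: equivalent to folio_put_swap_pages()
			 * over all folio_nr_pages() pages. */
			folio_put_swap(folio);
		else
			/* Any sub-range, including a single subpage. */
			folio_put_swap_pages(folio, page, nr_pages);
	}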
 mm/memory.c   |  6 +++---
 mm/rmap.c     |  4 ++--
 mm/shmem.c    |  6 +++---
 mm/swap.h     | 11 +++++++++--
 mm/swapfile.c | 22 +++++++++++++---------
 5 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index f14311c4d2001..c5605a779ce4d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5104,7 +5104,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (unlikely(folio != swapcache)) {
 		folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
 		folio_add_lru_vma(folio, vma);
-		folio_put_swap(swapcache, NULL);
+		folio_put_swap(swapcache);
 	} else if (!folio_test_anon(folio)) {
 		/*
 		 * We currently only expect !anon folios that are fully
@@ -5113,12 +5113,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		VM_WARN_ON_ONCE_FOLIO(folio_nr_pages(folio) != nr_pages, folio);
 		VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
 		folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
-		folio_put_swap(folio, NULL);
+		folio_put_swap(folio);
 	} else {
 		VM_WARN_ON_ONCE(nr_pages != 1 && nr_pages != folio_nr_pages(folio));
 		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
					 rmap_flags);
-		folio_put_swap(folio, nr_pages == 1 ? page : NULL);
+		folio_put_swap_pages(folio, page, nr_pages);
 	}
 
 	VM_BUG_ON(!folio_test_anon(folio) ||
diff --git a/mm/rmap.c b/mm/rmap.c
index 352ba77d90f67..7cbf850182187 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2325,7 +2325,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * so we'll not check/care.
 			 */
 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
-				folio_put_swap(folio, subpage);
+				folio_put_swap_pages(folio, subpage, 1);
 				set_pte_at(mm, address, pvmw.pte, pteval);
 				goto walk_abort;
 			}
@@ -2333,7 +2333,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			/* See folio_try_share_anon_rmap(): clear PTE first. */
 			if (anon_exclusive &&
 			    folio_try_share_anon_rmap_pte(folio, subpage)) {
-				folio_put_swap(folio, subpage);
+				folio_put_swap_pages(folio, subpage, 1);
 				set_pte_at(mm, address, pvmw.pte, pteval);
 				goto walk_abort;
 			}
diff --git a/mm/shmem.c b/mm/shmem.c
index 5e4f521399847..bb7e0fc305d87 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1719,7 +1719,7 @@ int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
 		/* Swap entry might be erased by racing shmem_free_swap() */
 		if (!error) {
 			shmem_recalc_inode(inode, 0, -nr_pages);
-			folio_put_swap(folio, NULL);
+			folio_put_swap(folio);
 		}
 
 		/*
@@ -2199,7 +2199,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
 
 	nr_pages = folio_nr_pages(folio);
 	folio_wait_writeback(folio);
-	folio_put_swap(folio, NULL);
+	folio_put_swap(folio);
 	swap_cache_del_folio(folio);
 	/*
 	 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
@@ -2429,7 +2429,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 
 	if (sgp == SGP_WRITE)
 		folio_mark_accessed(folio);
-	folio_put_swap(folio, NULL);
+	folio_put_swap(folio);
 	swap_cache_del_folio(folio);
 	folio_mark_dirty(folio);
 	put_swap_device(si);
diff --git a/mm/swap.h b/mm/swap.h
index 3c25f914e908b..343547469927a 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -209,7 +209,9 @@ int folio_alloc_swap(struct folio *folio);
 int folio_dup_swap(struct folio *folio);
 int folio_dup_swap_pages(struct folio *folio, struct page *page,
 			 unsigned long nr_pages);
-void folio_put_swap(struct folio *folio, struct page *subpage);
+void folio_put_swap(struct folio *folio);
+void folio_put_swap_pages(struct folio *folio, struct page *page,
+			  unsigned long nr_pages);
 
 /* For internal use */
 extern void __swap_cluster_free_entries(struct swap_info_struct *si,
@@ -403,7 +405,12 @@ static inline int folio_dup_swap_pages(struct folio *folio, struct page *page,
 	return -EINVAL;
 }
 
-static inline void folio_put_swap(struct folio *folio, struct page *page)
+static inline void folio_put_swap(struct folio *folio)
+{
+}
+
+static inline void folio_put_swap_pages(struct folio *folio, struct page *page,
+					unsigned long nr_pages)
 {
 }
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 28daf92839e77..ac576cc63b194 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1778,31 +1778,34 @@ int folio_dup_swap(struct folio *folio)
 }
 
 /**
- * folio_put_swap() - Decrease swap count of swap entries of a folio.
+ * folio_put_swap_pages() - Decrease swap count of swap entries of a folio.
 * @folio: folio with swap entries bounded, must be in swap cache and locked.
- * @subpage: if not NULL, only decrease the swap count of this subpage.
+ * @page: the first page in the folio to decrease the swap count for.
+ * @nr_pages: the number of pages in the folio to decrease the swap count for.
 *
 * This won't free the swap slots even if swap count drops to zero, they are
 * still pinned by the swap cache. User may call folio_free_swap to free them.
 *
 * Context: Caller must ensure the folio is locked and in the swap cache.
 */
-void folio_put_swap(struct folio *folio, struct page *subpage)
+void folio_put_swap_pages(struct folio *folio, struct page *page,
+			  unsigned long nr_pages)
 {
 	swp_entry_t entry = folio->swap;
-	unsigned long nr_pages = folio_nr_pages(folio);
 	struct swap_info_struct *si = __swap_entry_to_info(entry);
 
 	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
 	VM_WARN_ON_FOLIO(!folio_test_swapcache(folio), folio);
 
-	if (subpage) {
-		entry.val += folio_page_idx(folio, subpage);
-		nr_pages = 1;
-	}
+	entry.val += folio_page_idx(folio, page);
 	swap_put_entries_cluster(si, swp_offset(entry), nr_pages, false);
 }
 
+void folio_put_swap(struct folio *folio)
+{
+	folio_put_swap_pages(folio, folio_page(folio, 0), folio_nr_pages(folio));
+}
+
 /*
 * When we get a swap entry, if there aren't some other ways to
 * prevent swapoff, such as the folio in swap cache is locked, RCU
@@ -2443,7 +2446,8 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		new_pte = pte_mkuffd_wp(new_pte);
setpte:
 	set_pte_at(vma->vm_mm, addr, pte, new_pte);
-	folio_put_swap(swapcache, folio_file_page(swapcache, swp_offset(entry)));
+	folio_put_swap_pages(swapcache,
+			folio_file_page(swapcache, swp_offset(entry)), 1);
out:
 	if (pte)
 		pte_unmap_unlock(pte, ptl);
-- 
2.34.1