From: Kairui Song The filemap_get_incore_folio (previously find_get_incore_page) helper was introduced by commit 61ef18655704 ("mm: factor find_get_incore_page out of mincore_page") to be used by later commit f5df8635c5a3 ("mm: use find_get_incore_page in memcontrol"), so memory cgroup charge move code can be simplified. But commit 6b611388b626 ("memcg-v1: remove charge move code") removed that user completely, it's only used by mincore now. So this commit basically reverts commit 61ef18655704 ("mm: factor find_get_incore_page out of mincore_page"). Move it back to mincore side to simplify the code. Signed-off-by: Kairui Song --- mm/mincore.c | 29 +++++++++++++++++++++++++++-- mm/swap.h | 10 ---------- mm/swap_state.c | 38 -------------------------------------- 3 files changed, 27 insertions(+), 50 deletions(-) diff --git a/mm/mincore.c b/mm/mincore.c index 10dabefc3acc..f0d3c9419e58 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -64,8 +64,33 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t index) * any other file mapping (ie. marked !present and faulted in with * tmpfs's .fault). So swapped out tmpfs mappings are tested here. */ - folio = filemap_get_incore_folio(mapping, index); - if (!IS_ERR(folio)) { + if (IS_ENABLED(CONFIG_SWAP) && shmem_mapping(mapping)) { + folio = filemap_get_entry(mapping, index); + /* + * shmem/tmpfs may return swap: account for swapcache + * page too. + */ + if (xa_is_value(folio)) { + struct swap_info_struct *si; + swp_entry_t swp = radix_to_swp_entry(folio); + /* There might be swapin error entries in shmem mapping. 
*/ + if (non_swap_entry(swp)) + return 0; + /* Prevent swap device to being swapoff under us */ + si = get_swap_device(swp); + if (si) { + folio = filemap_get_folio(swap_address_space(swp), + swap_cache_index(swp)); + put_swap_device(si); + } else { + return 0; + } + } + } else { + folio = filemap_get_folio(mapping, index); + } + + if (folio) { present = folio_test_uptodate(folio); folio_put(folio); } diff --git a/mm/swap.h b/mm/swap.h index 911ad5ff0f89..1ae44d4193b1 100644 --- a/mm/swap.h +++ b/mm/swap.h @@ -64,9 +64,6 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin, void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr); struct folio *swap_cache_get_folio(swp_entry_t entry, struct vm_area_struct *vma, unsigned long addr); -struct folio *filemap_get_incore_folio(struct address_space *mapping, - pgoff_t index); - struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr, struct swap_iocb **plug); @@ -178,13 +175,6 @@ static inline struct folio *swap_cache_get_folio(swp_entry_t entry, return NULL; } -static inline -struct folio *filemap_get_incore_folio(struct address_space *mapping, - pgoff_t index) -{ - return filemap_get_folio(mapping, index); -} - static inline void *get_shadow_from_swap_cache(swp_entry_t entry) { return NULL; diff --git a/mm/swap_state.c b/mm/swap_state.c index c354435a0923..99513b74b5d8 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -323,44 +323,6 @@ struct folio *swap_cache_get_folio(swp_entry_t entry, return folio; } -/** - * filemap_get_incore_folio - Find and get a folio from the page or swap caches. - * @mapping: The address_space to search. - * @index: The page cache index. - * - * This differs from filemap_get_folio() in that it will also look for the - * folio in the swap cache. - * - * Return: The found folio or %NULL. 
- */ -struct folio *filemap_get_incore_folio(struct address_space *mapping, - pgoff_t index) -{ - swp_entry_t swp; - struct swap_info_struct *si; - struct folio *folio = filemap_get_entry(mapping, index); - - if (!folio) - return ERR_PTR(-ENOENT); - if (!xa_is_value(folio)) - return folio; - if (!shmem_mapping(mapping)) - return ERR_PTR(-ENOENT); - - swp = radix_to_swp_entry(folio); - /* There might be swapin error entries in shmem mapping. */ - if (non_swap_entry(swp)) - return ERR_PTR(-ENOENT); - /* Prevent swapoff from happening to us */ - si = get_swap_device(swp); - if (!si) - return ERR_PTR(-ENOENT); - index = swap_cache_index(swp); - folio = filemap_get_folio(swap_address_space(swp), index); - put_swap_device(si); - return folio; -} - struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated, bool skip_if_exists) -- 2.50.1 From: Kairui Song Introduce a mincore_swap helper for checking swap entries, separate it from page cache checking. The new helper will only be called on swap address space, so it always grabs the swap device before checking the entry, so the caller won't need to lock anything. The sanity WARN_ON check will also cover all use cases now, previously it only worked for PTE checking. 
Signed-off-by: Kairui Song --- mm/mincore.c | 58 ++++++++++++++++++++++++++++------------------------ 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/mm/mincore.c b/mm/mincore.c index f0d3c9419e58..1ac53acac239 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -47,6 +47,31 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, return 0; } +static unsigned char mincore_swap(swp_entry_t entry) +{ + struct swap_info_struct *si; + struct folio *folio = NULL; + unsigned char present = 0; + + if (!IS_ENABLED(CONFIG_SWAP)) { + WARN_ON(1); + return 1; + } + + si = get_swap_device(entry); + if (si) { + folio = filemap_get_folio(swap_address_space(entry), + swap_cache_index(entry)); + put_swap_device(si); + if (folio) { + present = folio_test_uptodate(folio); + folio_put(folio); + } + } + + return present; +} + /* * Later we can get more picky about what "in core" means precisely. * For now, simply check to see if the page is in the page cache, @@ -64,33 +89,18 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t index) * any other file mapping (ie. marked !present and faulted in with * tmpfs's .fault). So swapped out tmpfs mappings are tested here. */ - if (IS_ENABLED(CONFIG_SWAP) && shmem_mapping(mapping)) { - folio = filemap_get_entry(mapping, index); - /* - * shmem/tmpfs may return swap: account for swapcache - * page too. - */ + folio = filemap_get_entry(mapping, index); + if (folio) { if (xa_is_value(folio)) { - struct swap_info_struct *si; swp_entry_t swp = radix_to_swp_entry(folio); /* There might be swapin error entries in shmem mapping. 
*/ if (non_swap_entry(swp)) return 0; - /* Prevent swap device to being swapoff under us */ - si = get_swap_device(swp); - if (si) { - folio = filemap_get_folio(swap_address_space(swp), - swap_cache_index(swp)); - put_swap_device(si); - } else { + if (shmem_mapping(mapping)) + return mincore_swap(swp); + else return 0; - } } - } else { - folio = filemap_get_folio(mapping, index); - } - - if (folio) { present = folio_test_uptodate(folio); folio_put(folio); } @@ -177,13 +187,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, */ *vec = 1; } else { -#ifdef CONFIG_SWAP - *vec = mincore_page(swap_address_space(entry), - swap_cache_index(entry)); -#else - WARN_ON(1); - *vec = 1; -#endif + *vec = mincore_swap(entry); } } vec += step; -- 2.50.1 From: Kairui Song mincore is only interested in the existence of a page, which is a changing state by nature, locking and making it stable is not needed. And now neither mincore_page nor mincore_swap requires PTL, this PTL locking can be dropped. Signed-off-by: Kairui Song --- mm/mincore.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/mincore.c b/mm/mincore.c index 1ac53acac239..cc4460aba1f9 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -153,13 +153,13 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, goto out; } - ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + ptep = pte_offset_map(pmd, addr); if (!ptep) { walk->action = ACTION_AGAIN; return 0; } for (; addr != end; ptep += step, addr += step * PAGE_SIZE) { - pte_t pte = ptep_get(ptep); + pte_t pte = ptep_get_lockless(ptep); step = 1; /* We need to do cache lookup too for pte markers */ @@ -192,7 +192,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, } vec += step; } - pte_unmap_unlock(ptep - 1, ptl); + pte_unmap(ptep - 1); out: walk->private += nr; cond_resched(); -- 2.50.1