Add folio_is_lazyfree() function to identify lazy-free folios to
improve code readability.

Signed-off-by: Vernon Yang
---
 include/linux/mm_inline.h | 5 +++++
 mm/rmap.c                 | 2 +-
 mm/vmscan.c               | 5 ++---
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index fa2d6ba811b5..65a4ae52d915 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -35,6 +35,11 @@ static inline int page_is_file_lru(struct page *page)
 	return folio_is_file_lru(page_folio(page));
 }
 
+static inline int folio_is_lazyfree(const struct folio *folio)
+{
+	return folio_test_anon(folio) && !folio_test_swapbacked(folio);
+}
+
 static __always_inline void __update_lru_size(struct lruvec *lruvec,
 				enum lru_list lru, enum zone_type zid,
 				long nr_pages)
diff --git a/mm/rmap.c b/mm/rmap.c
index 336b27e00238..fd335b171ea7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2042,7 +2042,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		}
 
 		if (!pvmw.pte) {
-			if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
+			if (folio_is_lazyfree(folio)) {
 				if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
 					goto walk_done;
 				/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 81828fa625ed..ad3516ff1381 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -964,8 +964,7 @@ static void folio_check_dirty_writeback(struct folio *folio,
 	 * They could be mistakenly treated as file lru. So further anon
 	 * test is needed.
 	 */
-	if (!folio_is_file_lru(folio) ||
-	    (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
+	if (!folio_is_file_lru(folio) || folio_is_lazyfree(folio)) {
 		*dirty = false;
 		*writeback = false;
 		return;
@@ -1506,7 +1505,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 			}
 		}
 
-		if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
+		if (folio_is_lazyfree(folio)) {
 			/* follow __remove_mapping for reference */
 			if (!folio_ref_freeze(folio, 1))
 				goto keep_locked;
-- 
2.51.0