At this point, we are sure (nr < ENTIRELY_MAPPED). This means the upper bits are already cleared, so it is not necessary to mask them off. Signed-off-by: Wei Yang Cc: David Hildenbrand Cc: Lorenzo Stoakes Cc: Rik van Riel Cc: Liam R. Howlett Cc: Vlastimil Babka Cc: Harry Yoo --- mm/rmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/rmap.c b/mm/rmap.c index 1c5988dbd1e7..a927437a56c2 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1749,7 +1749,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, nr_pages = folio_large_nr_pages(folio); if (level == PGTABLE_LEVEL_PMD) nr_pmdmapped = nr_pages; - nr = nr_pages - (nr & FOLIO_PAGES_MAPPED); + nr = nr_pages - nr; /* Raced ahead of another remove and an add? */ if (unlikely(nr < 0)) nr = 0; -- 2.34.1 If it is not the last entire mapping, we are sure the folio is not partially mapped. Move the check into the branch taken when no entire mapping remains. Signed-off-by: Wei Yang Cc: David Hildenbrand Cc: Lorenzo Stoakes Cc: Rik van Riel Cc: Liam R. Howlett Cc: Vlastimil Babka Cc: Harry Yoo --- mm/rmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index a927437a56c2..645d924bfc7d 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1757,9 +1757,9 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, /* An add of ENTIRELY_MAPPED raced ahead */ nr = 0; } - } - partially_mapped = nr && nr < nr_pmdmapped; + partially_mapped = nr && nr < nr_pmdmapped; + } break; default: BUILD_BUG(); -- 2.34.1 A non-large folio is handled at the beginning, so at this point the folio is certainly large. Use folio_large_nr_pages() here, as is done elsewhere. Signed-off-by: Wei Yang Cc: David Hildenbrand Cc: Lorenzo Stoakes Cc: Rik van Riel Cc: Liam R. 
Howlett Cc: Vlastimil Babka Cc: Harry Yoo --- mm/rmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/rmap.c b/mm/rmap.c index 645d924bfc7d..1b7011afc663 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1703,7 +1703,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, nr = folio_sub_return_large_mapcount(folio, nr_pages, vma); if (!nr) { /* Now completely unmapped. */ - nr = folio_nr_pages(folio); + nr = folio_large_nr_pages(folio); } else { partially_mapped = nr < folio_large_nr_pages(folio) && !folio_entire_mapcount(folio); -- 2.34.1