mm/swapops,rmap: remove should-never-be-compiled code

Remove the stubs that contain only BUILD_BUG() and exist merely to keep
should-never-be-compiled callers building: folio_add_return_large_mapcount(),
folio_sub_return_large_mapcount(), set_pmd_migration_entry() and
remove_migration_pmd(). Convert the callers from
IS_ENABLED()/thp_migration_supported() checks to #ifdef blocks so the removed
stubs are never referenced, and mark the "mapped" pointer in
__folio_add_rmap()/__folio_remove_rmap() as __maybe_unused since it is now
used only in the !CONFIG_NO_PAGE_MAPCOUNT branches.

Link: https://lore.kernel.org/linux-mm/CAHrEdeunY-YpDC7AoTFcppAvHCJpEJRp=GTQ4psRKRi_3fhB0Q@mail.gmail.com/
Signed-off-by: Wale Zhang
---
 include/linux/rmap.h    | 17 ++-----
 include/linux/swapops.h | 12 -----
 mm/migrate_device.c     |  5 ++-
 mm/rmap.c               | 98 ++++++++++++++++++++---------------------------------
 4 files changed, 54 insertions(+), 78 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index daa92a58585d..44dccd1821eb 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -354,33 +354,24 @@ static inline void folio_add_large_mapcount(struct folio *folio,
 	atomic_add(diff, &folio->_large_mapcount);
 }
 
-static inline int folio_add_return_large_mapcount(struct folio *folio,
-		int diff, struct vm_area_struct *vma)
-{
-	BUILD_BUG();
-}
-
 static inline void folio_sub_large_mapcount(struct folio *folio,
 		int diff, struct vm_area_struct *vma)
 {
 	atomic_sub(diff, &folio->_large_mapcount);
 }
 
-static inline int folio_sub_return_large_mapcount(struct folio *folio,
-		int diff, struct vm_area_struct *vma)
-{
-	BUILD_BUG();
-}
 #endif /* CONFIG_MM_ID */
 
 #define folio_inc_large_mapcount(folio, vma) \
 	folio_add_large_mapcount(folio, 1, vma)
-#define folio_inc_return_large_mapcount(folio, vma) \
-	folio_add_return_large_mapcount(folio, 1, vma)
 #define folio_dec_large_mapcount(folio, vma) \
 	folio_sub_large_mapcount(folio, 1, vma)
+#ifdef CONFIG_NO_PAGE_MAPCOUNT
+#define folio_inc_return_large_mapcount(folio, vma) \
+	folio_add_return_large_mapcount(folio, 1, vma)
 #define folio_dec_return_large_mapcount(folio, vma) \
 	folio_sub_return_large_mapcount(folio, 1, vma)
+#endif
 
 /* RMAP flags, currently only relevant for some anon rmap operations. */
 typedef int __bitwise rmap_t;
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 8cfc966eae48..d6ca56efc489 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -339,18 +339,6 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
 }
 #else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
-		struct page *page)
-{
-	BUILD_BUG();
-}
-
-static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
-		struct page *new)
-{
-	BUILD_BUG();
-}
-
 static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
 
 static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 23379663b1e1..13b2cd12e612 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -195,8 +195,8 @@ static int migrate_vma_collect_huge_pmd(pmd_t *pmdp, unsigned long start,
 		return migrate_vma_collect_skip(start, end, walk);
 	}
 
-	if (thp_migration_supported() &&
-	    (migrate->flags & MIGRATE_VMA_SELECT_COMPOUND) &&
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+	if ((migrate->flags & MIGRATE_VMA_SELECT_COMPOUND) &&
 	    (IS_ALIGNED(start, HPAGE_PMD_SIZE) &&
 	     IS_ALIGNED(end, HPAGE_PMD_SIZE))) {
@@ -228,6 +228,7 @@ static int migrate_vma_collect_huge_pmd(pmd_t *pmdp, unsigned long start,
 	}
 
 fallback:
+#endif
 	spin_unlock(ptl);
 	if (!folio_test_large(folio))
 		goto done;
diff --git a/mm/rmap.c b/mm/rmap.c
index f955f02d570e..81c7f2becc21 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1232,7 +1232,7 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		enum pgtable_level level)
 {
-	atomic_t *mapped = &folio->_nr_pages_mapped;
+	__maybe_unused atomic_t *mapped = &folio->_nr_pages_mapped;
 	const int orig_nr_pages = nr_pages;
 	int first = 0, nr = 0, nr_pmdmapped = 0;
 
@@ -1245,16 +1245,14 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 			break;
 		}
 
-		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
-			nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
-			if (nr == orig_nr_pages)
-				/* Was completely unmapped. */
-				nr = folio_large_nr_pages(folio);
-			else
-				nr = 0;
-			break;
-		}
-
+#ifdef CONFIG_NO_PAGE_MAPCOUNT
+		nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
+		if (nr == orig_nr_pages)
+			/* Was completely unmapped. */
+			nr = folio_large_nr_pages(folio);
+		else
+			nr = 0;
+#else
 		do {
 			first += atomic_inc_and_test(&page->_mapcount);
 		} while (page++, --nr_pages > 0);
@@ -1264,22 +1262,21 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 			nr = first;
 
 		folio_add_large_mapcount(folio, orig_nr_pages, vma);
+#endif
 		break;
 	case PGTABLE_LEVEL_PMD:
 	case PGTABLE_LEVEL_PUD:
 		first = atomic_inc_and_test(&folio->_entire_mapcount);
-		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
-			if (level == PGTABLE_LEVEL_PMD && first)
-				nr_pmdmapped = folio_large_nr_pages(folio);
-			nr = folio_inc_return_large_mapcount(folio, vma);
-			if (nr == 1)
-				/* Was completely unmapped. */
-				nr = folio_large_nr_pages(folio);
-			else
-				nr = 0;
-			break;
-		}
-
+#ifdef CONFIG_NO_PAGE_MAPCOUNT
+		if (level == PGTABLE_LEVEL_PMD && first)
+			nr_pmdmapped = folio_large_nr_pages(folio);
+		nr = folio_inc_return_large_mapcount(folio, vma);
+		if (nr == 1)
+			/* Was completely unmapped. */
+			nr = folio_large_nr_pages(folio);
+		else
+			nr = 0;
+#else
 		if (first) {
 			nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
 			if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
@@ -1300,6 +1297,7 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 			}
 		}
 		folio_inc_large_mapcount(folio, vma);
+#endif
 		break;
 	default:
 		BUILD_BUG();
@@ -1656,7 +1654,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		enum pgtable_level level)
 {
-	atomic_t *mapped = &folio->_nr_pages_mapped;
+	__maybe_unused atomic_t *mapped = &folio->_nr_pages_mapped;
 	int last = 0, nr = 0, nr_pmdmapped = 0;
 	bool partially_mapped = false;
 
@@ -1669,19 +1667,17 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 			break;
 		}
 
-		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
-			nr = folio_sub_return_large_mapcount(folio, nr_pages, vma);
-			if (!nr) {
-				/* Now completely unmapped. */
-				nr = folio_large_nr_pages(folio);
-			} else {
-				partially_mapped = nr < folio_large_nr_pages(folio) &&
-						   !folio_entire_mapcount(folio);
-				nr = 0;
-			}
-			break;
+#ifdef CONFIG_NO_PAGE_MAPCOUNT
+		nr = folio_sub_return_large_mapcount(folio, nr_pages, vma);
+		if (!nr) {
+			/* Now completely unmapped. */
+			nr = folio_large_nr_pages(folio);
+		} else {
+			partially_mapped = nr < folio_large_nr_pages(folio) &&
+					   !folio_entire_mapcount(folio);
+			nr = 0;
 		}
-
+#else
 		folio_sub_large_mapcount(folio, nr_pages, vma);
 		do {
 			last += atomic_add_negative(-1, &page->_mapcount);
@@ -1692,25 +1688,24 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 			nr = last;
 
 		partially_mapped = nr && atomic_read(mapped);
+#endif
 		break;
 	case PGTABLE_LEVEL_PMD:
 	case PGTABLE_LEVEL_PUD:
-		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
-			last = atomic_add_negative(-1, &folio->_entire_mapcount);
-			if (level == PGTABLE_LEVEL_PMD && last)
-				nr_pmdmapped = folio_large_nr_pages(folio);
-			nr = folio_dec_return_large_mapcount(folio, vma);
-			if (!nr) {
-				/* Now completely unmapped. */
-				nr = folio_large_nr_pages(folio);
-			} else {
-				partially_mapped = last &&
-						   nr < folio_large_nr_pages(folio);
-				nr = 0;
-			}
-			break;
+#ifdef CONFIG_NO_PAGE_MAPCOUNT
+		last = atomic_add_negative(-1, &folio->_entire_mapcount);
+		if (level == PGTABLE_LEVEL_PMD && last)
+			nr_pmdmapped = folio_large_nr_pages(folio);
+		nr = folio_dec_return_large_mapcount(folio, vma);
+		if (!nr) {
+			/* Now completely unmapped. */
+			nr = folio_large_nr_pages(folio);
+		} else {
+			partially_mapped = last &&
+					   nr < folio_large_nr_pages(folio);
+			nr = 0;
 		}
-
+#else
 		folio_dec_large_mapcount(folio, vma);
 		last = atomic_add_negative(-1, &folio->_entire_mapcount);
 		if (last) {
@@ -1730,6 +1725,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 		}
 
 		partially_mapped = nr && nr < nr_pmdmapped;
+#endif
 		break;
 	default:
 		BUILD_BUG();
-- 
2.43.0
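
For context on the pattern being removed: an if (IS_ENABLED(CONFIG_FOO)) check
still compiles and type-checks both branches, so helpers used only in the
enabled branch need stubs (in the kernel these contain BUILD_BUG()), whereas an
#ifdef drops the disabled branch entirely and the stubs can go. Below is a
minimal, self-contained user-space sketch of that difference, not kernel code;
HAVE_FEATURE, feature_op(), with_runtime_check() and with_ifdef() are made-up
stand-ins for the Kconfig symbols and helpers this patch touches.

/*
 * Sketch only: contrasts a runtime-style feature check with an #ifdef.
 */
#include <stdio.h>

#define HAVE_FEATURE 0	/* pretend the feature is compiled out */

#if HAVE_FEATURE
static int feature_op(void)
{
	return 42;
}
#else
/*
 * Stub needed only because with_runtime_check() below still compiles its
 * dead branch; the kernel stubs removed by this patch put BUILD_BUG() here.
 */
static int feature_op(void)
{
	return 0;
}
#endif

static int with_runtime_check(void)
{
	if (HAVE_FEATURE)	/* both branches compile, so the stub must exist */
		return feature_op();
	return -1;
}

static int with_ifdef(void)
{
#if HAVE_FEATURE	/* disabled branch is never compiled, no stub required */
	return feature_op();
#else
	return -1;
#endif
}

int main(void)
{
	printf("%d %d\n", with_runtime_check(), with_ifdef());
	return 0;
}

With HAVE_FEATURE set to 0, deleting the stub breaks only with_runtime_check();
with_ifdef() keeps building, which is the property the #ifdef conversion above
relies on when it deletes the BUILD_BUG() stubs.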