Instead of translating between RMP_ and TTU_ flags, remove the RMP_
flags and just use the TTU_ flag space; there's plenty available.
Possibly we should rename these to RMAP_ flags, and maybe even pass
them in through rmap_walk_arg, but that can be done later.

Signed-off-by: Matthew Wilcox (Oracle)
---
 include/linux/rmap.h |  9 +++------
 mm/huge_memory.c     |  8 ++++----
 mm/migrate.c         | 12 ++++++------
 3 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index daa92a58585d..7afc6abe1c23 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -92,6 +92,7 @@ struct anon_vma_chain {
 };
 
 enum ttu_flags {
+	TTU_USE_SHARED_ZEROPAGE	= 0x2,	/* for unused pages of large folios */
 	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
 	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
 	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
@@ -1000,12 +1001,8 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 		      struct vm_area_struct *vma);
 
-enum rmp_flags {
-	RMP_LOCKED		= 1 << 0,
-	RMP_USE_SHARED_ZEROPAGE	= 1 << 1,
-};
-
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+		enum ttu_flags flags);
 
 /*
  * rmap_walk_control: To control rmap traversing for specific needs
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40cf59301c21..44ff8a648afd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3431,7 +3431,7 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags)
 	if (!folio_test_anon(folio))
 		return;
 	for (;;) {
-		remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
+		remove_migration_ptes(folio, folio, TTU_RMAP_LOCKED | flags);
 		i += folio_nr_pages(folio);
 		if (i >= nr)
 			break;
@@ -3944,7 +3944,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	int old_order = folio_order(folio);
 	struct folio *new_folio, *next;
 	int nr_shmem_dropped = 0;
-	int remap_flags = 0;
+	enum ttu_flags ttu_flags = 0;
 	int ret;
 	pgoff_t end = 0;
 
@@ -4064,9 +4064,9 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		shmem_uncharge(mapping->host, nr_shmem_dropped);
 
 	if (!ret && is_anon && !folio_is_device_private(folio))
-		remap_flags = RMP_USE_SHARED_ZEROPAGE;
+		ttu_flags = TTU_USE_SHARED_ZEROPAGE;
 
-	remap_page(folio, 1 << old_order, remap_flags);
+	remap_page(folio, 1 << old_order, ttu_flags);
 
 	/*
 	 * Unlock all after-split folios except the one containing
diff --git a/mm/migrate.c b/mm/migrate.c
index 4688b9e38cd2..4750a2ba15fe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -452,11 +452,12 @@ static bool remove_migration_pte(struct folio *folio,
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+		enum ttu_flags flags)
 {
 	struct rmap_walk_arg rmap_walk_arg = {
 		.folio = src,
-		.map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
+		.map_unused_to_zeropage = flags & TTU_USE_SHARED_ZEROPAGE,
 	};
 
 	struct rmap_walk_control rwc = {
@@ -464,9 +465,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
 		.arg = &rmap_walk_arg,
 	};
 
-	VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
+	VM_BUG_ON_FOLIO((flags & TTU_USE_SHARED_ZEROPAGE) && (src != dst), src);
 
-	if (flags & RMP_LOCKED)
+	if (flags & TTU_RMAP_LOCKED)
 		rmap_walk_locked(dst, &rwc);
 	else
 		rmap_walk(dst, &rwc);
@@ -1521,8 +1522,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 	rc = move_to_new_folio(dst, src, mode);
 
 	if (page_was_mapped)
-		remove_migration_ptes(src, !rc ? dst : src,
-				      ttu ? RMP_LOCKED : 0);
+		remove_migration_ptes(src, !rc ? dst : src, ttu);
 
 	if (ttu & TTU_RMAP_LOCKED)
 		i_mmap_unlock_write(mapping);
-- 
2.47.3
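
P.S. For readers without rmap.h open, here is a rough sketch of the flag
space this patch leaves behind. Only the first four values appear in the
diff above; TTU_HWPOISON, TTU_BATCH_FLUSH and TTU_RMAP_LOCKED are assumed
from mainline and may differ in this tree. Before the patch the enum
started at 0x4, so the 0x1 and 0x2 bits were free -- the "plenty
available" the commit message refers to.

	/* Sketch only, not part of the patch. */
	enum ttu_flags {
		TTU_USE_SHARED_ZEROPAGE	= 0x2,	/* added by this patch */
		TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
		TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
		TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
		TTU_HWPOISON		= 0x20,	/* assumed: convert pte to hwpoison entry */
		TTU_BATCH_FLUSH		= 0x40,	/* assumed: batch TLB flushes where possible */
		TTU_RMAP_LOCKED		= 0x80,	/* assumed: caller already holds rmap lock */
	};

With a single flag space, callers such as remap_page() can OR their flags
straight into the argument (TTU_RMAP_LOCKED | flags) instead of
translating RMP_ bits into TTU_ bits, and unmap_and_move_huge_page() can
pass its ttu value through unchanged.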