These flags only track folio-specific state during migration and are not
used for movable_ops pages. Rename the enum values and the old_page_state
variable to match. No functional change.

Suggested-by: David Hildenbrand
Signed-off-by: Shivank Garg
---
Applies cleanly on mm-new (02b045682c74).

v1: https://lore.kernel.org/all/20260323141935.389232-3-shivankg@amd.com
v2:
 - Rename FOLIO_MF_* to FOLIO_*, per feedback from Willy.

 mm/migrate.c | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 05cb408846f2..7dd6c2f2e1ef 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1135,26 +1135,26 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
  * This is safe because nobody is using it except us.
  */
 enum {
-	PAGE_WAS_MAPPED = BIT(0),
-	PAGE_WAS_MLOCKED = BIT(1),
-	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
+	FOLIO_WAS_MAPPED = BIT(0),
+	FOLIO_WAS_MLOCKED = BIT(1),
+	FOLIO_OLD_STATES = FOLIO_WAS_MAPPED | FOLIO_WAS_MLOCKED,
 };
 
 static void __migrate_folio_record(struct folio *dst,
-				   int old_page_state,
+				   int old_folio_state,
 				   struct anon_vma *anon_vma)
 {
-	dst->private = (void *)anon_vma + old_page_state;
+	dst->private = (void *)anon_vma + old_folio_state;
 }
 
 static void __migrate_folio_extract(struct folio *dst,
-				   int *old_page_state,
+				   int *old_folio_state,
 				   struct anon_vma **anon_vmap)
 {
 	unsigned long private = (unsigned long)dst->private;
 
-	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
-	*old_page_state = private & PAGE_OLD_STATES;
+	*anon_vmap = (struct anon_vma *)(private & ~FOLIO_OLD_STATES);
+	*old_folio_state = private & FOLIO_OLD_STATES;
 	dst->private = NULL;
 }
 
@@ -1209,7 +1209,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 {
 	struct folio *dst;
 	int rc = -EAGAIN;
-	int old_page_state = 0;
+	int old_folio_state = 0;
 	struct anon_vma *anon_vma = NULL;
 	bool locked = false;
 	bool dst_locked = false;
@@ -1253,7 +1253,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 	}
 	locked = true;
 	if (folio_test_mlocked(src))
-		old_page_state |= PAGE_WAS_MLOCKED;
+		old_folio_state |= FOLIO_WAS_MLOCKED;
 
 	if (folio_test_writeback(src)) {
 		/*
@@ -1302,7 +1302,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 	dst_locked = true;
 
 	if (unlikely(page_has_movable_ops(&src->page))) {
-		__migrate_folio_record(dst, old_page_state, anon_vma);
+		__migrate_folio_record(dst, old_folio_state, anon_vma);
 		return 0;
 	}
 
@@ -1328,11 +1328,11 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 		VM_BUG_ON_FOLIO(folio_test_anon(src) && !folio_test_ksm(src) &&
			       !anon_vma, src);
 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
-		old_page_state |= PAGE_WAS_MAPPED;
+		old_folio_state |= FOLIO_WAS_MAPPED;
 	}
 
 	if (!folio_mapped(src)) {
-		__migrate_folio_record(dst, old_page_state, anon_vma);
+		__migrate_folio_record(dst, old_folio_state, anon_vma);
 		return 0;
 	}
 
@@ -1344,7 +1344,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 	if (rc == -EAGAIN)
 		ret = NULL;
 
-	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+	migrate_folio_undo_src(src, old_folio_state & FOLIO_WAS_MAPPED,
 			       anon_vma, locked, ret);
 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
 
@@ -1358,13 +1358,13 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 			      struct list_head *ret)
 {
 	int rc;
-	int old_page_state = 0;
+	int old_folio_state = 0;
 	struct anon_vma *anon_vma = NULL;
 	bool src_deferred_split = false;
 	bool src_partially_mapped = false;
 	struct list_head *prev;
 
-	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
+	__migrate_folio_extract(dst, &old_folio_state, &anon_vma);
 	prev = dst->lru.prev;
 	list_del(&dst->lru);
 
@@ -1395,10 +1395,10 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 	 * isolated from the unevictable LRU: but this case is the easiest.
 	 */
 	folio_add_lru(dst);
-	if (old_page_state & PAGE_WAS_MLOCKED)
+	if (old_folio_state & FOLIO_WAS_MLOCKED)
 		lru_add_drain();
 
-	if (old_page_state & PAGE_WAS_MAPPED)
+	if (old_folio_state & FOLIO_WAS_MAPPED)
 		remove_migration_ptes(src, dst, 0);
 
 	/*
@@ -1439,11 +1439,11 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 	 */
 	if (rc == -EAGAIN) {
 		list_add(&dst->lru, prev);
-		__migrate_folio_record(dst, old_page_state, anon_vma);
+		__migrate_folio_record(dst, old_folio_state, anon_vma);
 		return rc;
 	}
 
-	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+	migrate_folio_undo_src(src, old_folio_state & FOLIO_WAS_MAPPED,
 			       anon_vma, true, ret);
 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
 
@@ -1777,11 +1777,11 @@ static void migrate_folios_undo(struct list_head *src_folios,
 	dst = list_first_entry(dst_folios, struct folio, lru);
 	dst2 = list_next_entry(dst, lru);
 	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
-		int old_page_state = 0;
+		int old_folio_state = 0;
 		struct anon_vma *anon_vma = NULL;
 
-		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
-		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+		__migrate_folio_extract(dst, &old_folio_state, &anon_vma);
+		migrate_folio_undo_src(folio, old_folio_state & FOLIO_WAS_MAPPED,
 				       anon_vma, true, ret_folios);
 		list_del(&dst->lru);
 		migrate_folio_undo_dst(dst, true, put_new_folio, private);

base-commit: 02b045682c74be16c7d1501563f02b0e92d42cdb
-- 
2.43.0
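
[Editor's note, not part of the patch.] The rename does not change the
dst->private packing scheme itself: __migrate_folio_record() stores the
FOLIO_WAS_* bits in the low bits of the anon_vma pointer, which are free
because the pointer is at least 4-byte aligned, and __migrate_folio_extract()
masks them back apart. Below is a minimal, self-contained userspace sketch of
that technique for readers unfamiliar with it; struct fake_anon_vma, record()
and extract() are hypothetical stand-ins, not kernel APIs.

	/*
	 * Illustrative sketch of low-bit pointer/flag packing, similar in
	 * spirit to __migrate_folio_record()/__migrate_folio_extract().
	 */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	enum {
		WAS_MAPPED  = 1u << 0,
		WAS_MLOCKED = 1u << 1,
		OLD_STATES  = WAS_MAPPED | WAS_MLOCKED,
	};

	/* Stand-in for struct anon_vma; only its alignment matters here. */
	struct fake_anon_vma { long dummy; };

	/* Pack the pointer and the state bits into one word. */
	static void *record(struct fake_anon_vma *av, int state)
	{
		/* The flag bits must be free in the pointer value. */
		assert(((uintptr_t)av & OLD_STATES) == 0);
		return (void *)((uintptr_t)av | state);
	}

	/* Split the word back into pointer and state bits. */
	static void extract(void *priv, int *state, struct fake_anon_vma **av)
	{
		uintptr_t v = (uintptr_t)priv;

		*av = (struct fake_anon_vma *)(v & ~(uintptr_t)OLD_STATES);
		*state = v & OLD_STATES;
	}

	int main(void)
	{
		struct fake_anon_vma *av = malloc(sizeof(*av));
		struct fake_anon_vma *out_av;
		int out_state;

		void *priv = record(av, WAS_MAPPED | WAS_MLOCKED);
		extract(priv, &out_state, &out_av);

		printf("pointer ok: %d, mapped: %d, mlocked: %d\n",
		       out_av == av, !!(out_state & WAS_MAPPED),
		       !!(out_state & WAS_MLOCKED));
		free(av);
		return 0;
	}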