Similar to copy_huge_pmd(), there is a large mass of open-coded logic for
the CONFIG_ARCH_ENABLE_THP_MIGRATION non-present entry case that does not
use thp_migration_supported() consistently.

Resolve this by separating out this logic and introducing
change_non_present_huge_pmd().

Signed-off-by: Lorenzo Stoakes
---
 mm/huge_memory.c | 72 ++++++++++++++++++++++++++----------------------
 1 file changed, 39 insertions(+), 33 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 755d38f82aec..fa928ca42b6d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2498,6 +2498,42 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 	return false;
 }
 
+static void change_non_present_huge_pmd(struct mm_struct *mm,
+		unsigned long addr, pmd_t *pmd, bool uffd_wp,
+		bool uffd_wp_resolve)
+{
+	swp_entry_t entry = pmd_to_swp_entry(*pmd);
+	struct folio *folio = pfn_swap_entry_folio(entry);
+	pmd_t newpmd;
+
+	VM_WARN_ON(!is_pmd_non_present_folio_entry(*pmd));
+	if (is_writable_migration_entry(entry)) {
+		/*
+		 * A protection check is difficult so
+		 * just be safe and disable write
+		 */
+		if (folio_test_anon(folio))
+			entry = make_readable_exclusive_migration_entry(swp_offset(entry));
+		else
+			entry = make_readable_migration_entry(swp_offset(entry));
+		newpmd = swp_entry_to_pmd(entry);
+		if (pmd_swp_soft_dirty(*pmd))
+			newpmd = pmd_swp_mksoft_dirty(newpmd);
+	} else if (is_writable_device_private_entry(entry)) {
+		entry = make_readable_device_private_entry(swp_offset(entry));
+		newpmd = swp_entry_to_pmd(entry);
+	} else {
+		newpmd = *pmd;
+	}
+
+	if (uffd_wp)
+		newpmd = pmd_swp_mkuffd_wp(newpmd);
+	else if (uffd_wp_resolve)
+		newpmd = pmd_swp_clear_uffd_wp(newpmd);
+	if (!pmd_same(*pmd, newpmd))
+		set_pmd_at(mm, addr, pmd, newpmd);
+}
+
 /*
  * Returns
  *  - 0 if PMD could not be locked
@@ -2526,41 +2562,11 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	if (!ptl)
 		return 0;
 
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-	if (is_swap_pmd(*pmd)) {
-		swp_entry_t entry = pmd_to_swp_entry(*pmd);
-		struct folio *folio = pfn_swap_entry_folio(entry);
-		pmd_t newpmd;
-
-		VM_WARN_ON(!is_pmd_non_present_folio_entry(*pmd));
-		if (is_writable_migration_entry(entry)) {
-			/*
-			 * A protection check is difficult so
-			 * just be safe and disable write
-			 */
-			if (folio_test_anon(folio))
-				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
-			else
-				entry = make_readable_migration_entry(swp_offset(entry));
-			newpmd = swp_entry_to_pmd(entry);
-			if (pmd_swp_soft_dirty(*pmd))
-				newpmd = pmd_swp_mksoft_dirty(newpmd);
-		} else if (is_writable_device_private_entry(entry)) {
-			entry = make_readable_device_private_entry(swp_offset(entry));
-			newpmd = swp_entry_to_pmd(entry);
-		} else {
-			newpmd = *pmd;
-		}
-
-		if (uffd_wp)
-			newpmd = pmd_swp_mkuffd_wp(newpmd);
-		else if (uffd_wp_resolve)
-			newpmd = pmd_swp_clear_uffd_wp(newpmd);
-		if (!pmd_same(*pmd, newpmd))
-			set_pmd_at(mm, addr, pmd, newpmd);
+	if (thp_migration_supported() && is_swap_pmd(*pmd)) {
+		change_non_present_huge_pmd(mm, addr, pmd, uffd_wp,
+				uffd_wp_resolve);
 		goto unlock;
 	}
-#endif
 
 	if (prot_numa) {
 		int target_node = NUMA_NO_NODE;
-- 
2.51.0