The prot_numa_skip() name is misleading: besides checking whether to skip prot NUMA,
the function also updates the folio access time. Rename it to folio_needs_prot_numa()
and clean it up a bit: drop the ret variable and return directly instead of using the
goto style, and make the function non-static so that it can be reused. Note that the
return value is inverted compared to the old helper, so the caller in change_pte_range()
now negates it.

Signed-off-by: Kefeng Wang
---
 mm/internal.h |  3 +++
 mm/mprotect.c | 43 ++++++++++++++++++++++---------------------
 2 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 6691d3ea55af..b521b5177d3c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1403,6 +1403,9 @@ int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
 		unsigned long addr, int *flags, bool writable,
 		int *last_cpupid);
 
+bool folio_needs_prot_numa(struct folio *folio, struct vm_area_struct *vma,
+		int target_node);
+
 void free_zone_device_folio(struct folio *folio);
 int migrate_device_coherent_folio(struct folio *folio);
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 6236d120c8e6..1369ba6f6294 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -118,26 +118,30 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
 	return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);
 }
 
-static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
-			   struct folio *folio)
+/**
+ * folio_needs_prot_numa() - Whether the folio needs prot numa
+ * @folio: The folio.
+ * @vma: The VMA mapping.
+ * @target_node: The numa node being accessed.
+ *
+ * Return: True if folio needs prot numa and the access time of
+ * folio is adjusted. False otherwise.
+ */
+bool folio_needs_prot_numa(struct folio *folio, struct vm_area_struct *vma,
+		int target_node)
 {
-	bool ret = true;
-	bool toptier;
 	int nid;
 
-	if (!folio)
-		goto skip;
-
-	if (folio_is_zone_device(folio) || folio_test_ksm(folio))
-		goto skip;
+	if (!folio || folio_is_zone_device(folio) || folio_test_ksm(folio))
+		return false;
 
 	/* Also skip shared copy-on-write folios */
 	if (is_cow_mapping(vma->vm_flags) && folio_maybe_mapped_shared(folio))
-		goto skip;
+		return false;
 
 	/* Folios are pinned and can't be migrated */
 	if (folio_maybe_dma_pinned(folio))
-		goto skip;
+		return false;
 
 	/*
 	 * While migration can move some dirty pages,
@@ -145,7 +149,7 @@ static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
 	 * context.
 	 */
 	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
-		goto skip;
+		return false;
 
 	/*
 	 * Don't mess with PTEs if page is already on the node
@@ -153,23 +157,20 @@ static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
 	 */
 	nid = folio_nid(folio);
 	if (target_node == nid)
-		goto skip;
-
-	toptier = node_is_toptier(nid);
+		return false;
 
 	/*
 	 * Skip scanning top tier node if normal numa
 	 * balancing is disabled
 	 */
-	if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && toptier)
-		goto skip;
+	if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
+	    node_is_toptier(nid))
+		return false;
 
-	ret = false;
 	if (folio_use_access_time(folio))
 		folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
 
-skip:
-	return ret;
+	return true;
 }
 
 /* Set nr_ptes number of ptes, starting from idx */
@@ -315,7 +316,7 @@ static long change_pte_range(struct mmu_gather *tlb,
 		 * pages. See similar comment in change_huge_pmd.
 		 */
 		if (prot_numa &&
-		    prot_numa_skip(vma, target_node, folio)) {
+		    !folio_needs_prot_numa(folio, vma, target_node)) {
 			/* determine batch to skip */
 			nr_ptes = mprotect_folio_pte_batch(folio,
-- 
2.27.0
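
For reviewers, a minimal sketch of how the now-exported folio_needs_prot_numa() could be
reused by another caller. Only the helper's signature and semantics come from the patch
above; the wrapper function, its name, and the prot_numa flag plumbing below are
illustrative assumptions, not part of this change.

/* Hypothetical caller, for illustration only -- not part of this patch. */
#include "internal.h"	/* declares folio_needs_prot_numa() after this patch */

static bool numa_hint_wanted(struct folio *folio, struct vm_area_struct *vma,
		int target_node, bool prot_numa)
{
	/* Nothing to do unless NUMA-hinting protection was requested. */
	if (!prot_numa)
		return false;

	/*
	 * folio_needs_prot_numa() returns true when the folio's PTEs should
	 * be made NUMA-protected; as a side effect it also refreshes the
	 * folio access time, just as the old prot_numa_skip() did when it
	 * decided not to skip.
	 */
	return folio_needs_prot_numa(folio, vma, target_node);
}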