The following cleanup reworks all the max_ptes_* handling into helper functions. This increases the code readability and will later be used to implement the mTHP handling of these variables. With these changes we abstract all the madvise_collapse() special casing (don't respect the sysctls) away from the functions that utilize them. And will be used later in this series to cleanly restrict the mTHP collapse behavior. No functional change is intended; however, we are now only reading the sysfs variables once per scan, whereas before these variables were being read on each loop iteration. Suggested-by: David Hildenbrand Acked-by: David Hildenbrand (Arm) Acked-by: Usama Arif Signed-off-by: Nico Pache --- mm/khugepaged.c | 118 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 82 insertions(+), 36 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index f0e29d5c7b1f..f68853b3caa7 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -348,6 +348,62 @@ static bool pte_none_or_zero(pte_t pte) return pte_present(pte) && is_zero_pfn(pte_pfn(pte)); } +/** + * collapse_max_ptes_none - Calculate maximum allowed none-page or zero-page + * PTEs for the given collapse operation. + * @cc: The collapse control struct + * @vma: The vma to check for userfaultfd + * + * Return: Maximum number of none-page or zero-page PTEs allowed for the + * collapse operation. + */ +static unsigned int collapse_max_ptes_none(struct collapse_control *cc, + struct vm_area_struct *vma) +{ + // If the vma is userfaultfd-armed, allow no none-page or zero-page PTEs. + if (vma && userfaultfd_armed(vma)) + return 0; + // for MADV_COLLAPSE, allow any none-page or zero-page PTEs. + if (!cc->is_khugepaged) + return HPAGE_PMD_NR; + // For all other cases respect the user-defined maximum. + return khugepaged_max_ptes_none; +} + +/** + * collapse_max_ptes_shared - Calculate maximum allowed PTEs that map shared + * anonymous pages for the given collapse operation. 
+ * @cc: The collapse control struct + * + * Return: Maximum number of PTEs that map shared anonymous pages for the + * collapse operation. + */ +static unsigned int collapse_max_ptes_shared(struct collapse_control *cc) +{ + // for MADV_COLLAPSE, do not restrict the number of PTEs that map shared + // anonymous pages. + if (!cc->is_khugepaged) + return HPAGE_PMD_NR; + return khugepaged_max_ptes_shared; +} + +/** + * collapse_max_ptes_swap - Calculate the maximum allowed non-present PTEs or the + * maximum allowed non-present pagecache entries for the given collapse operation. + * @cc: The collapse control struct + * + * Return: Maximum number of non-present PTEs or the maximum allowed non-present + * pagecache entries for the collapse operation. + */ +static unsigned int collapse_max_ptes_swap(struct collapse_control *cc) +{ + // for MADV_COLLAPSE, do not restrict the number of PTE entries or + // pagecache entries that are non-present. + if (!cc->is_khugepaged) + return HPAGE_PMD_NR; + return khugepaged_max_ptes_swap; +} + int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags, int advice) { @@ -546,21 +602,19 @@ static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma, pte_t *_pte; int none_or_zero = 0, shared = 0, referenced = 0; enum scan_result result = SCAN_FAIL; + unsigned int max_ptes_none = collapse_max_ptes_none(cc, vma); + unsigned int max_ptes_shared = collapse_max_ptes_shared(cc); for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, addr += PAGE_SIZE) { pte_t pteval = ptep_get(_pte); if (pte_none_or_zero(pteval)) { - ++none_or_zero; - if (!userfaultfd_armed(vma) && - (!cc->is_khugepaged || - none_or_zero <= khugepaged_max_ptes_none)) { - continue; - } else { + if (++none_or_zero > max_ptes_none) { result = SCAN_EXCEED_NONE_PTE; count_vm_event(THP_SCAN_EXCEED_NONE_PTE); goto out; } + continue; } if (!pte_present(pteval)) { result = SCAN_PTE_NON_PRESENT; @@ -591,9 +645,7 @@ static enum scan_result 
__collapse_huge_page_isolate(struct vm_area_struct *vma, /* See collapse_scan_pmd(). */ if (folio_maybe_mapped_shared(folio)) { - ++shared; - if (cc->is_khugepaged && - shared > khugepaged_max_ptes_shared) { + if (++shared > max_ptes_shared) { result = SCAN_EXCEED_SHARED_PTE; count_vm_event(THP_SCAN_EXCEED_SHARED_PTE); goto out; @@ -1261,6 +1313,9 @@ static enum scan_result collapse_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long start_addr, bool *lock_dropped, struct collapse_control *cc) { + const unsigned int max_ptes_none = collapse_max_ptes_none(cc, vma); + const unsigned int max_ptes_shared = collapse_max_ptes_shared(cc); + const unsigned int max_ptes_swap = collapse_max_ptes_swap(cc); pmd_t *pmd; pte_t *pte, *_pte; int none_or_zero = 0, shared = 0, referenced = 0; @@ -1294,36 +1349,29 @@ static enum scan_result collapse_scan_pmd(struct mm_struct *mm, pte_t pteval = ptep_get(_pte); if (pte_none_or_zero(pteval)) { - ++none_or_zero; - if (!userfaultfd_armed(vma) && - (!cc->is_khugepaged || - none_or_zero <= khugepaged_max_ptes_none)) { - continue; - } else { + if (++none_or_zero > max_ptes_none) { result = SCAN_EXCEED_NONE_PTE; count_vm_event(THP_SCAN_EXCEED_NONE_PTE); goto out_unmap; } + continue; } if (!pte_present(pteval)) { - ++unmapped; - if (!cc->is_khugepaged || - unmapped <= khugepaged_max_ptes_swap) { - /* - * Always be strict with uffd-wp - * enabled swap entries. Please see - * comment below for pte_uffd_wp(). - */ - if (pte_swp_uffd_wp_any(pteval)) { - result = SCAN_PTE_UFFD_WP; - goto out_unmap; - } - continue; - } else { + if (++unmapped > max_ptes_swap) { result = SCAN_EXCEED_SWAP_PTE; count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); goto out_unmap; } + /* + * Always be strict with uffd-wp + * enabled swap entries. Please see + * comment below for pte_uffd_wp(). 
+ */ + if (pte_swp_uffd_wp_any(pteval)) { + result = SCAN_PTE_UFFD_WP; + goto out_unmap; + } + continue; } if (pte_uffd_wp(pteval)) { /* @@ -1366,9 +1414,7 @@ static enum scan_result collapse_scan_pmd(struct mm_struct *mm, * is shared. */ if (folio_maybe_mapped_shared(folio)) { - ++shared; - if (cc->is_khugepaged && - shared > khugepaged_max_ptes_shared) { + if (++shared > max_ptes_shared) { result = SCAN_EXCEED_SHARED_PTE; count_vm_event(THP_SCAN_EXCEED_SHARED_PTE); goto out_unmap; @@ -2323,6 +2369,8 @@ static enum scan_result collapse_scan_file(struct mm_struct *mm, unsigned long addr, struct file *file, pgoff_t start, struct collapse_control *cc) { + const unsigned int max_ptes_none = collapse_max_ptes_none(cc, NULL); + const unsigned int max_ptes_swap = collapse_max_ptes_swap(cc); struct folio *folio = NULL; struct address_space *mapping = file->f_mapping; XA_STATE(xas, &mapping->i_pages, start); @@ -2341,8 +2389,7 @@ static enum scan_result collapse_scan_file(struct mm_struct *mm, if (xa_is_value(folio)) { swap += 1 << xas_get_order(&xas); - if (cc->is_khugepaged && - swap > khugepaged_max_ptes_swap) { + if (swap > max_ptes_swap) { result = SCAN_EXCEED_SWAP_PTE; count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); break; @@ -2413,8 +2460,7 @@ static enum scan_result collapse_scan_file(struct mm_struct *mm, cc->progress += HPAGE_PMD_NR; if (result == SCAN_SUCCEED) { - if (cc->is_khugepaged && - present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { + if (present < HPAGE_PMD_NR - max_ptes_none) { result = SCAN_EXCEED_NONE_PTE; count_vm_event(THP_SCAN_EXCEED_NONE_PTE); } else { -- 2.54.0