From: Lance Yang

As pointed out by Dev, the PTE checks for disjoint conditions in the
scanning loops can be optimized: is_swap_pte, (pte_none || is_zero_pfn),
and pte_uffd_wp are mutually exclusive.

This patch refactors the loops in both __collapse_huge_page_isolate()
and hpage_collapse_scan_pmd() to use a single if/else-if chain instead
of separate if blocks. While at it, the redundant pte_present() check
before is_zero_pfn() is also removed.

This is also a preparatory step to make it easier to merge the
almost-duplicated scanning logic in these two functions, as suggested
by David.

Reviewed-by: Wei Yang
Reviewed-by: Dev Jain
Reviewed-by: Zi Yan
Suggested-by: Dev Jain
Suggested-by: David Hildenbrand
Signed-off-by: Lance Yang
---
 mm/khugepaged.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index bec3e268dc76..e3e27223137a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -548,8 +548,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 	     _pte++, addr += PAGE_SIZE) {
 		pte_t pteval = ptep_get(_pte);
-		if (pte_none(pteval) || (pte_present(pteval) &&
-				is_zero_pfn(pte_pfn(pteval)))) {
+		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 			++none_or_zero;
 			if (!userfaultfd_armed(vma) &&
 			    (!cc->is_khugepaged ||
@@ -560,12 +559,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
 				goto out;
 			}
-		}
-		if (!pte_present(pteval)) {
+		} else if (!pte_present(pteval)) {
 			result = SCAN_PTE_NON_PRESENT;
 			goto out;
-		}
-		if (pte_uffd_wp(pteval)) {
+		} else if (pte_uffd_wp(pteval)) {
 			result = SCAN_PTE_UFFD_WP;
 			goto out;
 		}
@@ -1321,8 +1318,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
 				goto out_unmap;
 			}
-		}
-		if (pte_uffd_wp(pteval)) {
+		} else if (pte_uffd_wp(pteval)) {
 			/*
 			 * Don't collapse the page if any of the small
 			 * PTEs are armed with uffd write protection.
-- 
2.49.0

From: Lance Yang

As Zi pointed out, we should avoid crashing the kernel for conditions
that can be handled gracefully. Encountering a non-anonymous folio in
an anonymous VMA is a bug, but a warning is sufficient.

This patch changes the VM_BUG_ON_FOLIO(!folio_test_anon(folio)) to a
VM_WARN_ON_FOLIO() in both __collapse_huge_page_isolate() and
hpage_collapse_scan_pmd(), and then aborts the scan with
SCAN_PAGE_ANON.

This also makes more of the scanning logic common between
hpage_collapse_scan_pmd() and __collapse_huge_page_isolate(), as
suggested by Dev.

Suggested-by: Dev Jain
Suggested-by: Zi Yan
Reviewed-by: Wei Yang
Reviewed-by: Dev Jain
Signed-off-by: Lance Yang
---
 mm/khugepaged.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e3e27223137a..b5c0295c3414 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -573,7 +573,11 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		}
 		folio = page_folio(page);
-		VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
+		if (!folio_test_anon(folio)) {
+			VM_WARN_ON_FOLIO(true, folio);
+			result = SCAN_PAGE_ANON;
+			goto out;
+		}
 
 		/* See hpage_collapse_scan_pmd(). */
 		if (folio_maybe_mapped_shared(folio)) {
@@ -1340,6 +1344,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		folio = page_folio(page);
 
 		if (!folio_test_anon(folio)) {
+			VM_WARN_ON_FOLIO(true, folio);
 			result = SCAN_PAGE_ANON;
 			goto out_unmap;
 		}
-- 
2.49.0

From: Lance Yang

As David suggested, the PTE scanning logic in hpage_collapse_scan_pmd()
and __collapse_huge_page_isolate() was almost duplicated. This patch
cleans things up by moving all the common PTE checking logic into a new
shared helper, thp_collapse_check_pte().

While at it, we use vm_normal_folio() instead of vm_normal_page().

Suggested-by: David Hildenbrand
Suggested-by: Dev Jain
Signed-off-by: Lance Yang
---
 mm/khugepaged.c | 243 ++++++++++++++++++++++++++----------------------
 1 file changed, 130 insertions(+), 113 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b5c0295c3414..7116caae1fa4 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -61,6 +61,12 @@ enum scan_result {
 	SCAN_PAGE_FILLED,
 };
 
+enum pte_check_result {
+	PTE_CHECK_SUCCEED,
+	PTE_CHECK_CONTINUE,
+	PTE_CHECK_FAIL,
+};
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/huge_memory.h>
@@ -533,62 +539,139 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
 	}
 }
 
+/*
+ * thp_collapse_check_pte - Check if a PTE is suitable for THP collapse
+ * @pte: The PTE to check
+ * @vma: The VMA the PTE belongs to
+ * @addr: The virtual address corresponding to this PTE
+ * @foliop: On success, used to return a pointer to the folio
+ *          Must be non-NULL
+ * @none_or_zero: Counter for none/zero PTEs. Must be non-NULL
+ * @unmapped: Counter for swap PTEs. Can be NULL if not scanning swaps
+ * @shared: Counter for shared pages. Must be non-NULL
+ * @scan_result: Used to return the failure reason (SCAN_*) on a
+ *               PTE_CHECK_FAIL return. Must be non-NULL
+ * @cc: Collapse control settings
+ *
+ * Returns:
+ *   PTE_CHECK_SUCCEED  - PTE is suitable, proceed with further checks
+ *   PTE_CHECK_CONTINUE - Skip this PTE and continue scanning
+ *   PTE_CHECK_FAIL     - Abort collapse scan
+ */
+static inline int thp_collapse_check_pte(pte_t pte, struct vm_area_struct *vma,
+		unsigned long addr, struct folio **foliop, int *none_or_zero,
+		int *unmapped, int *shared, int *scan_result,
+		struct collapse_control *cc)
+{
+	struct folio *folio = NULL;
+
+	if (pte_none(pte) || is_zero_pfn(pte_pfn(pte))) {
+		(*none_or_zero)++;
+		if (!userfaultfd_armed(vma) &&
+		    (!cc->is_khugepaged ||
+		     *none_or_zero <= khugepaged_max_ptes_none)) {
+			return PTE_CHECK_CONTINUE;
+		} else {
+			*scan_result = SCAN_EXCEED_NONE_PTE;
+			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
+			return PTE_CHECK_FAIL;
+		}
+	} else if (!pte_present(pte)) {
+		if (!unmapped) {
+			*scan_result = SCAN_PTE_NON_PRESENT;
+			return PTE_CHECK_FAIL;
+		}
+
+		if (non_swap_entry(pte_to_swp_entry(pte))) {
+			*scan_result = SCAN_PTE_NON_PRESENT;
+			return PTE_CHECK_FAIL;
+		}
+
+		(*unmapped)++;
+		if (!cc->is_khugepaged ||
+		    *unmapped <= khugepaged_max_ptes_swap) {
+			/*
+			 * Always be strict with uffd-wp enabled swap
+			 * entries. Please see comment below for
+			 * pte_uffd_wp().
+			 */
+			if (pte_swp_uffd_wp(pte)) {
+				*scan_result = SCAN_PTE_UFFD_WP;
+				return PTE_CHECK_FAIL;
+			}
+			return PTE_CHECK_CONTINUE;
+		} else {
+			*scan_result = SCAN_EXCEED_SWAP_PTE;
+			count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
+			return PTE_CHECK_FAIL;
+		}
+	} else if (pte_uffd_wp(pte)) {
+		/*
+		 * Don't collapse the page if any of the small PTEs are
+		 * armed with uffd write protection. Here we can also mark
+		 * the new huge pmd as write protected if any of the small
+		 * ones is marked but that could bring unknown userfault
+		 * messages that falls outside of the registered range.
+		 * So, just be simple.
+		 */
+		*scan_result = SCAN_PTE_UFFD_WP;
+		return PTE_CHECK_FAIL;
+	}
+
+	folio = vm_normal_folio(vma, addr, pte);
+	if (unlikely(!folio) || unlikely(folio_is_zone_device(folio))) {
+		*scan_result = SCAN_PAGE_NULL;
+		return PTE_CHECK_FAIL;
+	}
+
+	if (!folio_test_anon(folio)) {
+		VM_WARN_ON_FOLIO(true, folio);
+		*scan_result = SCAN_PAGE_ANON;
+		return PTE_CHECK_FAIL;
+	}
+
+	/*
+	 * We treat a single page as shared if any part of the THP
+	 * is shared.
+	 */
+	if (folio_maybe_mapped_shared(folio)) {
+		(*shared)++;
+		if (cc->is_khugepaged && *shared > khugepaged_max_ptes_shared) {
+			*scan_result = SCAN_EXCEED_SHARED_PTE;
+			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
+			return PTE_CHECK_FAIL;
+		}
+	}
+
+	*foliop = folio;
+
+	return PTE_CHECK_SUCCEED;
+}
+
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 					unsigned long start_addr, pte_t *pte,
 					struct collapse_control *cc,
 					struct list_head *compound_pagelist)
 {
-	struct page *page = NULL;
 	struct folio *folio = NULL;
 	unsigned long addr = start_addr;
 	pte_t *_pte;
 	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
+	int pte_check_res;
 
 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 	     _pte++, addr += PAGE_SIZE) {
 		pte_t pteval = ptep_get(_pte);
-		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
-			++none_or_zero;
-			if (!userfaultfd_armed(vma) &&
-			    (!cc->is_khugepaged ||
-			     none_or_zero <= khugepaged_max_ptes_none)) {
-				continue;
-			} else {
-				result = SCAN_EXCEED_NONE_PTE;
-				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
-				goto out;
-			}
-		} else if (!pte_present(pteval)) {
-			result = SCAN_PTE_NON_PRESENT;
-			goto out;
-		} else if (pte_uffd_wp(pteval)) {
-			result = SCAN_PTE_UFFD_WP;
-			goto out;
-		}
-		page = vm_normal_page(vma, addr, pteval);
-		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
-			result = SCAN_PAGE_NULL;
-			goto out;
-		}
-		folio = page_folio(page);
-		if (!folio_test_anon(folio)) {
-			VM_WARN_ON_FOLIO(true, folio);
-			result = SCAN_PAGE_ANON;
-			goto out;
-		}
+		pte_check_res = thp_collapse_check_pte(pteval, vma, addr,
+				&folio, &none_or_zero, NULL, &shared,
+				&result, cc);
 
-		/* See hpage_collapse_scan_pmd(). */
-		if (folio_maybe_mapped_shared(folio)) {
-			++shared;
-			if (cc->is_khugepaged &&
-			    shared > khugepaged_max_ptes_shared) {
-				result = SCAN_EXCEED_SHARED_PTE;
-				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
-				goto out;
-			}
-		}
+		if (pte_check_res == PTE_CHECK_CONTINUE)
+			continue;
+		else if (pte_check_res == PTE_CHECK_FAIL)
+			goto out;
 
 		if (folio_test_large(folio)) {
 			struct folio *f;
@@ -1264,11 +1347,11 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	pte_t *pte, *_pte;
 	int result = SCAN_FAIL, referenced = 0;
 	int none_or_zero = 0, shared = 0;
-	struct page *page = NULL;
 	struct folio *folio = NULL;
 	unsigned long addr;
 	spinlock_t *ptl;
 	int node = NUMA_NO_NODE, unmapped = 0;
+	int pte_check_res;
 
 	VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
@@ -1287,81 +1370,15 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
 	     _pte++, addr += PAGE_SIZE) {
 		pte_t pteval = ptep_get(_pte);
-		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
-			++none_or_zero;
-			if (!userfaultfd_armed(vma) &&
-			    (!cc->is_khugepaged ||
-			     none_or_zero <= khugepaged_max_ptes_none)) {
-				continue;
-			} else {
-				result = SCAN_EXCEED_NONE_PTE;
-				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
-				goto out_unmap;
-			}
-		} else if (!pte_present(pteval)) {
-			if (non_swap_entry(pte_to_swp_entry(pteval))) {
-				result = SCAN_PTE_NON_PRESENT;
-				goto out_unmap;
-			}
-			++unmapped;
-			if (!cc->is_khugepaged ||
-			    unmapped <= khugepaged_max_ptes_swap) {
-				/*
-				 * Always be strict with uffd-wp
-				 * enabled swap entries. Please see
-				 * comment below for pte_uffd_wp().
-				 */
-				if (pte_swp_uffd_wp(pteval)) {
-					result = SCAN_PTE_UFFD_WP;
-					goto out_unmap;
-				}
-				continue;
-			} else {
-				result = SCAN_EXCEED_SWAP_PTE;
-				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
-				goto out_unmap;
-			}
-		} else if (pte_uffd_wp(pteval)) {
-			/*
-			 * Don't collapse the page if any of the small
-			 * PTEs are armed with uffd write protection.
-			 * Here we can also mark the new huge pmd as
-			 * write protected if any of the small ones is
-			 * marked but that could bring unknown
-			 * userfault messages that falls outside of
-			 * the registered range. So, just be simple.
-			 */
-			result = SCAN_PTE_UFFD_WP;
-			goto out_unmap;
-		}
+		pte_check_res = thp_collapse_check_pte(pteval, vma, addr,
+				&folio, &none_or_zero, &unmapped,
+				&shared, &result, cc);
 
-		page = vm_normal_page(vma, addr, pteval);
-		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
-			result = SCAN_PAGE_NULL;
-			goto out_unmap;
-		}
-		folio = page_folio(page);
-
-		if (!folio_test_anon(folio)) {
-			VM_WARN_ON_FOLIO(true, folio);
-			result = SCAN_PAGE_ANON;
+		if (pte_check_res == PTE_CHECK_CONTINUE)
+			continue;
+		else if (pte_check_res == PTE_CHECK_FAIL)
 			goto out_unmap;
-		}
-
-		/*
-		 * We treat a single page as shared if any part of the THP
-		 * is shared.
-		 */
-		if (folio_maybe_mapped_shared(folio)) {
-			++shared;
-			if (cc->is_khugepaged &&
-			    shared > khugepaged_max_ptes_shared) {
-				result = SCAN_EXCEED_SHARED_PTE;
-				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
-				goto out_unmap;
-			}
-		}
 
 		/*
 		 * Record which node the original page is from and save this
-- 
2.49.0
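
As a quick illustration of how both scan loops end up consuming the new
helper after the third patch, here is a condensed sketch. It is not
literal patch code: the loop body is simplified and the comments are
explanatory only; the helper name, the PTE_CHECK_* values, and the
argument list are those introduced above.

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, addr += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);

		/*
		 * __collapse_huge_page_isolate() passes NULL for @unmapped,
		 * so any swap PTE fails with SCAN_PTE_NON_PRESENT;
		 * hpage_collapse_scan_pmd() passes &unmapped and tolerates
		 * up to khugepaged_max_ptes_swap swap PTEs.
		 */
		pte_check_res = thp_collapse_check_pte(pteval, vma, addr,
				&folio, &none_or_zero, &unmapped,
				&shared, &result, cc);

		if (pte_check_res == PTE_CHECK_CONTINUE)
			continue;	/* none/zero or swap PTE, still within limits */
		else if (pte_check_res == PTE_CHECK_FAIL)
			goto out_unmap;	/* 'result' already holds the SCAN_* reason */

		/* PTE_CHECK_SUCCEED: 'folio' is set, do the caller-specific work. */
	}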