The structure struct collapse_control is being unnecessarily cleared twice during the huge page collapse process. Both hpage_collapse_scan_file() and hpage_collapse_scan_pmd() currently perform a clear operation on this structure. Remove the redundant clear operation from madvise_collapse(), since the scan functions already clear the structure before use. Signed-off-by: Wei Yang --- mm/khugepaged.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 7e8cb181d5bd..1fc8986a28b3 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2803,8 +2803,6 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start, hend = min(hend, vma->vm_end & HPAGE_PMD_MASK); } mmap_assert_locked(mm); - memset(cc->node_load, 0, sizeof(cc->node_load)); - nodes_clear(cc->alloc_nmask); if (!vma_is_anonymous(vma)) { struct file *file = get_file(vma->vm_file); pgoff_t pgoff = linear_page_index(vma, addr); -- 2.34.1 SCAN_PMD_NONE means the current pmd is empty, but we can still continue to collapse the next pmd range. Signed-off-by: Wei Yang Reviewed-by: Lance Yang Reviewed-by: Dev Jain Reviewed-by: Baolin Wang --- mm/khugepaged.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 1fc8986a28b3..2ee5048b764e 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2833,6 +2833,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start, goto handle_result; /* Whitelisted set of results where continuing OK */ case SCAN_PMD_NULL: + case SCAN_PMD_NONE: case SCAN_PTE_NON_PRESENT: case SCAN_PTE_UFFD_WP: case SCAN_LACK_REFERENCED_PAGE: -- 2.34.1 The current hugepage collapse scan results include two separate values, SCAN_PMD_NONE and SCAN_PMD_NULL, which are handled identically by the consuming code.
To reduce confusion and improve long-term maintenance, this commit merges these two functionally equivalent states into a single, clearer identifier: SCAN_NO_PTE_TABLE Suggested-by: "David Hildenbrand (Red Hat)" Signed-off-by: Wei Yang --- include/trace/events/huge_memory.h | 3 +-- mm/khugepaged.c | 23 ++++++++++------------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index dd94d14a2427..4cde53b45a85 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -10,8 +10,7 @@ #define SCAN_STATUS \ EM( SCAN_FAIL, "failed") \ EM( SCAN_SUCCEED, "succeeded") \ - EM( SCAN_PMD_NULL, "pmd_null") \ - EM( SCAN_PMD_NONE, "pmd_none") \ + EM( SCAN_NO_PTE_TABLE, "no_pte_table") \ EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \ EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 2ee5048b764e..40f9d5939aa5 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -30,8 +30,7 @@ enum scan_result { SCAN_FAIL, SCAN_SUCCEED, - SCAN_PMD_NULL, - SCAN_PMD_NONE, + SCAN_NO_PTE_TABLE, SCAN_PMD_MAPPED, SCAN_EXCEED_NONE_PTE, SCAN_EXCEED_SWAP_PTE, @@ -934,7 +933,7 @@ static inline int check_pmd_state(pmd_t *pmd) pmd_t pmde = pmdp_get_lockless(pmd); if (pmd_none(pmde)) - return SCAN_PMD_NONE; + return SCAN_NO_PTE_TABLE; /* * The folio may be under migration when khugepaged is trying to @@ -944,11 +943,11 @@ static inline int check_pmd_state(pmd_t *pmd) if (pmd_is_migration_entry(pmde)) return SCAN_PMD_MAPPED; if (!pmd_present(pmde)) - return SCAN_PMD_NULL; + return SCAN_NO_PTE_TABLE; if (pmd_trans_huge(pmde)) return SCAN_PMD_MAPPED; if (pmd_bad(pmde)) - return SCAN_PMD_NULL; + return SCAN_NO_PTE_TABLE; return SCAN_SUCCEED; } @@ -958,7 +957,7 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm, { *pmd = mm_find_pmd(mm, address); if (!*pmd) - return SCAN_PMD_NULL; + return 
SCAN_NO_PTE_TABLE; return check_pmd_state(*pmd); } @@ -1013,7 +1012,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl); if (!pte) { mmap_read_unlock(mm); - result = SCAN_PMD_NULL; + result = SCAN_NO_PTE_TABLE; goto out; } } @@ -1187,7 +1186,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, &compound_pagelist); spin_unlock(pte_ptl); } else { - result = SCAN_PMD_NULL; + result = SCAN_NO_PTE_TABLE; } if (unlikely(result != SCAN_SUCCEED)) { @@ -1270,7 +1269,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, nodes_clear(cc->alloc_nmask); pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl); if (!pte) { - result = SCAN_PMD_NULL; + result = SCAN_NO_PTE_TABLE; goto out; } @@ -1544,8 +1543,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, switch (result) { case SCAN_SUCCEED: break; - case SCAN_PMD_NULL: - case SCAN_PMD_NONE: + case SCAN_NO_PTE_TABLE: /* * All pte entries have been removed and pmd cleared. * Skip all the pte checks and just update the pmd mapping. @@ -2832,8 +2830,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start, mmap_read_unlock(mm); goto handle_result; /* Whitelisted set of results where continuing OK */ - case SCAN_PMD_NULL: - case SCAN_PMD_NONE: + case SCAN_NO_PTE_TABLE: case SCAN_PTE_NON_PRESENT: case SCAN_PTE_UFFD_WP: case SCAN_LACK_REFERENCED_PAGE: -- 2.34.1