The khugepaged_enter_vma() function requires handling in two specific
scenarios:

1. New VMA creation
   When a new VMA is created (for an anonymous VMA this is deferred to the
   page fault), vma->vm_mm must be added to khugepaged_mm_slot if it is not
   already present. In this case, khugepaged_enter_vma() is called after
   vma->vm_flags have been set, allowing direct use of the VMA's flags.

2. VMA flag modification
   When vma->vm_flags are modified (particularly when VM_HUGEPAGE is set),
   we must recheck whether to add vma->vm_mm to khugepaged_mm_slot.
   Currently, khugepaged_enter_vma() is called before the flag update, so
   the call must be relocated to occur after vma->vm_flags have been set.

khugepaged_enter_vma() is also called in the VMA merging path. Since VMA
merging only occurs when the vm_flags of both VMAs are identical (excluding
special flags such as VM_SOFTDIRTY), we can safely use target->vm_flags
there instead.

(It is worth noting that khugepaged_enter_vma() can be removed from the VMA
merging path altogether, because the VMA has already been added in the two
cases above. We will address this cleanup in a separate patch.)

After this change, the vm_flags parameter can also be removed from
thp_vma_allowable_order(); that will be handled in a followup patch.

Signed-off-by: Yafang Shao
Cc: Yang Shi
Cc: Usama Arif
---
 include/linux/khugepaged.h | 10 ++++++----
 mm/huge_memory.c           |  2 +-
 mm/khugepaged.c            | 27 ++++++++++++++-------------
 mm/madvise.c               |  7 +++++++
 mm/vma.c                   |  6 +++---
 5 files changed, 31 insertions(+), 21 deletions(-)

diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 179ce716e769..b8291a9740b4 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -15,8 +15,8 @@ extern void khugepaged_destroy(void);
 extern int start_stop_khugepaged(void);
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern void khugepaged_enter_vma(struct vm_area_struct *vma,
-				 vm_flags_t vm_flags);
+extern void khugepaged_enter_vma(struct vm_area_struct *vma);
+extern void khugepaged_enter_mm(struct mm_struct *mm);
 extern void khugepaged_min_free_kbytes_update(void);
 extern bool current_is_khugepaged(void);
 extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
@@ -40,8 +40,10 @@ static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
 }
-static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
-					vm_flags_t vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma)
+{
+}
+static inline void khugepaged_enter_mm(struct mm_struct *mm)
 {
 }
 static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7a0eedf5e3c8..bcbc1674f3d3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1476,7 +1476,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 	ret = vmf_anon_prepare(vmf);
 	if (ret)
 		return ret;
-	khugepaged_enter_vma(vma, vma->vm_flags);
+	khugepaged_enter_vma(vma);
 
 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
 	    !mm_forbids_zeropage(vma->vm_mm) &&
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 8ed9f8e2d376..d517659d905f 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -367,12 +367,6 @@ int hugepage_madvise(struct vm_area_struct *vma,
 #endif
 		*vm_flags &= ~VM_NOHUGEPAGE;
 		*vm_flags |= VM_HUGEPAGE;
-		/*
-		 * If the vma become good for khugepaged to scan,
-		 * register it here without waiting a page fault that
-		 * may not happen any time soon.
-		 */
-		khugepaged_enter_vma(vma, *vm_flags);
 		break;
 	case MADV_NOHUGEPAGE:
 		*vm_flags &= ~VM_HUGEPAGE;
@@ -514,14 +508,21 @@ static unsigned long collapse_allowable_orders(struct vm_area_struct *vma,
 	return thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
 }
 
-void khugepaged_enter_vma(struct vm_area_struct *vma,
-			  vm_flags_t vm_flags)
+void khugepaged_enter_mm(struct mm_struct *mm)
 {
-	if (!mm_flags_test(MMF_VM_HUGEPAGE, vma->vm_mm) &&
-	    hugepage_enabled()) {
-		if (collapse_allowable_orders(vma, vm_flags, true))
-			__khugepaged_enter(vma->vm_mm);
-	}
+	if (mm_flags_test(MMF_VM_HUGEPAGE, mm))
+		return;
+	if (!hugepage_enabled())
+		return;
+
+	__khugepaged_enter(mm);
+}
+
+void khugepaged_enter_vma(struct vm_area_struct *vma)
+{
+	if (!collapse_allowable_orders(vma, vma->vm_flags, true))
+		return;
+	khugepaged_enter_mm(vma->vm_mm);
 }
 
 void __khugepaged_exit(struct mm_struct *mm)
diff --git a/mm/madvise.c b/mm/madvise.c
index fb1c86e630b6..067d4c6d5c46 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1425,6 +1425,13 @@ static int madvise_vma_behavior(struct madvise_behavior *madv_behavior)
 		VM_WARN_ON_ONCE(madv_behavior->lock_mode != MADVISE_MMAP_WRITE_LOCK);
 
 	error = madvise_update_vma(new_flags, madv_behavior);
+	/*
+	 * If the vma become good for khugepaged to scan,
+	 * register it here without waiting a page fault that
+	 * may not happen any time soon.
+	 */
+	if (!error && new_flags & VM_HUGEPAGE)
+		khugepaged_enter_mm(madv_behavior->vma->vm_mm);
 out:
 	/*
 	 * madvise() returns EAGAIN if kernel resources, such as
diff --git a/mm/vma.c b/mm/vma.c
index 919d1fc63a52..519963e6f174 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -975,7 +975,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
 	if (err || commit_merge(vmg))
 		goto abort;
 
-	khugepaged_enter_vma(vmg->target, vmg->vm_flags);
+	khugepaged_enter_vma(vmg->target);
 	vmg->state = VMA_MERGE_SUCCESS;
 	return vmg->target;
 
@@ -1095,7 +1095,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
 	 * following VMA if we have VMAs on both sides.
 	 */
 	if (vmg->target && !vma_expand(vmg)) {
-		khugepaged_enter_vma(vmg->target, vmg->vm_flags);
+		khugepaged_enter_vma(vmg->target);
 		vmg->state = VMA_MERGE_SUCCESS;
 		return vmg->target;
 	}
@@ -2506,7 +2506,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
 	 * call covers the non-merge case.
 	 */
 	if (!vma_is_anonymous(vma))
-		khugepaged_enter_vma(vma, map->vm_flags);
+		khugepaged_enter_vma(vma);
 
 	*vmap = vma;
 	return 0;
-- 
2.47.3
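
For reference, a minimal userspace sketch (not part of the patch) that exercises
scenario 2 above: madvise(MADV_HUGEPAGE) on an existing anonymous mapping sets
VM_HUGEPAGE, and with this change the mm is registered with khugepaged via
khugepaged_enter_mm() only after madvise_update_vma() has succeeded. The mapping
size, the fill pattern, and the getchar() used to keep the process alive are
arbitrary illustration choices, not anything mandated by the patch.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 8UL << 20;	/* 8 MiB, larger than a 2 MiB PMD */
	char *buf;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Scenario 2: flag modification on an already existing VMA. */
	if (madvise(buf, len, MADV_HUGEPAGE)) {
		perror("madvise(MADV_HUGEPAGE)");
		return 1;
	}

	/* Touch the range so khugepaged has populated PTEs to collapse. */
	memset(buf, 0x5a, len);

	/* Keep the mapping alive; AnonHugePages in smaps can be checked now. */
	printf("pid %d: madvised %zu bytes, press Enter to exit\n",
	       (int)getpid(), len);
	getchar();

	munmap(buf, len);
	return 0;
}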