Current code calls mm_slot_entry() even when it does not have a valid slot,
which is not future-proof. Today this happens to be harmless only because
"slot" is the first member of struct khugepaged_mm_slot, so the
container_of() arithmetic inside mm_slot_entry() is a no-op.

Since struct khugepaged_mm_slot is just a wrapper around struct mm_slot,
there is no need to define it at all. Remove the definition of struct
khugepaged_mm_slot, so there is no chance to misuse mm_slot_entry().

Signed-off-by: Wei Yang
Acked-by: Lance Yang
Reviewed-by: Dev Jain
Cc: Lance Yang
Cc: David Hildenbrand
Cc: Dev Jain
Cc: Kiryl Shutsemau
Cc: xu xin
Cc: SeongJae Park
Cc: Nico Pache

---
v3:
 * adjust changelog
 * rename the slab cache to "mm_slot"
v2:
 * fix a page fault reported by SeongJae, where slot was changed to the
   next one

---
 mm/khugepaged.c | 55 ++++++++++++++++++-------------------------------------
 1 file changed, 18 insertions(+), 37 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 204ce3059267..67540078083b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -103,14 +103,6 @@ struct collapse_control {
 	nodemask_t alloc_nmask;
 };
 
-/**
- * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
- * @slot: hash lookup from mm to mm_slot
- */
-struct khugepaged_mm_slot {
-	struct mm_slot slot;
-};
-
 /**
  * struct khugepaged_scan - cursor for scanning
  * @mm_head: the head of the mm list to scan
@@ -121,7 +113,7 @@ struct khugepaged_mm_slot
  */
 struct khugepaged_scan {
 	struct list_head mm_head;
-	struct khugepaged_mm_slot *mm_slot;
+	struct mm_slot *mm_slot;
 	unsigned long address;
 };
 
@@ -384,7 +376,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 
 int __init khugepaged_init(void)
 {
-	mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
+	mm_slot_cache = KMEM_CACHE(mm_slot, 0);
 	if (!mm_slot_cache)
 		return -ENOMEM;
 
@@ -438,7 +430,6 @@ static bool hugepage_pmd_enabled(void)
 
 void __khugepaged_enter(struct mm_struct *mm)
 {
-	struct khugepaged_mm_slot *mm_slot;
 	struct mm_slot *slot;
 	int wakeup;
 
@@ -447,12 +438,10 @@ void __khugepaged_enter(struct mm_struct *mm)
 	if (unlikely(mm_flags_test_and_set(MMF_VM_HUGEPAGE, mm)))
 		return;
 
-	mm_slot = mm_slot_alloc(mm_slot_cache);
-	if (!mm_slot)
+	slot = mm_slot_alloc(mm_slot_cache);
+	if (!slot)
 		return;
 
-	slot = &mm_slot->slot;
-
 	spin_lock(&khugepaged_mm_lock);
 	mm_slot_insert(mm_slots_hash, mm, slot);
 	/*
@@ -480,14 +469,12 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 
 void __khugepaged_exit(struct mm_struct *mm)
 {
-	struct khugepaged_mm_slot *mm_slot;
 	struct mm_slot *slot;
 	int free = 0;
 
 	spin_lock(&khugepaged_mm_lock);
 	slot = mm_slot_lookup(mm_slots_hash, mm);
-	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
-	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
+	if (slot && khugepaged_scan.mm_slot != slot) {
 		hash_del(&slot->hash);
 		list_del(&slot->mm_node);
 		free = 1;
@@ -496,9 +483,9 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 	if (free) {
 		mm_flags_clear(MMF_VM_HUGEPAGE, mm);
-		mm_slot_free(mm_slot_cache, mm_slot);
+		mm_slot_free(mm_slot_cache, slot);
 		mmdrop(mm);
-	} else if (mm_slot) {
+	} else if (slot) {
 		/*
 		 * This is required to serialize against
 		 * hpage_collapse_test_exit() (which is guaranteed to run
@@ -1432,9 +1419,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	return result;
 }
 
-static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
+static void collect_mm_slot(struct mm_slot *slot)
 {
-	struct mm_slot *slot = &mm_slot->slot;
 	struct mm_struct *mm = slot->mm;
 
 	lockdep_assert_held(&khugepaged_mm_lock);
@@ -1451,7 +1437,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
 		 */
 
 		/* khugepaged_mm_lock actually not necessary for the below */
-		mm_slot_free(mm_slot_cache, mm_slot);
+		mm_slot_free(mm_slot_cache, slot);
 		mmdrop(mm);
 	}
 }
@@ -2394,7 +2380,6 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	__acquires(&khugepaged_mm_lock)
 {
 	struct vma_iterator vmi;
-	struct khugepaged_mm_slot *mm_slot;
 	struct mm_slot *slot;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
@@ -2405,14 +2390,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	*result = SCAN_FAIL;
 
 	if (khugepaged_scan.mm_slot) {
-		mm_slot = khugepaged_scan.mm_slot;
-		slot = &mm_slot->slot;
+		slot = khugepaged_scan.mm_slot;
 	} else {
 		slot = list_first_entry(&khugepaged_scan.mm_head,
					struct mm_slot, mm_node);
-		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
 		khugepaged_scan.address = 0;
-		khugepaged_scan.mm_slot = mm_slot;
+		khugepaged_scan.mm_slot = slot;
 	}
 	spin_unlock(&khugepaged_mm_lock);
 
@@ -2510,7 +2493,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 breakouterloop_mmap_lock:
 
 	spin_lock(&khugepaged_mm_lock);
-	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
+	VM_BUG_ON(khugepaged_scan.mm_slot != slot);
 	/*
 	 * Release the current mm_slot if this mm is about to die, or
 	 * if we scanned all vmas of this mm.
@@ -2522,16 +2505,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 		 * mm_slot not pointing to the exiting mm.
 		 */
 		if (!list_is_last(&slot->mm_node, &khugepaged_scan.mm_head)) {
-			slot = list_next_entry(slot, mm_node);
-			khugepaged_scan.mm_slot =
-				mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
+			khugepaged_scan.mm_slot = list_next_entry(slot, mm_node);
 			khugepaged_scan.address = 0;
 		} else {
 			khugepaged_scan.mm_slot = NULL;
 			khugepaged_full_scans++;
 		}
 
-		collect_mm_slot(mm_slot);
+		collect_mm_slot(slot);
 	}
 
 	return progress;
@@ -2618,7 +2599,7 @@ static void khugepaged_wait_work(void)
 
 static int khugepaged(void *none)
 {
-	struct khugepaged_mm_slot *mm_slot;
+	struct mm_slot *slot;
 
 	set_freezable();
 	set_user_nice(current, MAX_NICE);
@@ -2629,10 +2610,10 @@ static int khugepaged(void *none)
 	}
 
 	spin_lock(&khugepaged_mm_lock);
-	mm_slot = khugepaged_scan.mm_slot;
+	slot = khugepaged_scan.mm_slot;
 	khugepaged_scan.mm_slot = NULL;
-	if (mm_slot)
-		collect_mm_slot(mm_slot);
+	if (slot)
+		collect_mm_slot(slot);
 	spin_unlock(&khugepaged_mm_lock);
 	return 0;
 }
-- 
2.34.1
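
The hazard the changelog describes is worth spelling out: mm_slot_entry() is
a container_of()-style cast, so handing it a NULL pointer only yields NULL
when the embedded member sits at offset zero. Below is a minimal userspace
sketch of the failure mode, not kernel code: the container_of() stand-in
mirrors the kernel macro, the structs are simplified, and some_future_field
is a hypothetical member added only to push @slot off offset zero.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of()-based helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define mm_slot_entry(ptr, type, member) container_of(ptr, type, member)

struct mm_slot { int dummy; };	/* simplified */

/* Hypothetical: a member added before @slot moves it off offset zero. */
struct khugepaged_mm_slot {
	long some_future_field;
	struct mm_slot slot;
};

int main(void)
{
	struct mm_slot *slot = NULL;	/* e.g. mm_slot_lookup() found nothing */
	struct khugepaged_mm_slot *mm_slot =
		mm_slot_entry(slot, struct khugepaged_mm_slot, slot);

	/*
	 * With @slot at offset zero the cast maps NULL to NULL, so a later
	 * "if (mm_slot)" check still behaves. At a non-zero offset the
	 * result is a bogus non-NULL pointer (strictly, the arithmetic on
	 * NULL is undefined), the check passes, and the bogus pointer
	 * would be dereferenced.
	 */
	printf("mm_slot = %p\n", (void *)mm_slot);
	return 0;
}

With the wrapper removed, lookups hand back the struct mm_slot itself and
there is no container_of() step left to get wrong.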