When collapsing a PMD, there are two addresses in use:

  * the address pointing to the start of the PMD range
  * the address pointing to each individual page

The current naming makes the two hard to distinguish and is error prone.
Rename the first one to pmd_addr and the second one to pte_addr.

Signed-off-by: Wei Yang
Suggested-by: David Hildenbrand
---
A short illustrative sketch of the two-address naming follows the diff.

 mm/khugepaged.c | 43 ++++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 21 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 4c957ce788d1..6d03072c1a92 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -537,18 +537,19 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
 }
 
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
-					unsigned long address,
+					unsigned long pmd_addr,
 					pte_t *pte,
 					struct collapse_control *cc,
 					struct list_head *compound_pagelist)
 {
 	struct page *page = NULL;
 	struct folio *folio = NULL;
+	unsigned long pte_addr = pmd_addr;
 	pte_t *_pte;
 	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
 
 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
-	     _pte++, address += PAGE_SIZE) {
+	     _pte++, pte_addr += PAGE_SIZE) {
 		pte_t pteval = ptep_get(_pte);
 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 			++none_or_zero;
@@ -570,7 +571,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			result = SCAN_PTE_UFFD_WP;
 			goto out;
 		}
-		page = vm_normal_page(vma, address, pteval);
+		page = vm_normal_page(vma, pte_addr, pteval);
 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
 			result = SCAN_PAGE_NULL;
 			goto out;
@@ -655,8 +656,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		 */
 		if (cc->is_khugepaged &&
 		    (pte_young(pteval) || folio_test_young(folio) ||
-		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
-								     address)))
+		     folio_test_referenced(folio) ||
+		     mmu_notifier_test_young(vma->vm_mm, pte_addr)))
 			referenced++;
 	}
 
@@ -985,21 +986,21 @@ static int check_pmd_still_valid(struct mm_struct *mm,
  */
 static int __collapse_huge_page_swapin(struct mm_struct *mm,
 				       struct vm_area_struct *vma,
-				       unsigned long haddr, pmd_t *pmd,
+				       unsigned long pmd_addr, pmd_t *pmd,
 				       int referenced)
 {
 	int swapped_in = 0;
 	vm_fault_t ret = 0;
-	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
+	unsigned long pte_addr, end = pmd_addr + (HPAGE_PMD_NR * PAGE_SIZE);
 	int result;
 	pte_t *pte = NULL;
 	spinlock_t *ptl;
 
-	for (address = haddr; address < end; address += PAGE_SIZE) {
+	for (pte_addr = pmd_addr; pte_addr < end; pte_addr += PAGE_SIZE) {
 		struct vm_fault vmf = {
 			.vma = vma,
-			.address = address,
-			.pgoff = linear_page_index(vma, address),
+			.address = pte_addr,
+			.pgoff = linear_page_index(vma, pte_addr),
 			.flags = FAULT_FLAG_ALLOW_RETRY,
 			.pmd = pmd,
 		};
@@ -1009,7 +1010,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
 		 * Here the ptl is only used to check pte_same() in
 		 * do_swap_page(), so readonly version is enough.
 		 */
-		pte = pte_offset_map_ro_nolock(mm, pmd, address, &ptl);
+		pte = pte_offset_map_ro_nolock(mm, pmd, pte_addr, &ptl);
 		if (!pte) {
 			mmap_read_unlock(mm);
 			result = SCAN_PMD_NULL;
@@ -1252,7 +1253,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 
 static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 				   struct vm_area_struct *vma,
-				   unsigned long address, bool *mmap_locked,
+				   unsigned long pmd_addr, bool *mmap_locked,
 				   struct collapse_control *cc)
 {
 	pmd_t *pmd;
@@ -1261,26 +1262,26 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	int none_or_zero = 0, shared = 0;
 	struct page *page = NULL;
 	struct folio *folio = NULL;
-	unsigned long _address;
+	unsigned long pte_addr;
 	spinlock_t *ptl;
 	int node = NUMA_NO_NODE, unmapped = 0;
 
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	VM_BUG_ON(pmd_addr & ~HPAGE_PMD_MASK);
 
-	result = find_pmd_or_thp_or_none(mm, address, &pmd);
+	result = find_pmd_or_thp_or_none(mm, pmd_addr, &pmd);
 	if (result != SCAN_SUCCEED)
 		goto out;
 
 	memset(cc->node_load, 0, sizeof(cc->node_load));
 	nodes_clear(cc->alloc_nmask);
-	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	pte = pte_offset_map_lock(mm, pmd, pmd_addr, &ptl);
 	if (!pte) {
 		result = SCAN_PMD_NULL;
 		goto out;
 	}
 
-	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
-	     _pte++, _address += PAGE_SIZE) {
+	for (pte_addr = pmd_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
+	     _pte++, pte_addr += PAGE_SIZE) {
 		pte_t pteval = ptep_get(_pte);
 		if (is_swap_pte(pteval)) {
 			++unmapped;
@@ -1328,7 +1329,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 			goto out_unmap;
 		}
 
-		page = vm_normal_page(vma, _address, pteval);
+		page = vm_normal_page(vma, pte_addr, pteval);
 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
 			result = SCAN_PAGE_NULL;
 			goto out_unmap;
@@ -1397,7 +1398,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		if (cc->is_khugepaged &&
 		    (pte_young(pteval) || folio_test_young(folio) ||
 		     folio_test_referenced(folio) ||
-		     mmu_notifier_test_young(vma->vm_mm, _address)))
+		     mmu_notifier_test_young(vma->vm_mm, pte_addr)))
 			referenced++;
 	}
 	if (cc->is_khugepaged &&
@@ -1410,7 +1411,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
 	if (result == SCAN_SUCCEED) {
-		result = collapse_huge_page(mm, address, referenced,
+		result = collapse_huge_page(mm, pmd_addr, referenced,
 					    unmapped, cc);
 		/* collapse_huge_page will return with the mmap_lock released */
 		*mmap_locked = false;
-- 
2.34.1
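For illustration only, not part of the patch: a minimal sketch of the naming
this change introduces, assuming the usual kernel macros (HPAGE_PMD_NR,
HPAGE_PMD_MASK, PAGE_SIZE, VM_BUG_ON); the helper name walk_pmd_range_sketch
is made up for the example. pmd_addr is the PMD-aligned start of the range
being collapsed, while pte_addr steps over each individual page inside it.

/*
 * Sketch only: pmd_addr names the PMD-aligned start of the range being
 * collapsed; pte_addr names the address of each individual page in it.
 */
static void walk_pmd_range_sketch(unsigned long pmd_addr)
{
	unsigned long pte_addr;

	/* pmd_addr is expected to be PMD-aligned. */
	VM_BUG_ON(pmd_addr & ~HPAGE_PMD_MASK);

	for (pte_addr = pmd_addr;
	     pte_addr < pmd_addr + HPAGE_PMD_NR * PAGE_SIZE;
	     pte_addr += PAGE_SIZE) {
		/* operate on the single page mapped at pte_addr */
	}
}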