Rather than having separate logic for each case determining whether to zap the deposited table, simply track this via a boolean. We check separately whether the architecture requires it. Also use pmd_folio() directly in the present case. Signed-off-by: Lorenzo Stoakes (Oracle) --- mm/huge_memory.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 015f6d679d26..bcc74b0172fa 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2432,6 +2432,7 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, { struct folio *folio = NULL; bool flush_needed = false; + bool needs_deposit = false; bool ret = true; spinlock_t *ptl; pmd_t orig_pmd; @@ -2451,23 +2452,18 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, tlb->fullmm); arch_check_zapped_pmd(vma, orig_pmd); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); - if (vma_is_special_huge(vma)) { - if (arch_needs_pgtable_deposit()) - zap_deposited_table(tlb->mm, pmd); + if (vma_is_special_huge(vma)) goto out; - } if (is_huge_zero_pmd(orig_pmd)) { - if (!vma_is_dax(vma) || arch_needs_pgtable_deposit()) - zap_deposited_table(tlb->mm, pmd); + needs_deposit = !vma_is_dax(vma); goto out; } if (pmd_present(orig_pmd)) { - struct page *page = pmd_page(orig_pmd); + folio = pmd_folio(orig_pmd); flush_needed = true; - folio = page_folio(page); - folio_remove_rmap_pmd(folio, page, vma); + folio_remove_rmap_pmd(folio, &folio->page, vma); WARN_ON_ONCE(folio_mapcount(folio) < 0); } else if (pmd_is_valid_softleaf(orig_pmd)) { const softleaf_t entry = softleaf_from_pmd(orig_pmd); @@ -2483,11 +2479,9 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, } if (folio_test_anon(folio)) { - zap_deposited_table(tlb->mm, pmd); + needs_deposit = true; add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); } else { - if (arch_needs_pgtable_deposit()) - zap_deposited_table(tlb->mm, pmd); add_mm_counter(tlb->mm, 
mm_counter_file(folio), -HPAGE_PMD_NR); @@ -2507,6 +2501,9 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, } out: + if (arch_needs_pgtable_deposit() || needs_deposit) + zap_deposited_table(tlb->mm, pmd); + spin_unlock(ptl); if (flush_needed) tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE); -- 2.53.0