Other than when we fail to acquire the PTL, we always need to unlock the
PTL on exit from zap_huge_pmd(), and optionally need to flush.

The code currently duplicates this logic across exit paths, so default
flush_needed to false, set it to true in the one case where a flush is
required, then share the same unlock/flush logic across all exit paths.

This also makes flush_needed more sensible as a function-scope value (we
do not need to flush for the PFN map/mixed map, huge zero, or error
cases, for instance).

Reviewed-by: Baolin Wang
Signed-off-by: Lorenzo Stoakes (Oracle)
---
 mm/huge_memory.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0056ac27ec9a..b9d9acfef147 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2338,7 +2338,7 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 pmd_t *pmd, unsigned long addr)
 {
 	struct folio *folio = NULL;
-	bool flush_needed = true;
+	bool flush_needed = false;
 	spinlock_t *ptl;
 	pmd_t orig_pmd;
 
@@ -2360,19 +2360,18 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	if (vma_is_special_huge(vma)) {
 		if (arch_needs_pgtable_deposit())
 			zap_deposited_table(tlb->mm, pmd);
-		spin_unlock(ptl);
-		return true;
+		goto out;
 	}
 	if (is_huge_zero_pmd(orig_pmd)) {
 		if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
 			zap_deposited_table(tlb->mm, pmd);
-		spin_unlock(ptl);
-		return true;
+		goto out;
 	}
 
 	if (pmd_present(orig_pmd)) {
 		struct page *page = pmd_page(orig_pmd);
 
+		flush_needed = true;
 		folio = page_folio(page);
 		folio_remove_rmap_pmd(folio, page, vma);
 		WARN_ON_ONCE(folio_mapcount(folio) < 0);
@@ -2381,14 +2380,12 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		const softleaf_t entry = softleaf_from_pmd(orig_pmd);
 
 		folio = softleaf_to_folio(entry);
-		flush_needed = false;
 
 		if (!thp_migration_supported())
 			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
 	} else {
 		WARN_ON_ONCE(true);
-		spin_unlock(ptl);
-		return true;
+		goto out;
 	}
 
 	if (folio_test_anon(folio)) {
@@ -2415,10 +2412,10 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		folio_put(folio);
 	}
 
+out:
 	spin_unlock(ptl);
 	if (flush_needed)
 		tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
-
 	return true;
 }
 
-- 
2.53.0
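
To illustrate the shape this gives zap_huge_pmd(), here is a minimal,
standalone userspace sketch of the shared-exit-path pattern: a single
"out" label performs the unlock common to every exit path, and a
function-scope flag, defaulting to false, records whether the flush step
is needed. Every name in it (demo_zap, try_lock, unlock, flush) is a
hypothetical stand-in, not a kernel API.

#include <stdbool.h>
#include <stdio.h>

static bool try_lock(void) { return true; }	/* stand-in for the PTL acquisition */
static void unlock(void)   { puts("unlocked"); }
static void flush(void)    { puts("flushed"); }

static bool demo_zap(int kind)
{
	bool flush_needed = false;	/* default: no flush on exit */

	if (!try_lock())
		return false;		/* the only path that skips the unlock */

	if (kind == 0)			/* e.g. special (PFN/mixed) mapping */
		goto out;
	if (kind == 1)			/* e.g. huge zero page */
		goto out;

	flush_needed = true;		/* present entry: flush is required */
out:
	unlock();			/* shared by every remaining exit path */
	if (flush_needed)
		flush();
	return true;
}

int main(void)
{
	demo_zap(0);	/* unlocks, does not flush */
	demo_zap(2);	/* unlocks, then flushes */
	return 0;
}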