Reduce the repetition, and lay the groundwork for further refactorings by keeping this variable separate. Signed-off-by: Lorenzo Stoakes (Oracle) --- mm/huge_memory.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c5b16c218900..673d0c4734ad 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2338,6 +2338,7 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr) { bool has_deposit = arch_needs_pgtable_deposit(); + struct mm_struct *mm = tlb->mm; struct folio *folio = NULL; bool flush_needed = false; spinlock_t *ptl; @@ -2385,9 +2386,9 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (folio_test_anon(folio)) { has_deposit = true; - add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); + add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR); } else { - add_mm_counter(tlb->mm, mm_counter_file(folio), + add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR); /* @@ -2406,7 +2407,7 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, out: if (has_deposit) - zap_deposited_table(tlb->mm, pmd); + zap_deposited_table(mm, pmd); spin_unlock(ptl); if (flush_needed) -- 2.53.0