Convert the hugetlb fault and fallocate paths to use __GFP_ZERO.
For pages allocated from the buddy allocator, post_alloc_hook()
handles the zeroing.

Hugetlb surplus pages need special handling because they can be
pre-allocated into the pool during mmap() (by hugetlb_acct_memory())
before any page fault.  Pool pages are kept around and may need
zeroing long after the buddy allocation, so a buddy-level zeroed
hint, which is consumed at allocation time, cannot track their state.

Add a "bool *zeroed" output parameter to alloc_hugetlb_folio() so
callers know whether the page still needs zeroing.  Buddy-allocated
pages are always zeroed by post_alloc_hook().  Pool pages use a new
HPG_zeroed flag to track whether the page is known-zero (freshly
buddy-allocated, never mapped to userspace).  The flag is set in
alloc_surplus_hugetlb_folio() after buddy allocation and cleared in
free_huge_folio() when a page that may have been mapped to userspace
returns to the pool.

Callers that do not need zeroing (the CoW and userfaultfd copy
paths, which overwrite the page anyway) pass NULL for zeroed and
0 for gfp.

Signed-off-by: Michael S. Tsirkin
Assisted-by: Claude:claude-opus-4-6
Assisted-by: cursor-agent:GPT-5.4-xhigh
---
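Note for reviewers (not part of the patch): below is a small, standalone
userspace model of the zeroed handshake described above.  Every type and
helper in it (struct folio, alloc_folio(), prealloc_surplus(), the
one-entry pool) is a stand-in invented for the sketch; the names merely
mirror the kernel ones.

/* Standalone model of the alloc_hugetlb_folio() "zeroed" contract. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GFP_ZERO (1u << 0)		/* stands in for __GFP_ZERO */

struct folio {
	bool hpg_zeroed;		/* stands in for the HPG_zeroed flag */
	char data[16];			/* stands in for the huge page payload */
};

static struct folio *pool_page;		/* one-entry "hugetlb pool" */

/* mmap-time surplus allocation: page enters the pool before any fault */
static void prealloc_surplus(void)
{
	struct folio *f = calloc(1, sizeof(*f));	/* buddy memory is zeroed */

	if (!f)
		abort();
	f->hpg_zeroed = true;		/* as alloc_surplus_hugetlb_folio() now does */
	pool_page = f;
}

/* fault-time allocation: report whether the caller still has to zero */
static struct folio *alloc_folio(unsigned int gfp, bool *zeroed)
{
	struct folio *f = pool_page;
	bool from_pool = f != NULL;

	if (from_pool) {
		pool_page = NULL;
	} else {
		f = calloc(1, sizeof(*f));	/* buddy path, always zeroed */
		if (!f)
			return NULL;
		if (gfp & GFP_ZERO)
			f->hpg_zeroed = true;
	}
	if (zeroed) {
		/* Pool pages are trusted only while the flag is still set. */
		*zeroed = from_pool ? f->hpg_zeroed : true;
		f->hpg_zeroed = false;		/* hint is consumed at allocation */
	}
	return f;
}

/* free path: a page returning to the pool is no longer known-zero */
static void free_folio(struct folio *f)
{
	f->hpg_zeroed = false;		/* as free_huge_folio() now does */
	pool_page = f;
}

int main(void)
{
	struct folio *f;
	bool zeroed;

	prealloc_surplus();
	f = alloc_folio(GFP_ZERO, &zeroed);
	printf("pool page, never mapped: zeroed=%d\n", zeroed);	/* 1 */

	f->data[0] = 42;		/* page becomes visible to "userspace" */
	free_folio(f);

	f = alloc_folio(GFP_ZERO, &zeroed);
	printf("pool page, was mapped:   zeroed=%d\n", zeroed);	/* 0 */
	if (!zeroed)			/* exactly what hugetlb_no_page() now does */
		memset(f->data, 0, sizeof(f->data));
	free(f);
	return 0;
}

The property the demo exercises is the one the commit message argues for:
an allocation-time hint alone is lost once the page sits in the pool,
while the folio flag survives until the page is either handed out (hint
consumed) or freed back possibly dirty (flag cleared).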
 fs/hugetlbfs/inode.c    | 10 ++++++--
 include/linux/hugetlb.h |  8 +++++--
 mm/hugetlb.c            | 52 ++++++++++++++++++++++++++++++-----------
 3 files changed, 53 insertions(+), 17 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8b05bec08e04..24e42cb10ade 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -810,14 +810,20 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
		 * folios in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
-		folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
+		{
+			bool zeroed;
+
+			folio = alloc_hugetlb_folio(&pseudo_vma, addr, false,
+						    __GFP_ZERO, &zeroed);
		if (IS_ERR(folio)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(folio);
			goto out;
		}
-		folio_zero_user(folio, addr);
+		if (!zeroed)
+			folio_zero_user(folio, addr);
		__folio_mark_uptodate(folio);
+		}
		error = hugetlb_add_to_page_cache(folio, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 93418625d3c5..950e1702fbd8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -599,6 +599,7 @@ enum hugetlb_page_flags {
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	HPG_cma,
+	HPG_zeroed,
	__NR_HPAGEFLAGS,
 };

@@ -659,6 +660,7 @@ HPAGEFLAG(Freed, freed)
 HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
 HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
 HPAGEFLAG(Cma, cma)
+HPAGEFLAG(Zeroed, zeroed)

 #ifdef CONFIG_HUGETLB_PAGE

@@ -706,7 +708,8 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
 void wait_for_freed_hugetlb_folios(void);
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
-				unsigned long addr, bool cow_from_owner);
+				unsigned long addr, bool cow_from_owner,
+				gfp_t gfp, bool *zeroed);
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask,
				bool allow_alloc_fallback);
@@ -1131,7 +1134,8 @@ static inline void wait_for_freed_hugetlb_folios(void)

 static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
						unsigned long addr,
-						bool cow_from_owner)
+						bool cow_from_owner,
+						gfp_t gfp, bool *zeroed)
 {
	return NULL;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a999f3ead852..8710366d14b7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1708,6 +1708,9 @@ void free_huge_folio(struct folio *folio)
	int nid = folio_nid(folio);
	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
	bool restore_reserve;
	unsigned long flags;
+
+	/* Page may have been mapped to userspace; no longer known-zero */
+	folio_clear_hugetlb_zeroed(folio);

	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
@@ -2110,6 +2113,10 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
	if (!folio)
		return NULL;

+	/* Mark as known-zero only if __GFP_ZERO was requested */
+	if (gfp_mask & __GFP_ZERO)
+		folio_set_hugetlb_zeroed(folio);
+
	spin_lock_irq(&hugetlb_lock);
	/*
	 * nr_huge_pages needs to be adjusted within the same lock cycle
@@ -2173,11 +2180,11 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mas
 */
 static struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
-		struct vm_area_struct *vma, unsigned long addr)
+		struct vm_area_struct *vma, unsigned long addr, gfp_t gfp)
 {
	struct folio *folio = NULL;
	struct mempolicy *mpol;
-	gfp_t gfp_mask = htlb_alloc_mask(h);
+	gfp_t gfp_mask = htlb_alloc_mask(h) | gfp;
	int nid;
	nodemask_t *nodemask;

@@ -2874,7 +2881,8 @@ typedef enum {
 * When it's set, the allocation will bypass all vma level reservations.
 */
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
-		unsigned long addr, bool cow_from_owner)
+		unsigned long addr, bool cow_from_owner,
+		gfp_t gfp, bool *zeroed)
 {
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
@@ -2883,7 +2891,9 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
	map_chg_state map_chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg = NULL;
-	gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
+	bool from_pool;
+
+	gfp |= htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;

	idx = hstate_index(h);

@@ -2951,13 +2961,15 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
	folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
	if (!folio) {
		spin_unlock_irq(&hugetlb_lock);
-		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr, gfp);
		if (!folio)
			goto out_uncharge_cgroup;
		spin_lock_irq(&hugetlb_lock);
		list_add(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
-		/* Fall through */
+		from_pool = false;
+	} else {
+		from_pool = true;
	}

	/*
@@ -2980,6 +2992,14 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,

	spin_unlock_irq(&hugetlb_lock);

+	if (zeroed) {
+		if (from_pool)
+			*zeroed = folio_test_hugetlb_zeroed(folio);
+		else
+			*zeroed = true;	/* buddy-allocated, zeroed by post_alloc_hook */
+		folio_clear_hugetlb_zeroed(folio);
+	}
+
	hugetlb_set_folio_subpool(folio, spool);

	if (map_chg != MAP_CHG_ENFORCED) {
@@ -4988,7 +5008,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
				spin_unlock(src_ptl);
				spin_unlock(dst_ptl);
				/* Do not use reserve as it's private owned */
-				new_folio = alloc_hugetlb_folio(dst_vma, addr, false);
+				new_folio = alloc_hugetlb_folio(dst_vma, addr, false, 0, NULL);
				if (IS_ERR(new_folio)) {
					folio_put(pte_folio);
					ret = PTR_ERR(new_folio);
@@ -5517,7 +5537,7 @@ static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
	 * be acquired again before returning to the caller, as expected.
	 */
	spin_unlock(vmf->ptl);
-	new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);
+	new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner, 0, NULL);

	if (IS_ERR(new_folio)) {
		/*
@@ -5711,7 +5731,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
			struct vm_fault *vmf)
 {
	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
-	bool new_folio, new_anon_folio = false;
+	bool new_folio, new_anon_folio = false, zeroed;
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;
	struct hstate *h = hstate_vma(vma);
@@ -5777,7 +5797,8 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
			goto out;
		}

-		folio = alloc_hugetlb_folio(vma, vmf->address, false);
+		folio = alloc_hugetlb_folio(vma, vmf->address, false,
+					    __GFP_ZERO, &zeroed);
		if (IS_ERR(folio)) {
			/*
			 * Returning error will result in faulting task being
@@ -5797,7 +5818,12 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
			ret = 0;
			goto out;
		}
-		folio_zero_user(folio, vmf->real_address);
+		/*
+		 * Buddy-allocated pages are zeroed in post_alloc_hook().
+		 * Pool pages bypass the allocator; zero them here.
+		 */
+		if (!zeroed)
+			folio_zero_user(folio, vmf->real_address);
		__folio_mark_uptodate(folio);
		new_folio = true;

@@ -6236,7 +6262,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			goto out;
		}

-		folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
+		folio = alloc_hugetlb_folio(dst_vma, dst_addr, false, 0, NULL);
		if (IS_ERR(folio)) {
			pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE);
			if (actual_pte) {
@@ -6283,7 +6309,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			goto out;
		}

-		folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
+		folio = alloc_hugetlb_folio(dst_vma, dst_addr, false, 0, NULL);
		if (IS_ERR(folio)) {
			folio_put(*foliop);
			ret = -ENOMEM;
-- 
MST