gather_surplus_pages() pre-allocates hugetlb pages into the pool during
mmap(). Pass __GFP_ZERO so these pages come zeroed from the buddy
allocator, and alloc_surplus_hugetlb_folio() marks them with
HPG_zeroed.

Add a bool *zeroed output parameter to alloc_hugetlb_folio_reserve() so
callers can tell whether the pool page is known-zero. memfd's
memfd_alloc_folio() uses this to skip the explicit folio_zero_user()
when the page is already zero.

This avoids redundant zeroing of memfd hugetlb pages that were
pre-allocated into the pool and never mapped to userspace.

Signed-off-by: Michael S. Tsirkin
Assisted-by: Claude:claude-opus-4-6
---
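Note for reviewers: this builds on a prerequisite that introduces the
HPG_zeroed flag and sets it in alloc_surplus_hugetlb_folio(). A minimal
sketch of what that prerequisite is assumed to provide, following the
existing HPAGEFLAG() convention in include/linux/hugetlb.h (names and
placement here are illustrative assumptions, not the actual
prerequisite patch):

	/* Assumed new bit in enum hugetlb_page_flags, before
	 * __NR_HPAGEFLAGS: folio contents are known to be zero. */
	HPG_zeroed,

	/*
	 * HPAGEFLAG(Zeroed, zeroed) generates
	 * folio_test_hugetlb_zeroed(), folio_set_hugetlb_zeroed() and
	 * folio_clear_hugetlb_zeroed(), which this patch uses in
	 * alloc_hugetlb_folio_reserve() below.
	 */
	HPAGEFLAG(Zeroed, zeroed)

	/* In alloc_surplus_hugetlb_folio() (sketch): remember that a
	 * __GFP_ZERO allocation came back zeroed, so the reserve path
	 * can skip folio_zero_user() later. */
	if (gfp_mask & __GFP_ZERO)
		folio_set_hugetlb_zeroed(folio);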
 include/linux/hugetlb.h |  6 ++++--
 mm/hugetlb.c            | 11 +++++++++--
 mm/memfd.c              | 14 ++++++++------
 3 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 49e5557d6cc0..3d23267014e6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -714,7 +714,8 @@ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask, gfp_t gfp_mask,
 				bool allow_alloc_fallback);
 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
-		nodemask_t *nmask, gfp_t gfp_mask);
+		nodemask_t *nmask, gfp_t gfp_mask,
+		bool *zeroed);
 
 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
 			pgoff_t idx);
@@ -1134,7 +1135,8 @@ static inline void wait_for_freed_hugetlb_folios(void)
 
 static inline struct folio *
 alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
-			    nodemask_t *nmask, gfp_t gfp_mask)
+			    nodemask_t *nmask, gfp_t gfp_mask,
+			    bool *zeroed)
 {
 	return NULL;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2ea078d4e5a8..0a19a078d860 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2205,7 +2205,7 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
 }
 
 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
-		nodemask_t *nmask, gfp_t gfp_mask)
+		nodemask_t *nmask, gfp_t gfp_mask, bool *zeroed)
 {
 	struct folio *folio;
 
@@ -2221,6 +2221,12 @@ struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
 
 	h->resv_huge_pages--;
 	spin_unlock_irq(&hugetlb_lock);
+
+	if (zeroed && folio) {
+		*zeroed = folio_test_hugetlb_zeroed(folio);
+		folio_clear_hugetlb_zeroed(folio);
+	}
+
 	return folio;
 }
 
@@ -2305,7 +2311,8 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 		 * It is okay to use NUMA_NO_NODE because we use numa_mem_id()
 		 * down the road to pick the current node if that is the case.
 		 */
-		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
+		folio = alloc_surplus_hugetlb_folio(h,
+				htlb_alloc_mask(h) | __GFP_ZERO,
 				NUMA_NO_NODE, &alloc_nodemask,
 				USER_ADDR_NONE);
 		if (!folio) {
diff --git a/mm/memfd.c b/mm/memfd.c
index fb425f4e315f..5518f7d2d91f 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -69,6 +69,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
 #ifdef CONFIG_HUGETLB_PAGE
 	struct folio *folio;
 	gfp_t gfp_mask;
+	bool zeroed;
 
 	if (is_file_hugepages(memfd)) {
 		/*
@@ -93,17 +94,18 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
 		folio = alloc_hugetlb_folio_reserve(h,
 						    numa_node_id(),
 						    NULL,
-						    gfp_mask);
+						    gfp_mask,
+						    &zeroed);
 		if (folio) {
 			u32 hash;
 
 			/*
-			 * Zero the folio to prevent information leaks to userspace.
-			 * Use folio_zero_user() which is optimized for huge/gigantic
-			 * pages. Pass 0 as addr_hint since this is not a faulting path
-			 * and we don't have a user virtual address yet.
+			 * Zero the folio to prevent information leaks to
+			 * userspace. Skip if the pool page is known-zero
+			 * (HPG_zeroed set during pool pre-allocation).
 			 */
-			folio_zero_user(folio, 0);
+			if (!zeroed)
+				folio_zero_user(folio, 0);
 
 			/*
 			 * Mark the folio uptodate before adding to page cache,
-- 
MST