When a reported free page is split via expand() to satisfy a smaller allocation, the sub-pages placed back on the free lists lose the PageReported flag. This means they will be unnecessarily re-reported to the hypervisor in the next reporting cycle, wasting work. While I was unable to quantify the performance difference, it is an obvious waste, even if small. Propagate the PageReported flag to sub-pages during expand(), both in page_del_and_expand() and try_to_claim_block(), so that they are recognized as already-reported. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Assisted-by: Claude:claude-opus-4-6 --- mm/page_alloc.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 227d58dc3de6..e40fd39acbd0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1699,7 +1699,7 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn, * -- nyc */ static inline unsigned int expand(struct zone *zone, struct page *page, int low, - int high, int migratetype) + int high, int migratetype, bool reported) { unsigned int size = 1 << high; unsigned int nr_added = 0; @@ -1721,6 +1721,15 @@ static inline unsigned int expand(struct zone *zone, struct page *page, int low, __add_to_free_list(&page[size], zone, high, migratetype, false); set_buddy_order(&page[size], high); nr_added += size; + + /* + * The parent page has been reported to the host. The + * sub-pages are part of the same reported block, so mark + * them reported too. This avoids re-reporting pages that + * the host already knows about. 
+ */ + if (reported) + __SetPageReported(&page[size]); } return nr_added; @@ -1731,9 +1740,10 @@ static __always_inline void page_del_and_expand(struct zone *zone, int high, int migratetype) { int nr_pages = 1 << high; + bool was_reported = page_reported(page); __del_page_from_free_list(page, zone, high, migratetype); - nr_pages -= expand(zone, page, low, high, migratetype); + nr_pages -= expand(zone, page, low, high, migratetype, was_reported); account_freepages(zone, -nr_pages, migratetype); } @@ -2288,10 +2298,12 @@ try_to_claim_block(struct zone *zone, struct page *page, /* Take ownership for orders >= pageblock_order */ if (current_order >= pageblock_order) { unsigned int nr_added; + bool was_reported = page_reported(page); del_page_from_free_list(page, zone, current_order, block_type); change_pageblock_range(page, current_order, start_type); - nr_added = expand(zone, page, order, current_order, start_type); + nr_added = expand(zone, page, order, current_order, start_type, + was_reported); account_freepages(zone, nr_added, start_type); return page; } -- MST