Use guard(spinlock_irqsave)() from <linux/cleanup.h> for the zone lock in free_pcppages_bulk(), replacing the explicit spin_lock_irqsave()/spin_unlock_irqrestore() pair with automatic scope-based unlock on function exit. Suggested-by: Steven Rostedt Signed-off-by: Dmitry Ilvokhin --- mm/page_alloc.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 096f5fef0eb5..6a7c548a7406 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1458,7 +1458,6 @@ static void free_pcppages_bulk(struct zone *zone, int count, struct per_cpu_pages *pcp, int pindex) { - unsigned long flags; unsigned int order; struct page *page; @@ -1471,7 +1470,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, /* Ensure requested pindex is drained first. */ pindex = pindex - 1; - spin_lock_irqsave(&zone->lock, flags); + guard(spinlock_irqsave)(&zone->lock); while (count > 0) { struct list_head *list; @@ -1503,8 +1502,6 @@ static void free_pcppages_bulk(struct zone *zone, int count, trace_mm_page_pcpu_drain(page, order, mt); } while (count > 0 && !list_empty(list)); } - - spin_unlock_irqrestore(&zone->lock, flags); } /* Split a multi-block free page into its individual pageblocks. */ -- 2.52.0