Use a spinlock_irqsave guard for the zone lock in unreserve_highatomic_pageblock(), replacing the explicit lock/unlock pattern with automatic scope-based cleanup. Suggested-by: Steven Rostedt Signed-off-by: Dmitry Ilvokhin --- mm/page_alloc.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3a4523c35fb6..0b9a423cbd24 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3480,7 +3480,6 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, bool force) { struct zonelist *zonelist = ac->zonelist; - unsigned long flags; struct zoneref *z; struct zone *zone; struct page *page; @@ -3497,7 +3496,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, pageblock_nr_pages) continue; - spin_lock_irqsave(&zone->lock, flags); + guard(spinlock_irqsave)(&zone->lock); for (order = 0; order < NR_PAGE_ORDERS; order++) { struct free_area *area = &(zone->free_area[order]); unsigned long size; @@ -3544,12 +3543,9 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * so this should not fail on zone boundaries. */ WARN_ON_ONCE(ret == -1); - if (ret > 0) { - spin_unlock_irqrestore(&zone->lock, flags); + if (ret > 0) return ret; - } } - spin_unlock_irqrestore(&zone->lock, flags); } return false; -- 2.52.0