Use the spinlock_irqsave zone lock guard in reserve_highatomic_pageblock()
to replace the explicit lock/unlock and goto out_unlock pattern with
automatic scope-based cleanup.

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
---
 mm/page_alloc.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 111b54df8a3c..3a4523c35fb6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3431,7 +3431,7 @@ static void reserve_highatomic_pageblock(struct page *page, int order,
 						struct zone *zone)
 {
 	int mt;
-	unsigned long max_managed, flags;
+	unsigned long max_managed;
 
 	/*
 	 * The number reserved as: minimum is 1 pageblock, maximum is
@@ -3445,29 +3445,26 @@ static void reserve_highatomic_pageblock(struct page *page, int order,
 	if (zone->nr_reserved_highatomic >= max_managed)
 		return;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	guard(spinlock_irqsave)(&zone->lock);
 
 	/* Recheck the nr_reserved_highatomic limit under the lock */
 	if (zone->nr_reserved_highatomic >= max_managed)
-		goto out_unlock;
+		return;
 
 	/* Yoink! */
 	mt = get_pageblock_migratetype(page);
 	/* Only reserve normal pageblocks (i.e., they can merge with others) */
 	if (!migratetype_is_mergeable(mt))
-		goto out_unlock;
+		return;
 
 	if (order < pageblock_order) {
 		if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
-			goto out_unlock;
+			return;
 		zone->nr_reserved_highatomic += pageblock_nr_pages;
 	} else {
 		change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
 		zone->nr_reserved_highatomic += 1 << order;
 	}
-
-out_unlock:
-	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
 /*
-- 
2.52.0