Use zone_lock_irqsave scoped lock guard in set_migratetype_isolate()
to replace the explicit lock/unlock pattern with automatic scope-based
cleanup. The scoped variant is used to keep dump_page() outside the
locked section to avoid a lockdep splat.

Suggested-by: Steven Rostedt
Signed-off-by: Dmitry Ilvokhin
---
 mm/page_isolation.c | 60 ++++++++++++++++++++----------------------
 1 file changed, 26 insertions(+), 34 deletions(-)

diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index dc1e18124228..e7f006e8870c 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -168,48 +168,40 @@ static int set_migratetype_isolate(struct page *page, enum pb_isolate_mode mode,
 {
 	struct zone *zone = page_zone(page);
 	struct page *unmovable;
-	unsigned long flags;
 	unsigned long check_unmovable_start, check_unmovable_end;
 
 	if (PageUnaccepted(page))
 		accept_page(page);
 
-	zone_lock_irqsave(zone, flags);
-
-	/*
-	 * We assume the caller intended to SET migrate type to isolate.
-	 * If it is already set, then someone else must have raced and
-	 * set it before us.
-	 */
-	if (is_migrate_isolate_page(page)) {
-		zone_unlock_irqrestore(zone, flags);
-		return -EBUSY;
-	}
-
-	/*
-	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
-	 * We just check MOVABLE pages.
-	 *
-	 * Pass the intersection of [start_pfn, end_pfn) and the page's pageblock
-	 * to avoid redundant checks.
-	 */
-	check_unmovable_start = max(page_to_pfn(page), start_pfn);
-	check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
-				  end_pfn);
-
-	unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
-			mode);
-	if (!unmovable) {
-		if (!pageblock_isolate_and_move_free_pages(zone, page)) {
-			zone_unlock_irqrestore(zone, flags);
+	scoped_guard(zone_lock_irqsave, zone) {
+		/*
+		 * We assume the caller intended to SET migrate type to
+		 * isolate. If it is already set, then someone else must have
+		 * raced and set it before us.
+		 */
+		if (is_migrate_isolate_page(page))
 			return -EBUSY;
+
+		/*
+		 * FIXME: Now, memory hotplug doesn't call shrink_slab() by
+		 * itself. We just check MOVABLE pages.
+		 *
+		 * Pass the intersection of [start_pfn, end_pfn) and the page's
+		 * pageblock to avoid redundant checks.
+		 */
+		check_unmovable_start = max(page_to_pfn(page), start_pfn);
+		check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
+					  end_pfn);
+
+		unmovable = has_unmovable_pages(check_unmovable_start,
+						check_unmovable_end, mode);
+		if (!unmovable) {
+			if (!pageblock_isolate_and_move_free_pages(zone, page))
+				return -EBUSY;
+			zone->nr_isolate_pageblock++;
+			return 0;
 		}
-		zone->nr_isolate_pageblock++;
-		zone_unlock_irqrestore(zone, flags);
-		return 0;
 	}
-
-	zone_unlock_irqrestore(zone, flags);
 	if (mode == PB_ISOLATE_MODE_MEM_OFFLINE) {
 		/*
 		 * printk() with zone lock held will likely trigger a
-- 
2.47.3