From: Yuan Liu Function set_zone_contiguous uses __pageblock_pfn_to_page to check that the whole pageblock is in the same zone. One assumption is that the memory section must be online; otherwise __pageblock_pfn_to_page will return NULL, and set_zone_contiguous will record the zone as not contiguous. When move_pfn_range_to_zone invoked set_zone_contiguous, the memory section was not yet online, so the result was always false. To fix this issue, remove set_zone_contiguous from move_pfn_range_to_zone and invoke it after the memory section has been onlined. Function remove_pfn_range_from_zone does not have this issue because the memory section is still online at the time set_zone_contiguous is invoked. Reviewed-by: Tianyou Li Reviewed-by: Nanhai Zou Signed-off-by: Yuan Liu --- mm/memory_hotplug.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index d711f6e2c87f..f548d9180415 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -809,8 +809,7 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, { struct pglist_data *pgdat = zone->zone_pgdat; int nid = pgdat->node_id; - const enum zone_contig_state contiguous_state = - zone_contig_state_after_growing(zone, start_pfn, nr_pages); + clear_zone_contiguous(zone); if (zone_is_empty(zone)) @@ -840,8 +839,6 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0, MEMINIT_HOTPLUG, altmap, migratetype, isolate_pageblock); - - set_zone_contiguous(zone, contiguous_state); } struct auto_movable_stats { @@ -1150,6 +1147,7 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, { unsigned long end_pfn = pfn + nr_pages; int ret, i; + enum zone_contig_state contiguous_state = ZONE_CONTIG_NO; ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); if (ret) @@ -1164,6 +1162,10 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long
nr_pages, if (mhp_off_inaccessible) page_init_poison(pfn_to_page(pfn), sizeof(struct page) * nr_pages); + if (IS_ALIGNED(end_pfn, PAGES_PER_SECTION)) + contiguous_state = zone_contig_state_after_growing(zone, pfn, + nr_pages); + move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE, false); @@ -1182,6 +1184,7 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, if (nr_pages >= PAGES_PER_SECTION) online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION)); + set_zone_contiguous(zone, contiguous_state); return ret; } @@ -1220,6 +1223,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages, }; const int nid = zone_to_nid(zone); int need_zonelists_rebuild = 0; + enum zone_contig_state contiguous_state = ZONE_CONTIG_NO; unsigned long flags; int ret; @@ -1234,6 +1238,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages, !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION))) return -EINVAL; + contiguous_state = zone_contig_state_after_growing(zone, pfn, nr_pages); /* associate pfn range with the zone */ move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE, @@ -1272,6 +1277,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages, } online_pages_range(pfn, nr_pages); + set_zone_contiguous(zone, contiguous_state); adjust_present_page_count(pfn_to_page(pfn), group, nr_pages); if (node_arg.nid >= 0) -- 2.47.1