Later patches will rearrange the free areas, but there are a couple of
places that iterate over them with the assumption that they have the
current structure. Ideally, code outside of mm would not be aware of
struct free_area in the first place, but that awareness seems
relatively harmless, so just make the minimal change here.

Instead of letting users iterate over the free lists manually, provide
a macro to do it, and adopt that macro in the places that currently
open-code the iteration.

Signed-off-by: Brendan Jackman
---
 include/linux/mmzone.h  |  9 ++++++---
 kernel/power/snapshot.c |  7 +++----
 mm/mm_init.c            | 11 +++++++----
 3 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7fb7331c57250782a464a9583c6ea4867f4ffdab..02f5e8cc40c78ac8b81bb5c6f9af8718b1ffb316 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -123,9 +123,12 @@ static inline bool migratetype_is_mergeable(int mt)
 	return mt < MIGRATE_PCPTYPES;
 }
 
-#define for_each_migratetype_order(order, type) \
-	for (order = 0; order < NR_PAGE_ORDERS; order++) \
-		for (type = 0; type < MIGRATE_TYPES; type++)
+#define for_each_free_list(list, zone) \
+	for (unsigned int order = 0; order < NR_PAGE_ORDERS; order++) \
+		for (unsigned int type = 0; \
+		     list = &zone->free_area[order].free_list[type], \
+		     type < MIGRATE_TYPES; \
+		     type++)
 
 extern int page_group_by_mobility_disabled;
 
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 645f42e404789286ffa751f083e97e52a4e4cf7e..40a7064eb6b247f47ca02211f8347cbd605af590 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1244,8 +1244,8 @@ unsigned int snapshot_additional_pages(struct zone *zone)
 static void mark_free_pages(struct zone *zone)
 {
 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
+	struct list_head *free_list;
 	unsigned long flags;
-	unsigned int order, t;
 	struct page *page;
 
 	if (zone_is_empty(zone))
@@ -1269,9 +1269,8 @@ static void mark_free_pages(struct zone *zone)
 		swsusp_unset_page_free(page);
 	}
 
-	for_each_migratetype_order(order, t) {
-		list_for_each_entry(page,
-				    &zone->free_area[order].free_list[t], buddy_list) {
+	for_each_free_list(free_list, zone) {
+		list_for_each_entry(page, free_list, buddy_list) {
 			unsigned long i;
 
 			pfn = page_to_pfn(page);
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 3db2dea7db4c57c81f3fc3b71f0867025edda655..9554b79d0946a4a1a2ac5c934c1f80d2dc91b087 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1435,11 +1435,14 @@ static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx,
 
 static void __meminit zone_init_free_lists(struct zone *zone)
 {
-	unsigned int order, t;
-	for_each_migratetype_order(order, t) {
-		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
+	struct list_head *list;
+	unsigned int order;
+
+	for_each_free_list(list, zone)
+		INIT_LIST_HEAD(list);
+
+	for (order = 0; order < NR_PAGE_ORDERS; order++)
 		zone->free_area[order].nr_free = 0;
-	}
 
 #ifdef CONFIG_UNACCEPTED_MEMORY
 	INIT_LIST_HEAD(&zone->unaccepted_pages);
-- 
2.50.1
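
P.S. Purely for illustration, not part of the patch (the helper name
and the locking assumption are made up): a caller of the new macro
looks like the sketch below. The `free_list` cursor is declared by the
caller, while `order` and `type` stay internal to the macro, so a
caller that still needs the order (like the nr_free reset in
zone_init_free_lists() above) keeps a plain loop over NR_PAGE_ORDERS.

	/*
	 * Hypothetical helper: count the entries on all of a zone's
	 * free lists, without knowing how struct free_area lays them
	 * out. Assumes zone->lock is held so the lists are stable
	 * while we walk them.
	 */
	static unsigned long count_free_list_entries(struct zone *zone)
	{
		struct list_head *free_list;
		struct page *page;
		unsigned long nr = 0;

		/* Visit every (order, migratetype) free list once. */
		for_each_free_list(free_list, zone)
			list_for_each_entry(page, free_list, buddy_list)
				nr++;

		return nr;
	}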