Before the folio is actually split, its order stays the same, so it is
only necessary to read folio_order() once. Also rename order to
old_order to make clear it is the original folio order.

Signed-off-by: Wei Yang
Cc: Zi Yan
Cc: Dev Jain
Cc: David Hildenbrand
Cc: Lance Yang
---
 mm/huge_memory.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 960fe50f2d89..9454fb7d635e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3786,7 +3786,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	bool is_anon = folio_test_anon(folio);
 	struct address_space *mapping = NULL;
 	struct anon_vma *anon_vma = NULL;
-	int order = folio_order(folio);
+	int old_order = folio_order(folio);
 	struct folio *new_folio, *next;
 	int nr_shmem_dropped = 0;
 	int remap_flags = 0;
@@ -3800,7 +3800,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	if (folio != page_folio(split_at) || folio != page_folio(lock_at))
 		return -EINVAL;
 
-	if (new_order >= folio_order(folio))
+	if (new_order >= old_order)
 		return -EINVAL;
 
 	if (uniform_split && !uniform_split_supported(folio, new_order, true))
@@ -3871,7 +3871,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 
 		if (uniform_split) {
 			xas_set_order(&xas, folio->index, new_order);
-			xas_split_alloc(&xas, folio, folio_order(folio), gfp);
+			xas_split_alloc(&xas, folio, old_order, gfp);
 			if (xas_error(&xas)) {
 				ret = xas_error(&xas);
 				goto out;
@@ -3926,7 +3926,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		struct lruvec *lruvec;
 		int expected_refs;
 
-		if (folio_order(folio) > 1) {
+		if (old_order > 1) {
 			if (!list_empty(&folio->_deferred_list)) {
 				ds_queue->split_queue_len--;
 				/*
@@ -3939,8 +3939,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 			}
 			if (folio_test_partially_mapped(folio)) {
 				folio_clear_partially_mapped(folio);
-				mod_mthp_stat(folio_order(folio),
-					MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+				mod_mthp_stat(old_order, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
 			}
 		}
 		split_queue_unlock(ds_queue);
@@ -4061,7 +4060,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	if (!ret && is_anon && !folio_is_device_private(folio))
 		remap_flags = RMP_USE_SHARED_ZEROPAGE;
 
-	remap_page(folio, 1 << order, remap_flags);
+	remap_page(folio, 1 << old_order, remap_flags);
 
 	/*
 	 * Unlock all after-split folios except the one containing
@@ -4092,9 +4091,9 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		i_mmap_unlock_read(mapping);
 out:
 	xas_destroy(&xas);
-	if (order == HPAGE_PMD_ORDER)
+	if (old_order == HPAGE_PMD_ORDER)
 		count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
-	count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
+	count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
 	return ret;
 }
 
-- 
2.34.1
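
For readers outside the mm code, here is a minimal userspace C sketch of the
pattern the patch applies: a value that is invariant for the duration of the
function (here the pre-split folio order) is read once into a local and then
reused, instead of calling the accessor at every use site. The struct obj,
obj_order() and split_all() names are invented for this illustration and are
not kernel APIs.

	#include <stdio.h>

	/*
	 * Hypothetical stand-in for a folio: the "order" field is not
	 * modified between the top of split_all() and the point where the
	 * object is actually split, mirroring how folio_order() is stable
	 * across the early checks in __folio_split().
	 */
	struct obj {
		unsigned int order;
	};

	static unsigned int obj_order(const struct obj *o)
	{
		return o->order;
	}

	static int split_all(struct obj *o, unsigned int new_order)
	{
		/* Read the invariant once up front ... */
		unsigned int old_order = obj_order(o);

		if (new_order >= old_order)
			return -1;

		/*
		 * ... and reuse the cached value below instead of calling
		 * obj_order() again at every use site.
		 */
		printf("splitting order %u -> %u (%u pieces)\n",
		       old_order, new_order, 1u << (old_order - new_order));
		return 0;
	}

	int main(void)
	{
		struct obj o = { .order = 9 };

		return split_all(&o, 0);
	}

The rename to old_order has the side benefit of making each later use site
read unambiguously as "the order the folio had before the split".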