There's no need to use folio APIs here; just use a page directly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 4e8104b05ed4..0330f7f21ef0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6195,12 +6195,12 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-static void free_large_kmalloc(struct folio *folio, void *object)
+static void free_large_kmalloc(struct page *page, void *object)
 {
-	unsigned int order = folio_order(folio);
+	unsigned int order = compound_order(page);
 
-	if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) {
-		dump_page(&folio->page, "Not a kmalloc allocation");
+	if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
+		dump_page(page, "Not a kmalloc allocation");
 		return;
 	}
 
@@ -6211,10 +6211,10 @@ static void free_large_kmalloc(struct folio *folio, void *object)
 	kasan_kfree_large(object);
 	kmsan_kfree_large(object);
 
-	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
+	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
 			      -(PAGE_SIZE << order));
-	__folio_clear_large_kmalloc(folio);
-	free_frozen_pages(&folio->page, order);
+	__ClearPageLargeKmalloc(page);
+	free_frozen_pages(page, order);
 }
 
 /*
@@ -6242,7 +6242,7 @@ void kvfree_rcu_cb(struct rcu_head *head)
 		 * consider folio order
 		 */
 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
-		free_large_kmalloc(folio, obj);
+		free_large_kmalloc(&folio->page, obj);
 		return;
 	}
 
@@ -6282,7 +6282,7 @@ void kfree(const void *object)
 
 	folio = virt_to_folio(object);
 	if (unlikely(!folio_test_slab(folio))) {
-		free_large_kmalloc(folio, (void *)object);
+		free_large_kmalloc(&folio->page, (void *)object);
 		return;
 	}
 
@@ -6641,7 +6641,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!folio_test_slab(folio))) {
-			free_large_kmalloc(folio, object);
+			free_large_kmalloc(&folio->page, object);
 			df->slab = NULL;
 			return size;
 		}
-- 
2.47.2
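
For reference, a reconstruction of free_large_kmalloc() as it reads with this
patch applied, assembled from the first two hunks above. The unchanged lines
that fall between the hunks are not shown in the diff and are elided here;
this is a reader's sketch, not part of the patch:

static void free_large_kmalloc(struct page *page, void *object)
{
	unsigned int order = compound_order(page);

	if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
		dump_page(page, "Not a kmalloc allocation");
		return;
	}

	/* ... unchanged lines between the two hunks elided ... */

	kasan_kfree_large(object);
	kmsan_kfree_large(object);

	/*
	 * Account and free. A large kmalloc allocation is a compound
	 * page, so compound_order() and the page flag helpers answer
	 * the same questions the folio accessors did.
	 */
	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));
	__ClearPageLargeKmalloc(page);
	free_frozen_pages(page, order);
}

Callers that still hold a folio (kvfree_rcu_cb, kfree,
build_detached_freelist) convert with &folio->page at the call site, so the
change should be behaviour-neutral.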