As with memcg_slab_post_charge(), we save a call to compound_head() for
large kmallocs.  This has a slight change of behaviour in that
non-vmalloc, non-slab, non-kmalloc pointers will now cause a NULL
pointer dereference rather than a warning.  We could add that back if
really needed.

Signed-off-by: Matthew Wilcox (Oracle)
---
 mm/slub.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index b344cb0ee381..e471716cbde3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6270,7 +6270,7 @@ void kvfree_rcu_cb(struct rcu_head *head)
  */
 void kfree(const void *object)
 {
-	struct folio *folio;
+	struct page *page;
 	struct slab *slab;
 	struct kmem_cache *s;
 	void *x = (void *)object;
@@ -6280,13 +6280,13 @@ void kfree(const void *object)
 	if (unlikely(ZERO_OR_NULL_PTR(object)))
 		return;
 
-	folio = virt_to_folio(object);
-	if (unlikely(!folio_test_slab(folio))) {
-		free_large_kmalloc(&folio->page, (void *)object);
+	page = virt_to_page(object);
+	if (unlikely(PageLargeKmalloc(page))) {
+		free_large_kmalloc(page, (void *)object);
 		return;
 	}
 
-	slab = folio_slab(folio);
+	slab = page_slab(page);
 	s = slab->slab_cache;
 	slab_free(s, slab, x, _RET_IP_);
 }
-- 
2.47.2