As with memcg_slab_post_charge(), we save a call to compound_head() for
large kmallocs.  This is a slight change of behaviour in that
non-vmalloc, non-slab, non-kmalloc pointers will now cause a NULL
pointer dereference rather than a warning.  We could add that back if
really needed.

Signed-off-by: Matthew Wilcox (Oracle)
---
 mm/slub.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 0330f7f21ef0..b344cb0ee381 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6224,7 +6224,7 @@ static void free_large_kmalloc(struct page *page, void *object)
 void kvfree_rcu_cb(struct rcu_head *head)
 {
 	void *obj = head;
-	struct folio *folio;
+	struct page *page;
 	struct slab *slab;
 	struct kmem_cache *s;
 	void *slab_addr;
@@ -6235,20 +6235,20 @@ void kvfree_rcu_cb(struct rcu_head *head)
 		return;
 	}
 
-	folio = virt_to_folio(obj);
-	if (!folio_test_slab(folio)) {
+	page = virt_to_page(obj);
+	if (PageLargeKmalloc(page)) {
 		/*
 		 * rcu_head offset can be only less than page size so no need to
-		 * consider folio order
+		 * consider allocation order
 		 */
 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
-		free_large_kmalloc(&folio->page, obj);
+		free_large_kmalloc(page, obj);
		return;
 	}
 
-	slab = folio_slab(folio);
+	slab = page_slab(page);
 	s = slab->slab_cache;
-	slab_addr = folio_address(folio);
+	slab_addr = slab_address(slab);
 
 	if (is_kfence_address(obj)) {
 		obj = kfence_object_start(obj);
-- 
2.47.2
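
If the dropped warning for bogus pointers is ever wanted back, a minimal
sketch (assuming, as the commit message implies, that page_slab()
returns NULL for pages that are neither slab nor large kmalloc) could
look like:

	slab = page_slab(page);
	if (WARN_ON_ONCE(!slab))	/* not vmalloc, not slab, not a large kmalloc */
		return;
	s = slab->slab_cache;

This is only an illustrative sketch of the check alluded to above, not
something this patch adds.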