As with memcg_slab_post_charge(), we save a call to compound_head() for
large kmallocs.  This has a slight change of behaviour in that non-slab,
non-kmalloc pointers will now cause a NULL pointer dereference rather
than a warning.  We could add that back if really needed.

Signed-off-by: Matthew Wilcox (Oracle)
---
 mm/slub.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index f31206e5c89a..af3e7ef30b5d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6633,23 +6633,23 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 {
 	int lookahead = 3;
 	void *object;
-	struct folio *folio;
+	struct page *page;
 	size_t same;
 
 	object = p[--size];
-	folio = virt_to_folio(object);
+	page = virt_to_page(object);
 	if (!s) {
 		/* Handle kalloc'ed objects */
-		if (unlikely(!folio_test_slab(folio))) {
-			free_large_kmalloc(&folio->page, object);
+		if (unlikely(PageLargeKmalloc(page))) {
+			free_large_kmalloc(page, object);
 			df->slab = NULL;
 			return size;
 		}
 		/* Derive kmem_cache from object */
-		df->slab = folio_slab(folio);
+		df->slab = page_slab(page);
 		df->s = df->slab->slab_cache;
 	} else {
-		df->slab = folio_slab(folio);
+		df->slab = page_slab(page);
 		df->s = cache_from_obj(s, object);	/* Support for memcg */
 	}
 
-- 
2.47.2
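
For illustration of the behaviour change described in the commit message, below is a small userspace C model of the new dispatch logic, a sketch only: the types and helpers (dispatch, is_slab, is_large_kmalloc, the stub page_slab and free_large_kmalloc) are simplified stand-ins, not the real kernel APIs. It shows why a pointer that is neither a slab object nor a large kmalloc now ends in a NULL pointer dereference: the stub page_slab() returns NULL and the slab->slab_cache access faults, whereas the old !folio_test_slab() check would have routed such a pointer to free_large_kmalloc() and its warning instead.

/*
 * Userspace model of the dispatch in build_detached_freelist() after this
 * patch.  Everything here is a simplified stand-in for the kernel types.
 */
#include <stdbool.h>
#include <stdio.h>

struct kmem_cache { const char *name; };
struct slab { struct kmem_cache *slab_cache; };

struct page {
	bool is_slab;		/* models folio_test_slab()/page_slab() */
	bool is_large_kmalloc;	/* models the new PageLargeKmalloc() flag */
	struct slab *slab;
};

/* Models page_slab(): NULL for anything that is not a slab page. */
static struct slab *page_slab(struct page *page)
{
	return page->is_slab ? page->slab : NULL;
}

static void free_large_kmalloc(struct page *page)
{
	printf("freed large kmalloc page\n");
}

/* New-style dispatch: only large kmalloc pages take the early exit. */
static void dispatch(struct page *page)
{
	if (page->is_large_kmalloc) {
		free_large_kmalloc(page);
		return;
	}
	struct slab *slab = page_slab(page);
	/* For a non-slab, non-kmalloc page, slab is NULL here ... */
	printf("cache: %s\n", slab->slab_cache->name);	/* ... so this faults */
}

int main(void)
{
	struct kmem_cache cache = { .name = "kmalloc-64" };
	struct slab slab = { .slab_cache = &cache };
	struct page slab_page = { .is_slab = true, .slab = &slab };
	struct page kmalloc_page = { .is_large_kmalloc = true };
	struct page bogus_page = { 0 };	/* neither slab nor large kmalloc */

	dispatch(&slab_page);	/* prints the cache name */
	dispatch(&kmalloc_page);	/* takes the large-kmalloc path */
	dispatch(&bogus_page);	/* NULL pointer dereference, as noted above */
	return 0;
}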