As with memcg_slab_post_charge(), we save a call to compound_head() for
large kmallocs.  This is a slight change of behaviour in that
non-vmalloc, non-slab, non-kmalloc pointers will now cause a NULL
pointer dereference rather than a warning.  We could add the warning
back if really needed.

Signed-off-by: Matthew Wilcox (Oracle)
---
 mm/slub.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index e471716cbde3..f31206e5c89a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6320,16 +6320,16 @@ __do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags,
 	if (is_kfence_address(p)) {
 		ks = orig_size = kfence_ksize(p);
 	} else {
-		struct folio *folio;
+		struct page *page;
 
-		folio = virt_to_folio(p);
-		if (unlikely(!folio_test_slab(folio))) {
+		page = virt_to_page(p);
+		if (unlikely(PageLargeKmalloc(page))) {
 			/* Big kmalloc object */
-			WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE);
-			WARN_ON(p != folio_address(folio));
-			ks = folio_size(folio);
+			ks = page_size(page);
+			WARN_ON(ks <= KMALLOC_MAX_CACHE_SIZE);
+			WARN_ON(p != page_address(page));
 		} else {
-			s = folio_slab(folio)->slab_cache;
+			s = page_slab(page)->slab_cache;
 			orig_size = get_orig_size(s, (void *)p);
 			ks = s->object_size;
 		}
-- 
2.47.2
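
For readers following along, here is a sketch of how the affected
fragment of __do_krealloc() reads once the hunk above is applied.  It
is reconstructed purely from the diff; the declarations of s, ks and
orig_size are assumed to come from the unchanged earlier part of the
function, and the outer closing brace is inferred from brace matching:

	if (is_kfence_address(p)) {
		ks = orig_size = kfence_ksize(p);
	} else {
		struct page *page;

		/*
		 * virt_to_page() avoids the compound_head() call that
		 * virt_to_folio() performs.
		 */
		page = virt_to_page(p);
		if (unlikely(PageLargeKmalloc(page))) {
			/* Big kmalloc object */
			ks = page_size(page);
			WARN_ON(ks <= KMALLOC_MAX_CACHE_SIZE);
			WARN_ON(p != page_address(page));
		} else {
			/*
			 * A non-slab, non-kmalloc pointer now reaches this
			 * dereference instead of tripping the old
			 * !folio_test_slab() warning.
			 */
			s = page_slab(page)->slab_cache;
			orig_size = get_orig_size(s, (void *)p);
			ks = s->object_size;
		}
	}

Note that ks = page_size(page) now comes before the sanity checks so
the first WARN_ON() can reuse ks, which is why the three lines are
reordered relative to the folio-based version.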