In the future, we will separate slab, folio and page from each other
and calling virt_to_folio() on an address allocated from slab will
return NULL.  Delay the conversion from struct page to struct slab
until we know we're not dealing with a large kmalloc allocation.

This deprecates calling ksize() on memory allocated by alloc_pages().
Today it becomes a warning and support will be removed entirely in
the future.

Signed-off-by: Matthew Wilcox (Oracle)
---
 include/linux/page-flags.h |  2 +-
 mm/slab.h                  | 10 ++++++++++
 mm/slab_common.c           | 24 ++++++++++++++++--------
 3 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 39ed2f243279..f0f7b536b9e5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1068,7 +1068,7 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
  * Serialized with zone lock.
  */
 PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
-FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
+PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)

 /**
  * PageHuge - Determine if the page belongs to hugetlbfs
diff --git a/mm/slab.h b/mm/slab.h
index 084231394250..e3a85318a6e5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -595,6 +595,16 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	return s->size;
 }

+static inline unsigned int large_kmalloc_order(const struct page *page)
+{
+	return page[1].flags.f & 0xff;
+}
+
+static inline size_t large_kmalloc_size(const struct page *page)
+{
+	return PAGE_SIZE << large_kmalloc_order(page);
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 void dump_unreclaimable_slab(void);
 #else
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 08f5baee1309..e017ed0cd438 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -994,26 +994,34 @@ void __init create_kmalloc_caches(void)
  */
 size_t __ksize(const void *object)
 {
-	struct folio *folio;
+	const struct page *page;
+	const struct slab *slab;

 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;

-	folio = virt_to_folio(object);
+	page = virt_to_page(object);
+
+	if (unlikely(PageLargeKmalloc(page))) {
+		size_t size = large_kmalloc_size(page);

-	if (unlikely(!folio_test_slab(folio))) {
-		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
+		if (WARN_ON(size <= KMALLOC_MAX_CACHE_SIZE))
 			return 0;
-		if (WARN_ON(object != folio_address(folio)))
+		if (WARN_ON(object != page_address(page)))
 			return 0;
-		return folio_size(folio);
+		return size;
 	}

+	/* Delete this after we're sure there are no users */
+	if (WARN_ON(!PageSlab(page)))
+		return page_size(page);
+
+	slab = page_slab(page);
 #ifdef CONFIG_SLUB_DEBUG
-	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+	skip_orig_size_check(slab->slab_cache, object);
 #endif

-	return slab_ksize(folio_slab(folio)->slab_cache);
+	return slab_ksize(slab->slab_cache);
 }

 gfp_t kmalloc_fix_flags(gfp_t flags)
-- 
2.47.2
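
A minimal sketch of the call pattern this deprecates (illustrative only, not
part of the patch; the allocation order and variable names are made up): after
this change, ksize() on memory obtained from alloc_pages() hits the
WARN_ON(!PageSlab(page)) path in __ksize() and falls back to page_size(), so
such callers should track the size themselves instead.

	/* Hypothetical caller, not from this patch. */
	struct page *page = alloc_pages(GFP_KERNEL, 2);
	void *buf = page_address(page);

	size_t sz = ksize(buf);	/* now warns; returns page_size(page) for this page */
	size_t ok = page_size(page);	/* preferred: PAGE_SIZE << 2 here */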