In order to separate slabs from folios, we need to be able to convert
from any page in a slab to that slab directly, without going through a
page-to-folio conversion first.

page_slab() is a little different from the other memdesc converters we
have in that it returns NULL if the page is not part of a slab. This
will be the normal style for memdesc converters in the future.

kfence was the only user of page_slab(), so adjust it to the new way of
working. It will need to be touched again when we separate struct slab
from struct page.
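Callers now test the returned pointer for NULL rather than checking
PageSlab() before converting. A minimal sketch of the new convention
(the helper below is hypothetical, for illustration only, and not part
of this patch):

	/* Hypothetical caller, for illustration only. */
	static bool page_belongs_to_slab(const struct page *page)
	{
		struct slab *slab = page_slab(page);

		/*
		 * page_slab() returns NULL for non-slab pages, including
		 * pages returned from large kmalloc.
		 */
		return slab != NULL;
	}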
Signed-off-by: Matthew Wilcox (Oracle)
---
 include/linux/page-flags.h | 14 +-------------
 mm/kfence/core.c           | 12 ++++++++----
 mm/slab.h                  | 28 ++++++++++++++++------------
 3 files changed, 25 insertions(+), 29 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5ee6ffbdbf83..39ed2f243279 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1052,19 +1052,7 @@ PAGE_TYPE_OPS(Table, table, pgtable)
  */
 PAGE_TYPE_OPS(Guard, guard, guard)
 
-FOLIO_TYPE_OPS(slab, slab)
-
-/**
- * PageSlab - Determine if the page belongs to the slab allocator
- * @page: The page to test.
- *
- * Context: Any context.
- * Return: True for slab pages, false for any other kind of page.
- */
-static inline bool PageSlab(const struct page *page)
-{
-	return folio_test_slab(page_folio(page));
-}
+PAGE_TYPE_OPS(Slab, slab, slab)
 
 #ifdef CONFIG_HUGETLB_PAGE
 FOLIO_TYPE_OPS(hugetlb, hugetlb)
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 0ed3be100963..5fb0f0f8f99a 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -613,12 +613,14 @@ static unsigned long kfence_init_pool(void)
 	 * enters __slab_free() slow-path.
 	 */
 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab = page_slab(nth_page(pages, i));
+		struct page *page = nth_page(pages, i);
+		struct slab *slab;
 
 		if (!i || (i % 2))
 			continue;
 
-		__folio_set_slab(slab_folio(slab));
+		__SetPageSlab(page);
+		slab = page_slab(page);
 #ifdef CONFIG_MEMCG
 		slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
 				 MEMCG_DATA_OBJEXTS;
@@ -665,14 +667,16 @@ static unsigned long kfence_init_pool(void)
 
 reset_slab:
 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab = page_slab(nth_page(pages, i));
+		struct page *page = nth_page(pages, i);
+		struct slab *slab;
 
 		if (!i || (i % 2))
 			continue;
+		slab = page_slab(page);
 #ifdef CONFIG_MEMCG
 		slab->obj_exts = 0;
 #endif
-		__folio_clear_slab(slab_folio(slab));
+		__ClearPageSlab(page);
 	}
 
 	return addr;
diff --git a/mm/slab.h b/mm/slab.h
index bf2f91a6c535..084231394250 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -142,20 +142,24 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
 	struct slab *: (struct folio *)s))
 
 /**
- * page_slab - Converts from first struct page to slab.
- * @p: The first (either head of compound or single) page of slab.
+ * page_slab - Converts from struct page to its slab.
+ * @page: A page which may or may not belong to a slab.
  *
- * A temporary wrapper to convert struct page to struct slab in situations where
- * we know the page is the compound head, or single order-0 page.
- *
- * Long-term ideally everything would work with struct slab directly or go
- * through folio to struct slab.
- *
- * Return: The slab which contains this page
+ * Return: The slab which contains this page or NULL if the page does
+ * not belong to a slab. This includes pages returned from large kmalloc.
  */
-#define page_slab(p) (_Generic((p),				\
-	const struct page *: (const struct slab *)(p),	\
-	struct page *: (struct slab *)(p)))
+static inline struct slab *page_slab(const struct page *page)
+{
+	unsigned long head;
+
+	head = READ_ONCE(page->compound_head);
+	if (head & 1)
+		page = (struct page *)(head - 1);
+	if (data_race(page->page_type >> 24) != PGTY_slab)
+		page = NULL;
+
+	return (struct slab *)page;
+}
 
 /**
  * slab_page - The first struct page allocated for a slab
-- 
2.47.2