Because the pointer being checked may not lie within the first PAGE_SIZE
bytes of the object, we have to mark every page of the allocation as
LargeKmalloc, not just the head page.  We could use virt_to_head_page()
instead, but that would pessimize slab objects.  Once we move to memdescs
properly, we'll tag each page as LargeKmalloc anyway, so this is more in
keeping with how the code will be written in the future.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c     |  8 ++++++--
 mm/usercopy.c | 21 ++++++++++++---------
 2 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 8ab2d329664b..8226d2d9ff21 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5287,10 +5287,12 @@ static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
 
 	page = __alloc_frozen_pages_noprof(flags, order, node, NULL);
 	if (page) {
+		unsigned long i;
 		ptr = page_address(page);
 		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
 				      PAGE_SIZE << order);
-		__SetPageLargeKmalloc(page);
+		for (i = 0; i < 1UL << order; i++)
+			__SetPageLargeKmalloc(page + i);
 	}
 
 	ptr = kasan_kmalloc_large(ptr, size, flags);
@@ -6198,6 +6200,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 static void free_large_kmalloc(struct page *page, void *object)
 {
 	unsigned int order = compound_order(page);
+	unsigned long i;
 
 	if (WARN_ON_ONCE(order == 0))
 		pr_warn_once("object pointer: 0x%p\n", object);
@@ -6208,7 +6211,8 @@ static void free_large_kmalloc(struct page *page, void *object)
 
 	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
 			      -(PAGE_SIZE << order));
-	__ClearPageLargeKmalloc(page);
+	for (i = 0; i < 1UL << order; i++)
+		__ClearPageLargeKmalloc(page + i);
 	free_frozen_pages(page, order);
 }
 
diff --git a/mm/usercopy.c b/mm/usercopy.c
index dbdcc43964fb..8d21635147a4 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -164,7 +164,7 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 {
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long offset;
-	struct folio *folio;
+	struct page *page;
 
 	if (is_kmap_addr(ptr)) {
 		offset = offset_in_page(ptr);
@@ -189,15 +189,18 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (!virt_addr_valid(ptr))
 		return;
 
-	folio = virt_to_folio(ptr);
-
-	if (folio_test_slab(folio)) {
+	page = virt_to_page(ptr);
+	if (PageLargeKmalloc(page)) {
+		page = compound_head(page);
+		offset = ptr - page_address(page);
+		if (n > page_size(page) - offset)
+			usercopy_abort("kmalloc", NULL, to_user, offset, n);
+		return;
+	} else {
+		struct slab *slab = page_slab(page);
 		/* Check slab allocator for flags and size. */
-		__check_heap_object(ptr, n, folio_slab(folio), to_user);
-	} else if (folio_test_large(folio)) {
-		offset = ptr - folio_address(folio);
-		if (n > folio_size(folio) - offset)
-			usercopy_abort("page alloc", NULL, to_user, offset, n);
+		if (slab)
+			__check_heap_object(ptr, n, slab, to_user);
 	}
 }
 
-- 
2.47.2
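
For readers following along, here is a minimal sketch (not part of the patch;
the function below is hypothetical) of the case the per-page flag guards
against.  It assumes a 32KiB allocation exceeds KMALLOC_MAX_CACHE_SIZE and so
takes the large-kmalloc path, and it uses the PageLargeKmalloc() test from
earlier in this series.  check_heap_object() calls virt_to_page() on the
pointer it is handed, so a usercopy that starts a few pages into the object
resolves to a tail page; if only the head page carried the flag, such a copy
would fall through to the slab branch and skip the bounds check.

#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/* Hypothetical illustration only, not part of the patch. */
static void large_kmalloc_interior_pointer_demo(void)
{
	/*
	 * 32KiB: assumed to exceed KMALLOC_MAX_CACHE_SIZE, so this is
	 * served by the large-kmalloc path rather than a slab cache.
	 */
	void *obj = kmalloc(SZ_32K, GFP_KERNEL);
	struct page *page;

	if (!obj)
		return;

	/*
	 * A usercopy may legitimately begin two pages into the object.
	 * check_heap_object() does virt_to_page() on that interior
	 * pointer, so it sees a tail page of the allocation, not the
	 * head page.
	 */
	page = virt_to_page(obj + 2 * PAGE_SIZE);

	/* Only true because ___kmalloc_large_node() now flags every page. */
	WARN_ON(!PageLargeKmalloc(page));

	kfree(obj);
}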