When separately allocating ptdesc from struct page, calling
preallocate_vmalloc_pages() from mem_init() is too early as the slab
allocator hasn't been set up yet.  Move preallocate_vmalloc_pages() to
vmalloc_init(), which is called after the slab allocator has been set
up.

Honestly, this patch is a bit bobbins and I'm sure it'll be reworked
before it goes upstream.

Signed-off-by: Matthew Wilcox (Oracle)
---
 arch/x86/mm/init_64.c |  4 +---
 include/linux/mm.h    | 33 +++++++++++++++++++++++++++++++--
 mm/vmalloc.c          |  2 ++
 3 files changed, 34 insertions(+), 5 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 0e4270e20fad..5270fc24f6f6 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1322,7 +1322,7 @@ static void __init register_page_bootmem_info(void)
  * Only the level which needs to be synchronized between all page-tables is
  * allocated because the synchronization can be expensive.
  */
-static void __init preallocate_vmalloc_pages(void)
+void __init preallocate_vmalloc_pages(void)
 {
 	unsigned long addr;
 	const char *lvl;
@@ -1390,8 +1390,6 @@ void __init mem_init(void)
 	/* Register memory areas for /proc/kcore */
 	if (get_gate_vma(&init_mm))
 		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
-
-	preallocate_vmalloc_pages();
 }
 
 int kernel_set_to_readonly;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index edcb7d75542f..e60b181da3df 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1160,6 +1160,12 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 }
 #endif
 
+#ifdef CONFIG_X86
+void __init preallocate_vmalloc_pages(void);
+#else
+static inline void preallocate_vmalloc_pages(void) { }
+#endif
+
 /*
  * How many times the entire folio is mapped as a single unit (eg by a
  * PMD or PUD entry).  This is probably not what you want, except for
@@ -2939,9 +2945,32 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU */
 
+static inline struct page *ptdesc_page(const struct ptdesc *pt)
+{
+	return pt->pt_page;
+}
+
+static inline struct ptdesc *page_ptdesc(const struct page *page)
+{
+	memdesc_t memdesc = READ_ONCE(page->memdesc);
+
+	if (memdesc_type(memdesc) != MEMDESC_TYPE_PAGE_TABLE) {
+		printk(KERN_EMERG "memdesc %lx index %lx\n", memdesc.v, page->__folio_index);
+		VM_BUG_ON_PAGE(1, page);
+		return NULL;
+	}
+	return (void *)(memdesc.v - MEMDESC_TYPE_PAGE_TABLE);
+}
+
+/**
+ * enum pt_flags - How the ptdesc flags bits are used.
+ * @PT_reserved: Used by PowerPC
+ *
+ * The pt flags are stored in a memdesc_flags_t.
+ * The high bits are used for information like zone/node/section.
+ */
 enum pt_flags {
 	PT_reserved = PG_reserved,
-	/* High bits are used for zone/node/section */
 };
 
 static inline struct ptdesc *virt_to_ptdesc(const void *x)
@@ -2957,7 +2986,7 @@ static inline struct ptdesc *virt_to_ptdesc(const void *x)
  */
 static inline void *ptdesc_address(const struct ptdesc *pt)
 {
-	return folio_address(ptdesc_folio(pt));
+	return page_address(pt->pt_page);
 }
 
 static inline bool pagetable_is_reserved(struct ptdesc *pt)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 798b2ed21e46..9b349051a83a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -5264,6 +5264,8 @@ void __init vmalloc_init(void)
 	struct vm_struct *tmp;
 	int i;
 
+	preallocate_vmalloc_pages();
+
 	/*
 	 * Create the cache for vmap_area objects.
 	 */
-- 
2.47.2
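
For anyone not yet familiar with the memdesc encoding that page_ptdesc()
above relies on, here is a minimal standalone userspace sketch of the
round trip, assuming the type tag sits in the otherwise-zero low bits of
the (aligned) descriptor pointer.  Only the names taken from the diff
(the page_ptdesc()-style decode, the pt_page back-pointer,
MEMDESC_TYPE_PAGE_TABLE) correspond to the real thing; the struct
layouts, the mask, the 0x3 tag value and the demo_* helpers are
simplified stand-ins for illustration, and the in-kernel definitions
differ in detail.

/*
 * Userspace model of the type-tagged memdesc used by page_ptdesc().
 * Everything here is illustrative; the real memdesc_t, memdesc_type()
 * and MEMDESC_TYPE_PAGE_TABLE live in the kernel and differ in detail.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed encoding: a small type tag in the low bits of the pointer. */
#define MEMDESC_TYPE_MASK	0x7UL
#define MEMDESC_TYPE_PAGE_TABLE	0x3UL	/* tag value chosen for the demo */

typedef struct { unsigned long v; } memdesc_t;

struct page {
	memdesc_t memdesc;	/* tagged pointer to the separate descriptor */
};

struct ptdesc {
	struct page *pt_page;	/* back-pointer, as ptdesc_page() returns */
	unsigned long pt_flags;
};

static unsigned long memdesc_type(memdesc_t memdesc)
{
	return memdesc.v & MEMDESC_TYPE_MASK;
}

/* Mirrors page_ptdesc(): check the tag, then strip it off the pointer. */
static struct ptdesc *demo_page_ptdesc(const struct page *page)
{
	memdesc_t memdesc = page->memdesc;

	if (memdesc_type(memdesc) != MEMDESC_TYPE_PAGE_TABLE)
		return NULL;
	return (void *)(memdesc.v - MEMDESC_TYPE_PAGE_TABLE);
}

int main(void)
{
	struct page page;
	/*
	 * aligned_alloc() stands in for the slab allocation the patch needs;
	 * 8-byte alignment keeps the low tag bits of the pointer zero.
	 */
	struct ptdesc *pt = aligned_alloc(8, sizeof(*pt));

	if (!pt)
		return 1;
	pt->pt_page = &page;
	page.memdesc.v = (unsigned long)pt | MEMDESC_TYPE_PAGE_TABLE;

	assert(demo_page_ptdesc(&page) == pt);
	printf("round trip ok: page %p <-> ptdesc %p\n",
	       (void *)pt->pt_page, (void *)demo_page_ptdesc(&page));
	free(pt);
	return 0;
}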