For a fault at address addr, the page offset is page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT = ((addr & PAGE_MASK) - vma->vm_start) >> PAGE_SHIFT = (addr - vma->vm_start) >> PAGE_SHIFT Since the faulty logical page offset based on VMA is vmf->pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) We can slightly simplify the calculation using page_offset = vmf->pgoff - vma->vm_pgoff Signed-off-by: Loïc Molinari --- drivers/gpu/drm/drm_gem_shmem_helper.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index dc94a27710e5..be89be1c804c 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -577,8 +577,8 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) struct page *page; pgoff_t page_offset; - /* We don't use vmf->pgoff since that has the fake offset */ - page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; + /* Offset to faulty address in the VMA (without the fake offset). */ + page_offset = vmf->pgoff - vma->vm_pgoff; dma_resv_lock(shmem->base.resv, NULL); -- 2.47.3 This gives the mm subsystem the ability to increase fault handling performance by proposing the insertion of a range of pages around the faulty address in a single batch. 
v4: - implement map_pages instead of huge_fault v5: - improve patch series progression - use dma_resv_trylock() in map_pages (many thanks to Matthew Wilcox) - validate map_pages range based on end_pgoff instead of start_pgoff Signed-off-by: Loïc Molinari --- drivers/gpu/drm/drm_gem_shmem_helper.c | 72 ++++++++++++++++++++++---- 1 file changed, 62 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index be89be1c804c..2a9fbc9c3712 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -567,31 +567,82 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, } EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create); -static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) +static bool drm_gem_shmem_fault_is_valid(struct drm_gem_object *obj, + pgoff_t pgoff) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages) || + pgoff >= (obj->size >> PAGE_SHIFT) || + shmem->madv < 0) + return false; + + return true; +} + +static vm_fault_t drm_gem_shmem_map_pages(struct vm_fault *vmf, + pgoff_t start_pgoff, + pgoff_t end_pgoff) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - loff_t num_pages = obj->size >> PAGE_SHIFT; + struct page **pages = shmem->pages; + unsigned long addr, pfn; vm_fault_t ret; - struct page *page; + + start_pgoff -= vma->vm_pgoff; + end_pgoff -= vma->vm_pgoff; + addr = vma->vm_start + (start_pgoff << PAGE_SHIFT); + + /* map_pages is called with the RCU lock for reading (sleep isn't + * allowed) so just fall through to the more heavy-weight fault path. 
+ */ + if (unlikely(!dma_resv_trylock(shmem->base.resv))) + return 0; + + if (unlikely(!drm_gem_shmem_fault_is_valid(obj, end_pgoff))) { + ret = VM_FAULT_SIGBUS; + goto out; + } + + /* Map a range of pages around the faulty address. */ + do { + pfn = page_to_pfn(pages[start_pgoff]); + ret = vmf_insert_pfn(vma, addr, pfn); + addr += PAGE_SIZE; + } while (++start_pgoff <= end_pgoff && ret == VM_FAULT_NOPAGE); + + out: + dma_resv_unlock(shmem->base.resv); + + return ret; +} + +static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct page **pages = shmem->pages; pgoff_t page_offset; + unsigned long pfn; + vm_fault_t ret; /* Offset to faulty address in the VMA (without the fake offset). */ page_offset = vmf->pgoff - vma->vm_pgoff; dma_resv_lock(shmem->base.resv, NULL); - if (page_offset >= num_pages || - drm_WARN_ON_ONCE(obj->dev, !shmem->pages) || - shmem->madv < 0) { + if (unlikely(!drm_gem_shmem_fault_is_valid(obj, page_offset))) { ret = VM_FAULT_SIGBUS; - } else { - page = shmem->pages[page_offset]; - - ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); + goto out; } + pfn = page_to_pfn(pages[page_offset]); + ret = vmf_insert_pfn(vma, vmf->address, pfn); + + out: dma_resv_unlock(shmem->base.resv); return ret; @@ -632,6 +683,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) } const struct vm_operations_struct drm_gem_shmem_vm_ops = { + .map_pages = drm_gem_shmem_map_pages, .fault = drm_gem_shmem_fault, .open = drm_gem_shmem_vm_open, .close = drm_gem_shmem_vm_close, -- 2.47.3 Check whether the starting address in the fault-around handler or the faulty address in the fault handler is part of a huge page in order to attempt a PMD sized PFN insertion into the VMA. 
On builds with CONFIG_TRANSPARENT_HUGEPAGE enabled, if the mmap() user address is PMD size aligned, if the GEM object is backed by shmem buffers on mountpoints setting the 'huge=' option and if the shmem backing store manages to allocate a huge folio, CPU mapping would then benefit from significantly increased memcpy() performance. When these conditions are met on a system with 2 MiB huge pages, an aligned copy of 2 MiB would raise a single page fault instead of 4096. v4: - implement map_pages instead of huge_fault Signed-off-by: Loïc Molinari --- drivers/gpu/drm/drm_gem_shmem_helper.c | 30 ++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 2a9fbc9c3712..b7238448d4f9 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -580,6 +580,26 @@ static bool drm_gem_shmem_fault_is_valid(struct drm_gem_object *obj, return true; } +static bool drm_gem_shmem_map_pmd(struct vm_fault *vmf, unsigned long addr, + struct page *page) +{ +#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP + unsigned long pfn = page_to_pfn(page); + unsigned long paddr = pfn << PAGE_SHIFT; + bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK); + + if (aligned && + pmd_none(*vmf->pmd) && + folio_test_pmd_mappable(page_folio(page))) { + pfn &= PMD_MASK >> PAGE_SHIFT; + if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE) + return true; + } +#endif + + return false; +} + static vm_fault_t drm_gem_shmem_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff) @@ -606,6 +626,11 @@ static vm_fault_t drm_gem_shmem_map_pages(struct vm_fault *vmf, goto out; } + if (drm_gem_shmem_map_pmd(vmf, addr, pages[start_pgoff])) { + ret = VM_FAULT_NOPAGE; + goto out; + } + /* Map a range of pages around the faulty address. 
*/ do { pfn = page_to_pfn(pages[start_pgoff]); @@ -639,6 +664,11 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) goto out; } + if (drm_gem_shmem_map_pmd(vmf, vmf->address, pages[page_offset])) { + ret = VM_FAULT_NOPAGE; + goto out; + } + pfn = page_to_pfn(pages[page_offset]); ret = vmf_insert_pfn(vma, vmf->address, pfn); -- 2.47.3 mmap() calls on the DRM file pointer currently always end up using mm_get_unmapped_area() to get a free mapping region. On builds with CONFIG_TRANSPARENT_HUGEPAGE enabled, this isn't ideal for GEM objects backed by shmem buffers on mountpoints setting the 'huge=' option because it can't correctly figure out the potentially huge address alignment required. This commit introduces the drm_gem_get_unmapped_area() function which is meant to be used as a get_unmapped_area file operation on the DRM file pointer to lookup GEM objects based on their fake offsets and get a properly aligned region by calling shmem_get_unmapped_area() with the right file pointer. If a GEM object isn't available at the given offset or if the caller isn't granted access to it, the function falls back to mm_get_unmapped_area(). This also makes drm_gem_get_unmapped_area() part of the default GEM file operations so that all the DRM drivers can benefit from more efficient mappings thanks to the huge page fault handler introduced in previous commit 'drm/shmem-helper: Add huge page fault handler'. The shmem_get_unmapped_area() function needs to be exported so that it can be used from the DRM subsystem. 
v3: - add missing include: 'linux/sched/mm.h' - forward to shmem layer in builds with CONFIG_TRANSPARENT_HUGEPAGE=n Signed-off-by: Loïc Molinari --- drivers/gpu/drm/drm_gem.c | 107 ++++++++++++++++++++++++++++++-------- include/drm/drm_gem.h | 4 ++ mm/shmem.c | 1 + 3 files changed, 90 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index a1a9c828938b..a98d5744cc6c 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -1187,36 +1188,27 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, } EXPORT_SYMBOL(drm_gem_mmap_obj); -/** - * drm_gem_mmap - memory map routine for GEM objects - * @filp: DRM file pointer - * @vma: VMA for the area to be mapped - * - * If a driver supports GEM object mapping, mmap calls on the DRM file - * descriptor will end up here. - * - * Look up the GEM object based on the offset passed in (vma->vm_pgoff will - * contain the fake offset we created when the GTT map ioctl was called on - * the object) and map it with a call to drm_gem_mmap_obj(). - * - * If the caller is not granted access to the buffer object, the mmap will fail - * with EACCES. Please see the vma manager for more information. +/* + * Look up a GEM object in offset space based on the exact start address. The + * caller must be granted access to the object. Returns a GEM object on success + * or a negative error code on failure. The returned GEM object needs to be + * released with drm_gem_object_put(). 
*/ -int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) +static struct drm_gem_object * +drm_gem_object_lookup_from_offset(struct file *filp, unsigned long start, + unsigned long pages) { struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->minor->dev; struct drm_gem_object *obj = NULL; struct drm_vma_offset_node *node; - int ret; if (drm_dev_is_unplugged(dev)) - return -ENODEV; + return ERR_PTR(-ENODEV); drm_vma_offset_lock_lookup(dev->vma_offset_manager); node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, - vma->vm_pgoff, - vma_pages(vma)); + start, pages); if (likely(node)) { obj = container_of(node, struct drm_gem_object, vma_node); /* @@ -1235,14 +1227,85 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) drm_vma_offset_unlock_lookup(dev->vma_offset_manager); if (!obj) - return -EINVAL; + return ERR_PTR(-EINVAL); if (!drm_vma_node_is_allowed(node, priv)) { drm_gem_object_put(obj); - return -EACCES; + return ERR_PTR(-EACCES); } - ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, + return obj; +} + +/** + * drm_gem_get_unmapped_area - get memory mapping region routine for GEM objects + * @filp: DRM file pointer + * @uaddr: User address hint + * @len: Mapping length + * @pgoff: Offset (in pages) + * @flags: Mapping flags + * + * If a driver supports GEM object mapping, before ending up in drm_gem_mmap(), + * mmap calls on the DRM file descriptor will first try to find a free linear + * address space large enough for a mapping. Since GEM objects are backed by + * shmem buffers, this should preferably be handled by the shmem virtual memory + * filesystem which can appropriately align addresses to huge page sizes when + * needed. + * + * Look up the GEM object based on the offset passed in (vma->vm_pgoff will + * contain the fake offset we created) and call shmem_get_unmapped_area() with + * the right file pointer. 
+ * + * If a GEM object is not available at the given offset or if the caller is not + * granted access to it, fall back to mm_get_unmapped_area(). + */ +unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct drm_gem_object *obj; + unsigned long ret; + + obj = drm_gem_object_lookup_from_offset(filp, pgoff, len >> PAGE_SHIFT); + if (IS_ERR(obj)) + return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0, + flags); + + ret = shmem_get_unmapped_area(obj->filp, uaddr, len, 0, flags); + + drm_gem_object_put(obj); + + return ret; +} +EXPORT_SYMBOL(drm_gem_get_unmapped_area); + +/** + * drm_gem_mmap - memory map routine for GEM objects + * @filp: DRM file pointer + * @vma: VMA for the area to be mapped + * + * If a driver supports GEM object mapping, mmap calls on the DRM file + * descriptor will end up here. + * + * Look up the GEM object based on the offset passed in (vma->vm_pgoff will + * contain the fake offset we created) and map it with a call to + * drm_gem_mmap_obj(). + * + * If the caller is not granted access to the buffer object, the mmap will fail + * with EACCES. Please see the vma manager for more information. 
+ */ +int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_gem_object *obj; + int ret; + + obj = drm_gem_object_lookup_from_offset(filp, vma->vm_pgoff, + vma_pages(vma)); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + ret = drm_gem_mmap_obj(obj, + drm_vma_node_size(&obj->vma_node) << PAGE_SHIFT, vma); drm_gem_object_put(obj); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 8d48d2af2649..7c8bd67d087c 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -469,6 +469,7 @@ struct drm_gem_object { .poll = drm_poll,\ .read = drm_read,\ .llseek = noop_llseek,\ + .get_unmapped_area = drm_gem_get_unmapped_area,\ .mmap = drm_gem_mmap, \ .fop_flags = FOP_UNSIGNED_OFFSET @@ -506,6 +507,9 @@ void drm_gem_vm_close(struct vm_area_struct *vma); int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, struct vm_area_struct *vma); int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); +unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr, + unsigned long len, unsigned long pgoff, + unsigned long flags); /** * drm_gem_object_get - acquire a GEM buffer object reference diff --git a/mm/shmem.c b/mm/shmem.c index b9081b817d28..612218fc95cb 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2851,6 +2851,7 @@ unsigned long shmem_get_unmapped_area(struct file *file, return addr; return inflated_addr; } +EXPORT_SYMBOL_GPL(shmem_get_unmapped_area); #ifdef CONFIG_NUMA static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) -- 2.47.3 Add the drm_gem_huge_mnt_create() and drm_gem_has_huge_mnt() helpers to avoid code duplication in the i915, V3D, Panfrost and Panthor drivers. The former creates and mounts a dedicated huge tmpfs mountpoint, for the lifetime of a DRM device, used at GEM object initialization. The latter checks whether a dedicated huge tmpfs mountpoint is in use by a DRM device. The next commits will port drivers to this helper. 
v3: - store huge tmpfs mountpoint in drm_device v4: - return 0 in builds with CONFIG_TRANSPARENT_HUGEPAGE=n - return 0 when huge_mnt already exists - use new vfs_parse_fs_string() helper v5: - removed warning on !dev->huge_mnt and reset to NULL on free - inline drm_gem_huge_mnt_create() to remove func from text and avoid calls in builds with CONFIG_TRANSPARENT_HUGEPAGE=n - compile out drm_device's huge_mnt field in builds with CONFIG_TRANSPARENT_HUGEPAGE=n - add drm_gem_has_huge_mnt() helper Signed-off-by: Loïc Molinari --- drivers/gpu/drm/drm_gem.c | 39 +++++++++++++++++++++++++++++ include/drm/drm_device.h | 15 +++++++++++ include/drm/drm_gem.h | 52 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 106 insertions(+) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index a98d5744cc6c..161da048330e 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -29,6 +29,9 @@ #include #include #include +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#include +#endif #include #include #include @@ -82,6 +85,42 @@ * up at a later date, and as our interface with shmfs for memory allocation. 
*/ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static void drm_gem_huge_mnt_free(struct drm_device *dev, void *data) +{ + kern_unmount(dev->huge_mnt); +} + +int __drm_gem_huge_mnt_create(struct drm_device *dev, const char *value) +{ + struct file_system_type *type; + struct fs_context *fc; + int ret; + + if (unlikely(drm_gem_has_huge_mnt(dev))) + return 0; + + type = get_fs_type("tmpfs"); + if (unlikely(!type)) + return -EOPNOTSUPP; + fc = fs_context_for_mount(type, SB_KERNMOUNT); + if (IS_ERR(fc)) + return PTR_ERR(fc); + ret = vfs_parse_fs_string(fc, "source", "tmpfs"); + if (unlikely(ret)) + return -ENOPARAM; + ret = vfs_parse_fs_string(fc, "huge", value); + if (unlikely(ret)) + return -ENOPARAM; + + dev->huge_mnt = fc_mount_longterm(fc); + put_fs_context(fc); + + return drmm_add_action_or_reset(dev, drm_gem_huge_mnt_free, NULL); +} +EXPORT_SYMBOL_GPL(__drm_gem_huge_mnt_create); +#endif + static void drm_gem_init_release(struct drm_device *dev, void *ptr) { diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 778b2cca6c49..684939987d83 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -3,6 +3,9 @@ #include #include +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#include +#endif #include #include #include @@ -168,6 +171,18 @@ struct drm_device { */ struct drm_master *master; + /** + * @huge_mnt: + * + * Huge tmpfs mountpoint used at GEM object initialization + * drm_gem_object_init(). Drivers can call drm_gem_huge_mnt_create() to + * create a huge tmpfs mountpoint. The default tmpfs mountpoint + * (`shm_mnt`) is used if NULL. 
+ */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + struct vfsmount *huge_mnt; +#endif + /** * @driver_features: per-device driver features * diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 7c8bd67d087c..9845854850fb 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -492,6 +492,58 @@ struct drm_gem_object { DRM_GEM_FOPS,\ } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +int __drm_gem_huge_mnt_create(struct drm_device *dev, const char *value); +#endif + +/** + * drm_gem_huge_mnt_create - Create, mount and use a huge tmpfs mountpoint + * @dev: DRM device a huge tmpfs mountpoint should be used with + * @value: huge tmpfs mount option value + * + * This function creates and mounts a dedicated huge tmpfs mountpoint for the + * lifetime of the DRM device @dev which is used at GEM object initialization + * with drm_gem_object_init(). + * + * The most common option value @value is "within_size" which only allocates + * huge pages if the page will be fully within the GEM object size. "always", + * "advise" and "never" are supported too but the latter would just create a + * mountpoint similar to the default one (`shm_mnt`). See shmemfs and + * Transparent Hugepage for more information. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_gem_huge_mnt_create(struct drm_device *dev, + const char *value) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return __drm_gem_huge_mnt_create(dev, value); +#else + return 0; +#endif +} + +/** + * drm_gem_has_huge_mnt - Check if a huge tmpfs mountpoint is in use + * @dev: DRM device + * + * This function checks whether a huge tmpfs mountpoint is in use by DRM + * device @dev. A huge tmpfs mountpoint is used after a successful call to + * drm_gem_huge_mnt_create() on builds with Transparent Hugepage enabled. + * + * Returns: + * true if a huge tmpfs mountpoint is in use, false otherwise. 
+ */ +static inline bool drm_gem_has_huge_mnt(struct drm_device *dev) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return !!dev->huge_mnt; +#else + return false; +#endif +} + void drm_gem_object_release(struct drm_gem_object *obj); void drm_gem_object_free(struct kref *kref); int drm_gem_object_init(struct drm_device *dev, -- 2.47.3 Make use of the new drm_gem_huge_mnt_create() and drm_gem_has_huge_mnt() helpers to avoid code duplication. Now that it's just a few lines long, the single function in i915_gemfs.c is moved into v3d_gem_shmem.c. v3: - use huge tmpfs mountpoint in drm_device - move i915_gemfs.c into i915_gem_shmem.c v4: - clean up mountpoint creation error handling v5: - Use drm_gem_has_huge_mnt() helper Signed-off-by: Loïc Molinari --- drivers/gpu/drm/i915/Makefile | 3 +- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 47 +++++++++---- drivers/gpu/drm/i915/gem/i915_gemfs.c | 69 ------------------- drivers/gpu/drm/i915/gem/i915_gemfs.h | 14 ---- .../gpu/drm/i915/gem/selftests/huge_pages.c | 11 +-- drivers/gpu/drm/i915/i915_drv.h | 5 -- 6 files changed, 41 insertions(+), 108 deletions(-) delete mode 100644 drivers/gpu/drm/i915/gem/i915_gemfs.c delete mode 100644 drivers/gpu/drm/i915/gem/i915_gemfs.h diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index e58c0c158b3a..e22393a7cf6f 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -163,8 +163,7 @@ gem-y += \ gem/i915_gem_ttm_move.o \ gem/i915_gem_ttm_pm.o \ gem/i915_gem_userptr.o \ - gem/i915_gem_wait.o \ - gem/i915_gemfs.o + gem/i915_gem_wait.o i915-y += \ $(gem-y) \ i915_active.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index b9dae15c1d16..944aceac4cd3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -9,14 +9,15 @@ #include #include +#include #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_gem_object.h" #include 
"i915_gem_tiling.h" -#include "i915_gemfs.h" #include "i915_scatterlist.h" #include "i915_trace.h" +#include "i915_utils.h" /* * Move folios to appropriate lru and release the batch, decrementing the @@ -506,9 +507,9 @@ static int __create_shmem(struct drm_i915_private *i915, if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE) return -E2BIG; - if (i915->mm.gemfs) - filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size, - flags); + if (drm_gem_has_huge_mnt(&i915->drm)) + filp = shmem_file_setup_with_mnt(i915->drm.huge_mnt, "i915", + size, flags); else filp = shmem_file_setup("i915", size, flags); if (IS_ERR(filp)) @@ -635,21 +636,41 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, static int init_shmem(struct intel_memory_region *mem) { - i915_gemfs_init(mem->i915); - intel_memory_region_set_name(mem, "system"); + struct drm_i915_private *i915 = mem->i915; + int err; - return 0; /* We have fallback to the kernel mnt if gemfs init failed. */ -} + /* + * By creating our own shmemfs mountpoint, we can pass in + * mount flags that better match our usecase. + * + * One example, although it is probably better with a per-file + * control, is selecting huge page allocations ("huge=within_size"). + * However, we only do so on platforms which benefit from it, or to + * offset the overhead of iommu lookups, where with latter it is a net + * win even on platforms which would otherwise see some performance + * regressions such a slow reads issue on Broadwell and Skylake. 
+ */ -static int release_shmem(struct intel_memory_region *mem) -{ - i915_gemfs_fini(mem->i915); - return 0; + if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915)) + goto no_thp; + + err = drm_gem_huge_mnt_create(&i915->drm, "within_size"); + if (drm_gem_has_huge_mnt(&i915->drm)) + drm_info(&i915->drm, "Using Transparent Hugepages\n"); + else if (err) + drm_notice(&i915->drm, + "Transparent Hugepage support is recommended for optimal performance%s\n", + GRAPHICS_VER(i915) >= 11 ? " on this platform!" : + " when IOMMU is enabled!"); + + no_thp: + intel_memory_region_set_name(mem, "system"); + + return 0; /* We have fallback to the kernel mnt if huge mnt failed. */ } static const struct intel_memory_region_ops shmem_region_ops = { .init = init_shmem, - .release = release_shmem, .init_object = shmem_object_init, }; diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c deleted file mode 100644 index 8f13ec4ff0d0..000000000000 --- a/drivers/gpu/drm/i915/gem/i915_gemfs.c +++ /dev/null @@ -1,69 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2017 Intel Corporation - */ - -#include -#include -#include - -#include "i915_drv.h" -#include "i915_gemfs.h" -#include "i915_utils.h" - -void i915_gemfs_init(struct drm_i915_private *i915) -{ - struct file_system_type *type; - struct fs_context *fc; - struct vfsmount *gemfs; - int ret; - - /* - * By creating our own shmemfs mountpoint, we can pass in - * mount flags that better match our usecase. - * - * One example, although it is probably better with a per-file - * control, is selecting huge page allocations ("huge=within_size"). - * However, we only do so on platforms which benefit from it, or to - * offset the overhead of iommu lookups, where with latter it is a net - * win even on platforms which would otherwise see some performance - * regressions such a slow reads issue on Broadwell and Skylake. 
- */ - - if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915)) - return; - - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) - goto err; - - type = get_fs_type("tmpfs"); - if (!type) - goto err; - - fc = fs_context_for_mount(type, SB_KERNMOUNT); - if (IS_ERR(fc)) - goto err; - ret = vfs_parse_fs_string(fc, "source", "tmpfs"); - if (!ret) - ret = vfs_parse_fs_string(fc, "huge", "within_size"); - if (!ret) - gemfs = fc_mount_longterm(fc); - put_fs_context(fc); - if (ret) - goto err; - - i915->mm.gemfs = gemfs; - drm_info(&i915->drm, "Using Transparent Hugepages\n"); - return; - -err: - drm_notice(&i915->drm, - "Transparent Hugepage support is recommended for optimal performance%s\n", - GRAPHICS_VER(i915) >= 11 ? " on this platform!" : - " when IOMMU is enabled!"); -} - -void i915_gemfs_fini(struct drm_i915_private *i915) -{ - kern_unmount(i915->mm.gemfs); -} diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h deleted file mode 100644 index 16d4333c9a4e..000000000000 --- a/drivers/gpu/drm/i915/gem/i915_gemfs.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2017 Intel Corporation - */ - -#ifndef __I915_GEMFS_H__ -#define __I915_GEMFS_H__ - -struct drm_i915_private; - -void i915_gemfs_init(struct drm_i915_private *i915); -void i915_gemfs_fini(struct drm_i915_private *i915); - -#endif diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index bd08605a1611..2b9f7d86b46e 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1316,7 +1316,7 @@ typedef struct drm_i915_gem_object * static inline bool igt_can_allocate_thp(struct drm_i915_private *i915) { - return i915->mm.gemfs && has_transparent_hugepage(); + return drm_gem_has_huge_mnt(&i915->drm); } static struct drm_i915_gem_object * @@ -1761,7 +1761,8 @@ static int igt_tmpfs_fallback(void *arg) struct drm_i915_private *i915 = 
arg; struct i915_address_space *vm; struct i915_gem_context *ctx; - struct vfsmount *gemfs = i915->mm.gemfs; + struct vfsmount *huge_mnt = + drm_gem_has_huge_mnt(&i915->drm) ? i915->drm.huge_mnt : NULL; struct drm_i915_gem_object *obj; struct i915_vma *vma; struct file *file; @@ -1782,10 +1783,10 @@ static int igt_tmpfs_fallback(void *arg) /* * Make sure that we don't burst into a ball of flames upon falling back * to tmpfs, which we rely on if on the off-chance we encounter a failure - * when setting up gemfs. + * when setting up a huge mountpoint. */ - i915->mm.gemfs = NULL; + i915->drm.huge_mnt = NULL; obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) { @@ -1819,7 +1820,7 @@ static int igt_tmpfs_fallback(void *arg) out_put: i915_gem_object_put(obj); out_restore: - i915->mm.gemfs = gemfs; + i915->drm.huge_mnt = huge_mnt; i915_vm_put(vm); out: diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6a768aad8edd..1bfee23e64a3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -141,11 +141,6 @@ struct i915_gem_mm { */ atomic_t free_count; - /** - * tmpfs instance used for shmem backed objects - */ - struct vfsmount *gemfs; - struct intel_memory_region *regions[INTEL_REGION_UNKNOWN]; struct notifier_block oom_notifier; -- 2.47.3 Make use of the new drm_gem_huge_mnt_create() and drm_gem_has_huge_mnt() helpers to avoid code duplication. Now that it's just a few lines long, the single function in v3d_gemfs.c is moved into v3d_gem.c. 
v3: - use huge tmpfs mountpoint in drm_device - move v3d_gemfs.c into v3d_gem.c v4: - clean up mountpoint creation error handling v5: - fix CONFIG_TRANSPARENT_HUGEPAGE check - use drm_gem_has_huge_mnt() helper Signed-off-by: Loïc Molinari --- drivers/gpu/drm/v3d/Makefile | 3 +- drivers/gpu/drm/v3d/v3d_bo.c | 5 ++- drivers/gpu/drm/v3d/v3d_drv.c | 2 +- drivers/gpu/drm/v3d/v3d_drv.h | 11 +----- drivers/gpu/drm/v3d/v3d_gem.c | 27 +++++++++++++-- drivers/gpu/drm/v3d/v3d_gemfs.c | 60 --------------------------------- 6 files changed, 30 insertions(+), 78 deletions(-) delete mode 100644 drivers/gpu/drm/v3d/v3d_gemfs.c diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile index fcf710926057..b7d673f1153b 100644 --- a/drivers/gpu/drm/v3d/Makefile +++ b/drivers/gpu/drm/v3d/Makefile @@ -13,8 +13,7 @@ v3d-y := \ v3d_trace_points.o \ v3d_sched.o \ v3d_sysfs.o \ - v3d_submit.o \ - v3d_gemfs.o + v3d_submit.o v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index c41476ddde68..a1e5881e8b62 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -112,7 +112,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj) if (IS_ERR(sgt)) return PTR_ERR(sgt); - if (!v3d->gemfs) + if (!drm_gem_has_huge_mnt(obj->dev)) align = SZ_4K; else if (obj->size >= SZ_1M) align = SZ_1M; @@ -148,12 +148,11 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, size_t unaligned_size) { struct drm_gem_shmem_object *shmem_obj; - struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_bo *bo; int ret; shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size, - v3d->gemfs); + dev->huge_mnt); if (IS_ERR(shmem_obj)) return ERR_CAST(shmem_obj); bo = to_v3d_bo(&shmem_obj->base); diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index c5a3bbbc74c5..ce82a1ceda5f 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -106,7 +106,7 @@ 
static int v3d_get_param_ioctl(struct drm_device *dev, void *data, args->value = v3d->perfmon_info.max_counters; return 0; case DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES: - args->value = !!v3d->gemfs; + args->value = drm_gem_has_huge_mnt(dev); return 0; case DRM_V3D_PARAM_GLOBAL_RESET_COUNTER: mutex_lock(&v3d->reset_lock); diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 1884686985b8..99a39329bb85 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -158,11 +158,6 @@ struct v3d_dev { struct drm_mm mm; spinlock_t mm_lock; - /* - * tmpfs instance used for shmem backed objects - */ - struct vfsmount *gemfs; - struct work_struct overflow_mem_work; struct v3d_queue_state queue[V3D_MAX_QUEUES]; @@ -569,6 +564,7 @@ extern const struct dma_fence_ops v3d_fence_ops; struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q); /* v3d_gem.c */ +extern bool super_pages; int v3d_gem_init(struct drm_device *dev); void v3d_gem_destroy(struct drm_device *dev); void v3d_reset_sms(struct v3d_dev *v3d); @@ -576,11 +572,6 @@ void v3d_reset(struct v3d_dev *v3d); void v3d_invalidate_caches(struct v3d_dev *v3d); void v3d_clean_caches(struct v3d_dev *v3d); -/* v3d_gemfs.c */ -extern bool super_pages; -void v3d_gemfs_init(struct v3d_dev *v3d); -void v3d_gemfs_fini(struct v3d_dev *v3d); - /* v3d_submit.c */ void v3d_job_cleanup(struct v3d_job *job); void v3d_job_put(struct v3d_job *job); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index bb110d35f749..b6479a8a279e 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -258,6 +258,30 @@ v3d_invalidate_caches(struct v3d_dev *v3d) v3d_invalidate_slices(v3d, 0); } +static void +v3d_huge_mnt_init(struct v3d_dev *v3d) +{ + int err = 0; + + /* + * By using a huge shmemfs mountpoint when the user wants to + * enable Super Pages, we can pass in mount flags that better + * match our usecase. 
+ */ + + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && super_pages) + err = drm_gem_huge_mnt_create(&v3d->drm, "within_size"); + + if (drm_gem_has_huge_mnt(&v3d->drm)) + drm_info(&v3d->drm, "Using Transparent Hugepages\n"); + else if (err) + drm_warn(&v3d->drm, "Can't use Transparent Hugepages (%d)\n", + err); + else + drm_notice(&v3d->drm, + "Transparent Hugepage support is recommended for optimal performance on this platform!\n"); +} + int v3d_gem_init(struct drm_device *dev) { @@ -309,7 +333,7 @@ v3d_gem_init(struct drm_device *dev) v3d_init_hw_state(v3d); v3d_mmu_set_page_table(v3d); - v3d_gemfs_init(v3d); + v3d_huge_mnt_init(v3d); ret = v3d_sched_init(v3d); if (ret) { @@ -329,7 +353,6 @@ v3d_gem_destroy(struct drm_device *dev) enum v3d_queue q; v3d_sched_fini(v3d); - v3d_gemfs_fini(v3d); /* Waiting for jobs to finish would need to be done before * unregistering V3D. diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c deleted file mode 100644 index c1a30166c099..000000000000 --- a/drivers/gpu/drm/v3d/v3d_gemfs.c +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* Copyright (C) 2024 Raspberry Pi */ - -#include -#include -#include - -#include "v3d_drv.h" - -void v3d_gemfs_init(struct v3d_dev *v3d) -{ - struct file_system_type *type; - struct fs_context *fc; - struct vfsmount *gemfs; - int ret; - - /* - * By creating our own shmemfs mountpoint, we can pass in - * mount flags that better match our usecase. However, we - * only do so on platforms which benefit from it. 
- */ - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) - goto err; - - /* The user doesn't want to enable Super Pages */ - if (!super_pages) - goto err; - - type = get_fs_type("tmpfs"); - if (!type) - goto err; - - fc = fs_context_for_mount(type, SB_KERNMOUNT); - if (IS_ERR(fc)) - goto err; - ret = vfs_parse_fs_string(fc, "source", "tmpfs"); - if (!ret) - ret = vfs_parse_fs_string(fc, "huge", "within_size"); - if (!ret) - gemfs = fc_mount_longterm(fc); - put_fs_context(fc); - if (ret) - goto err; - - v3d->gemfs = gemfs; - drm_info(&v3d->drm, "Using Transparent Hugepages\n"); - - return; - -err: - v3d->gemfs = NULL; - drm_notice(&v3d->drm, - "Transparent Hugepage support is recommended for optimal performance on this platform!\n"); -} - -void v3d_gemfs_fini(struct v3d_dev *v3d) -{ - if (v3d->gemfs) - kern_unmount(v3d->gemfs); -} -- 2.47.3 drm_gem_object_init_with_mnt() and drm_gem_shmem_create_with_mnt() can be removed now that the drivers use the new drm_gem_huge_mnt_create() and drm_gem_has_huge_mnt() helpers. 
v5: - use drm_gem_has_huge_mnt() helper - compile out shmem_file_setup_with_mnt() call in builds with CONFIG_TRANSPARENT_HUGEPAGE=n Signed-off-by: Loïc Molinari Reviewed-by: Boris Brezillon --- drivers/gpu/drm/drm_gem.c | 38 ++++++++------------------ drivers/gpu/drm/drm_gem_shmem_helper.c | 38 ++++++-------------------- drivers/gpu/drm/v3d/v3d_bo.c | 3 +- include/drm/drm_gem.h | 3 -- include/drm/drm_gem_shmem_helper.h | 3 -- 5 files changed, 21 insertions(+), 64 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 161da048330e..ffb1d374d259 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -153,31 +153,33 @@ drm_gem_init(struct drm_device *dev) } /** - * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM - * object in a given shmfs mountpoint + * drm_gem_object_init - initialize an allocated shmem-backed GEM object * * @dev: drm_device the object should be initialized for * @obj: drm_gem_object to initialize * @size: object size - * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use - * the usual tmpfs mountpoint (`shm_mnt`). * * Initialize an already allocated GEM object of the specified size with - * shmfs backing store. + * shmfs backing store. A huge mountpoint can be used by calling + * drm_gem_huge_mnt_create() beforehand. 
*/ -int drm_gem_object_init_with_mnt(struct drm_device *dev, - struct drm_gem_object *obj, size_t size, - struct vfsmount *gemfs) +int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, + size_t size) { struct file *filp; drm_gem_private_object_init(dev, obj, size); - if (gemfs) - filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size, +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (drm_gem_has_huge_mnt(dev)) + filp = shmem_file_setup_with_mnt(dev->huge_mnt, + "drm mm object", size, VM_NORESERVE); else filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); +#else + filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); +#endif if (IS_ERR(filp)) return PTR_ERR(filp); @@ -186,22 +188,6 @@ int drm_gem_object_init_with_mnt(struct drm_device *dev, return 0; } -EXPORT_SYMBOL(drm_gem_object_init_with_mnt); - -/** - * drm_gem_object_init - initialize an allocated shmem-backed GEM object - * @dev: drm_device the object should be initialized for - * @obj: drm_gem_object to initialize - * @size: object size - * - * Initialize an already allocated GEM object of the specified size with - * shmfs backing store. 
- */ -int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, - size_t size) -{ - return drm_gem_object_init_with_mnt(dev, obj, size, NULL); -} EXPORT_SYMBOL(drm_gem_object_init); /** diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index b7238448d4f9..415d19136de1 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -50,7 +50,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { }; static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, - size_t size, bool private, struct vfsmount *gemfs) + size_t size, bool private) { struct drm_gem_object *obj = &shmem->base; int ret = 0; @@ -62,7 +62,7 @@ static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_obj drm_gem_private_object_init(dev, obj, size); shmem->map_wc = false; /* dma-buf mappings use always writecombine */ } else { - ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs); + ret = drm_gem_object_init(dev, obj, size); } if (ret) { drm_gem_private_object_fini(obj); @@ -103,13 +103,12 @@ static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_obj */ int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size) { - return __drm_gem_shmem_init(dev, shmem, size, false, NULL); + return __drm_gem_shmem_init(dev, shmem, size, false); } EXPORT_SYMBOL_GPL(drm_gem_shmem_init); static struct drm_gem_shmem_object * -__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, - struct vfsmount *gemfs) +__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) { struct drm_gem_shmem_object *shmem; struct drm_gem_object *obj; @@ -129,7 +128,7 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, obj = &shmem->base; } - ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs); + ret = __drm_gem_shmem_init(dev, shmem, size, private); 
if (ret) { kfree(obj); return ERR_PTR(ret); @@ -150,31 +149,10 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, */ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size) { - return __drm_gem_shmem_create(dev, size, false, NULL); + return __drm_gem_shmem_create(dev, size, false); } EXPORT_SYMBOL_GPL(drm_gem_shmem_create); -/** - * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a - * given mountpoint - * @dev: DRM device - * @size: Size of the object to allocate - * @gemfs: tmpfs mount where the GEM object will be created - * - * This function creates a shmem GEM object in a given tmpfs mountpoint. - * - * Returns: - * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative - * error code on failure. - */ -struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev, - size_t size, - struct vfsmount *gemfs) -{ - return __drm_gem_shmem_create(dev, size, false, gemfs); -} -EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt); - /** * drm_gem_shmem_release - Release resources associated with a shmem GEM object. 
* @shmem: shmem GEM object @@ -906,7 +884,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, size_t size = PAGE_ALIGN(attach->dmabuf->size); struct drm_gem_shmem_object *shmem; - shmem = __drm_gem_shmem_create(dev, size, true, NULL); + shmem = __drm_gem_shmem_create(dev, size, true); if (IS_ERR(shmem)) return ERR_CAST(shmem); @@ -954,7 +932,7 @@ struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev, size = PAGE_ALIGN(attach->dmabuf->size); - shmem = __drm_gem_shmem_create(dev, size, true, NULL); + shmem = __drm_gem_shmem_create(dev, size, true); if (IS_ERR(shmem)) { ret = PTR_ERR(shmem); goto fail_detach; diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index a1e5881e8b62..e0517c0d5896 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -151,8 +151,7 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, struct v3d_bo *bo; int ret; - shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size, - dev->huge_mnt); + shmem_obj = drm_gem_shmem_create(dev, unaligned_size); if (IS_ERR(shmem_obj)) return ERR_CAST(shmem_obj); bo = to_v3d_bo(&shmem_obj->base); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 9845854850fb..b57017ed38d1 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -548,9 +548,6 @@ void drm_gem_object_release(struct drm_gem_object *obj); void drm_gem_object_free(struct kref *kref); int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); -int drm_gem_object_init_with_mnt(struct drm_device *dev, - struct drm_gem_object *obj, size_t size, - struct vfsmount *gemfs); void drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); void drm_gem_private_object_fini(struct drm_gem_object *obj); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 589f7bfe7506..6b6478f5ca24 100644 --- 
a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -109,9 +109,6 @@ struct drm_gem_shmem_object { int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size); struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); -struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev, - size_t size, - struct vfsmount *gemfs); void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); -- 2.47.3 Introduce the 'panthor.transparent_hugepage' boolean module parameter (false by default). When the parameter is set to true, a new tmpfs mountpoint is created and mounted using the 'huge=within_size' option. It's then used at GEM object creation instead of the default 'shm_mnt' mountpoint in order to enable Transparent Hugepage (THP) for the object (without having to rely on a system wide parameter). v3: - use huge tmpfs mountpoint in drm_device v4: - fix builds with CONFIG_TRANSPARENT_HUGEPAGE=n - clean up mountpoint creation error handling - print negative error value v5: - use drm_gem_has_huge_mnt() helper - get rid of CONFIG_TRANSPARENT_HUGEPAGE ifdefs Signed-off-by: Loïc Molinari Reviewed-by: Boris Brezillon --- drivers/gpu/drm/panthor/panthor_device.c | 3 +++ drivers/gpu/drm/panthor/panthor_drv.c | 7 +++++++ drivers/gpu/drm/panthor/panthor_drv.h | 9 +++++++++ drivers/gpu/drm/panthor/panthor_gem.c | 18 ++++++++++++++++++ drivers/gpu/drm/panthor/panthor_gem.h | 2 ++ 5 files changed, 39 insertions(+) create mode 100644 drivers/gpu/drm/panthor/panthor_drv.h diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index c7033d82cef5..4ced0bef22e8 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -17,6 +17,7 @@ #include "panthor_devfreq.h" #include "panthor_device.h" #include "panthor_fw.h" +#include
"panthor_gem.h" #include "panthor_gpu.h" #include "panthor_hw.h" #include "panthor_mmu.h" @@ -271,6 +272,8 @@ int panthor_device_init(struct panthor_device *ptdev) if (ret) goto err_unplug_fw; + panthor_gem_init(ptdev); + /* ~3 frames */ pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50); pm_runtime_use_autosuspend(ptdev->base.dev); diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index fb4b293f17f0..9e49af1d0959 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -1556,6 +1556,7 @@ static const struct file_operations panthor_drm_driver_fops = { .read = drm_read, .llseek = noop_llseek, .mmap = panthor_mmap, + .get_unmapped_area = drm_gem_get_unmapped_area, .show_fdinfo = drm_show_fdinfo, .fop_flags = FOP_UNSIGNED_OFFSET, }; @@ -1623,6 +1624,12 @@ static const struct drm_driver panthor_drm_driver = { #endif }; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +bool panthor_transparent_hugepage; +module_param_named(transparent_hugepage, panthor_transparent_hugepage, bool, 0400); +MODULE_PARM_DESC(transparent_hugepage, "Use a dedicated tmpfs mount point with Transparent Hugepage enabled (false = default)"); +#endif + static int panthor_probe(struct platform_device *pdev) { struct panthor_device *ptdev; diff --git a/drivers/gpu/drm/panthor/panthor_drv.h b/drivers/gpu/drm/panthor/panthor_drv.h new file mode 100644 index 000000000000..79dccd289881 --- /dev/null +++ b/drivers/gpu/drm/panthor/panthor_drv.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT +/* Copyright 2025 Amazon.com, Inc. 
or its affiliates */ + +#ifndef __PANTHOR_DRV_H__ +#define __PANTHOR_DRV_H__ + +extern bool panthor_transparent_hugepage; + +#endif diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index 156c7a0b62a2..b5326b29417c 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 or MIT /* Copyright 2019 Linaro, Ltd, Rob Herring */ /* Copyright 2023 Collabora ltd. */ +/* Copyright 2025 Amazon.com, Inc. or its affiliates */ #include #include @@ -11,10 +12,27 @@ #include #include "panthor_device.h" +#include "panthor_drv.h" #include "panthor_fw.h" #include "panthor_gem.h" #include "panthor_mmu.h" +void panthor_gem_init(struct panthor_device *ptdev) +{ + int err; + + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + !panthor_transparent_hugepage) + return; + + err = drm_gem_huge_mnt_create(&ptdev->base, "within_size"); + if (drm_gem_has_huge_mnt(&ptdev->base)) + drm_info(&ptdev->base, "Using Transparent Hugepage\n"); + else if (err) + drm_warn(&ptdev->base, "Can't use Transparent Hugepage (%d)\n", + err); +} + #ifdef CONFIG_DEBUG_FS static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) { diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h index 80c6e24112d0..2eefe9104e5e 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.h +++ b/drivers/gpu/drm/panthor/panthor_gem.h @@ -136,6 +136,8 @@ struct panthor_gem_object *to_panthor_bo(struct drm_gem_object *obj) return container_of(to_drm_gem_shmem_obj(obj), struct panthor_gem_object, base); } +void panthor_gem_init(struct panthor_device *ptdev); + struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size); int -- 2.47.3 Log the number of pages and their sizes actually mapped/unmapped by the IOMMU page table driver. 
Since a map/unmap op is often split in several ops depending on the underlying scatter/gather table, add the start address and the total size to the debugging logs in order to help understand which batch an op is part of. Signed-off-by: Loïc Molinari Reviewed-by: Boris Brezillon --- drivers/gpu/drm/panthor/panthor_mmu.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 6dec4354e378..aefbd83d1a75 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -917,10 +917,9 @@ static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) { struct panthor_device *ptdev = vm->ptdev; struct io_pgtable_ops *ops = vm->pgtbl_ops; + u64 start_iova = iova; u64 offset = 0; - drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size); - while (offset < size) { size_t unmapped_sz = 0, pgcount; size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount); @@ -935,6 +934,12 @@ static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) panthor_vm_flush_range(vm, iova, offset + unmapped_sz); return -EINVAL; } + + drm_dbg(&ptdev->base, + "unmap: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pgcnt=%zu, pgsz=%zu", + vm->as.id, start_iova, size, iova + offset, + unmapped_sz / pgsize, pgsize); + offset += unmapped_sz; } @@ -950,6 +955,7 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, struct scatterlist *sgl; struct io_pgtable_ops *ops = vm->pgtbl_ops; u64 start_iova = iova; + u64 start_size = size; int ret; if (!size) @@ -969,15 +975,18 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, len = min_t(size_t, len, size); size -= len; - drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx", - vm->as.id, iova, &paddr, len); - while (len) { size_t pgcount, mapped = 0; size_t pgsize = get_pgsize(iova | paddr, len, &pgcount); ret = 
ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_KERNEL, &mapped); + + drm_dbg(&ptdev->base, + "map: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pa=%pad, pgcnt=%zu, pgsz=%zu", + vm->as.id, start_iova, start_size, iova, &paddr, + mapped / pgsize, pgsize); + + iova += mapped; + paddr += mapped; + len -= mapped; -- 2.47.3 Introduce the 'panfrost.transparent_hugepage' boolean module parameter (false by default). When the parameter is set to true, a new tmpfs mountpoint is created and mounted using the 'huge=within_size' option. It's then used at GEM object creation instead of the default 'shm_mnt' mountpoint in order to enable Transparent Hugepage (THP) for the object (without having to rely on a system wide parameter). v3: - use huge tmpfs mountpoint in drm_device v4: - fix builds with CONFIG_TRANSPARENT_HUGEPAGE=n - clean up mountpoint creation error handling - print negative error value v5: - use drm_gem_has_huge_mnt() helper - get rid of CONFIG_TRANSPARENT_HUGEPAGE ifdefs Signed-off-by: Loïc Molinari Reviewed-by: Boris Brezillon --- drivers/gpu/drm/panfrost/panfrost_device.c | 3 +++ drivers/gpu/drm/panfrost/panfrost_drv.c | 6 ++++++ drivers/gpu/drm/panfrost/panfrost_drv.h | 9 +++++++++ drivers/gpu/drm/panfrost/panfrost_gem.c | 18 ++++++++++++++++++ drivers/gpu/drm/panfrost/panfrost_gem.h | 2 ++ 5 files changed, 38 insertions(+) create mode 100644 drivers/gpu/drm/panfrost/panfrost_drv.h diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index c61b97af120c..dedc13e56631 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -12,6 +12,7 @@ #include "panfrost_device.h" #include "panfrost_devfreq.h" #include "panfrost_features.h" +#include "panfrost_gem.h" #include "panfrost_issues.h" #include "panfrost_gpu.h" #include "panfrost_job.h" @@ -267,6 +268,8 @@ int panfrost_device_init(struct panfrost_device *pfdev) if (err) goto out_job; + panfrost_gem_init(pfdev); + 
return 0; out_job: panfrost_jm_fini(pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 1c3c574cd64a..500c1ce96b0a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -857,6 +857,12 @@ static const struct drm_driver panfrost_drm_driver = { #endif }; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +bool panfrost_transparent_hugepage; +module_param_named(transparent_hugepage, panfrost_transparent_hugepage, bool, 0400); +MODULE_PARM_DESC(transparent_hugepage, "Use a dedicated tmpfs mount point with Transparent Hugepage enabled (false = default)"); +#endif + static int panfrost_probe(struct platform_device *pdev) { struct panfrost_device *pfdev; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.h b/drivers/gpu/drm/panfrost/panfrost_drv.h new file mode 100644 index 000000000000..edeb093eb6da --- /dev/null +++ b/drivers/gpu/drm/panfrost/panfrost_drv.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT +/* Copyright 2025 Amazon.com, Inc. or its affiliates */ + +#ifndef __PANFROST_DRV_H__ +#define __PANFROST_DRV_H__ + +extern bool panfrost_transparent_hugepage; + +#endif diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 0528de674a4f..50fdcb4ef453 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright 2019 Linaro, Ltd, Rob Herring */ +/* Copyright 2025 Amazon.com, Inc. 
or its affiliates */ #include #include @@ -9,9 +10,26 @@ #include #include "panfrost_device.h" +#include "panfrost_drv.h" #include "panfrost_gem.h" #include "panfrost_mmu.h" +void panfrost_gem_init(struct panfrost_device *pfdev) +{ + int err; + + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + !panfrost_transparent_hugepage) + return; + + err = drm_gem_huge_mnt_create(&pfdev->base, "within_size"); + if (drm_gem_has_huge_mnt(&pfdev->base)) + drm_info(&pfdev->base, "Using Transparent Hugepage\n"); + else if (err) + drm_warn(&pfdev->base, "Can't use Transparent Hugepage (%d)\n", + err); +} + #ifdef CONFIG_DEBUG_FS static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev, struct panfrost_gem_object *bo) diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index 8de3e76f2717..1a62529ff06f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -124,6 +124,8 @@ drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node) return container_of(node, struct panfrost_gem_mapping, mmnode); } +void panfrost_gem_init(struct panfrost_device *pfdev); + struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size); struct drm_gem_object * -- 2.47.3 Add a paragraph to the GEM objects mapping section explaining how transparent huge pages are handled by GEM. 
v4: - fix wording after huge_pages handler removal Signed-off-by: Loïc Molinari Reviewed-by: Bagas Sanjaya --- Documentation/gpu/drm-mm.rst | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index d55751cad67c..3d6176adc7ca 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -283,6 +283,9 @@ made up of several fields, the more interesting ones being: void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); vm_fault_t (*fault)(struct vm_fault *vmf); + vm_fault_t (*map_pages)(struct vm_fault *vmf, + pgoff_t start_pgoff, + pgoff_t end_pgoff); }; @@ -290,15 +293,27 @@ The open and close operations must update the GEM object reference count. Drivers can use the drm_gem_vm_open() and drm_gem_vm_close() helper functions directly as open and close handlers. -The fault operation handler is responsible for mapping individual pages -to userspace when a page fault occurs. Depending on the memory -allocation scheme, drivers can allocate pages at fault time, or can -decide to allocate memory for the GEM object at the time the object is -created. +The fault and map_pages operations are responsible for mapping pages to +userspace when a page fault occurs. Depending on the memory allocation +scheme, drivers can allocate pages at fault time, or can decide to +allocate memory for the GEM object at the time the object is created. Drivers that want to map the GEM object upfront instead of handling page faults can implement their own mmap file operation handler. 
+In order to reduce page table overhead, if the internal shmem mountpoint +"shm_mnt" is configured to use transparent huge pages (for builds with +CONFIG_TRANSPARENT_HUGEPAGE enabled) and if the shmem backing store +managed to allocate a huge page for a faulty address, the fault and +map_pages handlers will first attempt to insert that huge page into the +VMA before falling back to individual page insertion. mmap() user +address alignment for GEM objects is handled by providing a custom +get_unmapped_area file operation which forwards to the shmem backing +store. For most drivers, which don't create a huge mountpoint by default +or through a module parameter, transparent huge pages can be enabled by +either setting the "transparent_hugepage_shmem" kernel parameter or the +"/sys/kernel/mm/transparent_hugepage/shmem_enabled" sysfs knob. + For platforms without MMU the GEM core provides a helper method drm_gem_dma_get_unmapped_area(). The mmap() routines will call this to get a proposed address for the mapping. -- 2.47.3