Return the exact VM_FAULT_ mask from drm_gem_shmem_try_map_pmd() to
give the caller better insight into the result. Return 0 if nothing
was done. If the caller sees VM_FAULT_NOPAGE,
drm_gem_shmem_try_map_pmd() added a PMD entry to the page table. As
before, return early from the page-fault handler in that case.

Signed-off-by: Thomas Zimmermann
Suggested-by: Matthew Wilcox
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 3871a6d92f77..e7316dc7e921 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -550,8 +550,8 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
 
-static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
-				      struct page *page)
+static vm_fault_t drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
+					    struct page *page)
 {
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
 	unsigned long pfn = page_to_pfn(page);
@@ -562,12 +562,11 @@ static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
 	    pmd_none(*vmf->pmd) &&
 	    folio_test_pmd_mappable(page_folio(page))) {
 		pfn &= PMD_MASK >> PAGE_SHIFT;
-		if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
-			return true;
+		return vmf_insert_pfn_pmd(vmf, pfn, false);
 	}
 #endif
 
-	return false;
+	return 0;
 }
 
 static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
@@ -593,10 +592,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 		goto out;
 	}
 
-	if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
-		ret = VM_FAULT_NOPAGE;
+	ret = drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset]);
+	if (ret == VM_FAULT_NOPAGE)
 		goto out;
-	}
 
 	pfn = page_to_pfn(pages[page_offset]);
 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
-- 
2.52.0
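
Note: for reference, this is roughly the resulting call-site logic in
drm_gem_shmem_fault(), with comments spelling out the new return
contract. It is a commented sketch of the last hunk above, not extra
code; the fall-back to vmf_insert_pfn() on any non-NOPAGE result is
the assumption the comments make explicit.

	/*
	 * Try a huge (PMD-sized) mapping first. The helper now returns
	 * the exact VM_FAULT_ mask from vmf_insert_pfn_pmd(), or 0 if
	 * no PMD mapping was attempted.
	 */
	ret = drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset]);
	if (ret == VM_FAULT_NOPAGE)
		goto out; /* PMD entry installed; the fault is handled */

	/*
	 * Any other value (0, or an error such as VM_FAULT_FALLBACK)
	 * means no PMD entry was added; fall back to mapping a single
	 * PTE for the faulting page.
	 */
	pfn = page_to_pfn(pages[page_offset]);
	ret = vmf_insert_pfn(vma, vmf->address, pfn);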