From: Ankit Agrawal

The memory failure handling implementation for PFNMAP memory with no
struct pages is faulty. The VA of the mapping is determined based on
the PFN. It should instead be based on the file mapping offset.

When poison occurs, memory_failure_pfn() is triggered on the poisoned
PFN. Introduce a callback function that allows mm to translate the PFN
to the corresponding file page offset. The kernel module using the
registration API must implement the callback and provide the
translation. The translated value is then used to determine the VA and
send SIGBUS to the usermode process that has the poisoned PFN mapped.

The callback also notifies the driver of the poisoned PFN, which the
driver may then track.

Fixes: 2ec41967189c ("mm: handle poisoning of pfn without struct pages")
Suggested-by: Jason Gunthorpe
Signed-off-by: Ankit Agrawal
---
 include/linux/memory-failure.h |  2 ++
 mm/memory-failure.c            | 29 ++++++++++++++++++-----------
 2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h
index bc326503d2d2..7b5e11cf905f 100644
--- a/include/linux/memory-failure.h
+++ b/include/linux/memory-failure.h
@@ -9,6 +9,8 @@ struct pfn_address_space;
 struct pfn_address_space {
        struct interval_tree_node node;
        struct address_space *mapping;
+       int (*pfn_to_vma_pgoff)(struct vm_area_struct *vma,
+                               unsigned long pfn, pgoff_t *pgoff);
 };
 
 int register_pfn_address_space(struct pfn_address_space *pfn_space);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index fbc5a01260c8..c80c2907da33 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2161,6 +2161,9 @@ int register_pfn_address_space(struct pfn_address_space *pfn_space)
 {
        guard(mutex)(&pfn_space_lock);
 
+       if (!pfn_space->pfn_to_vma_pgoff)
+               return -EINVAL;
+
        if (interval_tree_iter_first(&pfn_space_itree,
                                     pfn_space->node.start,
                                     pfn_space->node.last))
@@ -2183,10 +2186,10 @@ void unregister_pfn_address_space(struct pfn_address_space *pfn_space)
 }
 EXPORT_SYMBOL_GPL(unregister_pfn_address_space);
 
-static void add_to_kill_pfn(struct task_struct *tsk,
-                            struct vm_area_struct *vma,
-                            struct list_head *to_kill,
-                            unsigned long pfn)
+static void add_to_kill_pgoff(struct task_struct *tsk,
+                              struct vm_area_struct *vma,
+                              struct list_head *to_kill,
+                              pgoff_t pgoff)
 {
        struct to_kill *tk;
 
@@ -2197,12 +2200,12 @@ static void add_to_kill_pfn(struct task_struct *tsk,
        }
 
        /* Check for pgoff not backed by struct page */
-       tk->addr = vma_address(vma, pfn, 1);
+       tk->addr = vma_address(vma, pgoff, 1);
        tk->size_shift = PAGE_SHIFT;
 
        if (tk->addr == -EFAULT)
                pr_info("Unable to find address %lx in %s\n",
-                       pfn, tsk->comm);
+                       pgoff, tsk->comm);
 
        get_task_struct(tsk);
        tk->tsk = tsk;
@@ -2212,11 +2215,12 @@ static void add_to_kill_pfn(struct task_struct *tsk,
 /*
  * Collect processes when the error hit a PFN not backed by struct page.
  */
-static void collect_procs_pfn(struct address_space *mapping,
+static void collect_procs_pfn(struct pfn_address_space *pfn_space,
                              unsigned long pfn, struct list_head *to_kill)
 {
        struct vm_area_struct *vma;
        struct task_struct *tsk;
+       struct address_space *mapping = pfn_space->mapping;
 
        i_mmap_lock_read(mapping);
        rcu_read_lock();
@@ -2226,9 +2230,12 @@ static void collect_procs_pfn(struct address_space *mapping,
                t = task_early_kill(tsk, true);
                if (!t)
                        continue;
-               vma_interval_tree_foreach(vma, &mapping->i_mmap, pfn, pfn) {
-                       if (vma->vm_mm == t->mm)
-                               add_to_kill_pfn(t, vma, to_kill, pfn);
+               vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, ULONG_MAX) {
+                       pgoff_t pgoff;
+
+                       if (vma->vm_mm == t->mm &&
+                           !pfn_space->pfn_to_vma_pgoff(vma, pfn, &pgoff))
+                               add_to_kill_pgoff(t, vma, to_kill, pgoff);
                }
        }
        rcu_read_unlock();
@@ -2264,7 +2271,7 @@ static int memory_failure_pfn(unsigned long pfn, int flags)
                struct pfn_address_space *pfn_space =
                        container_of(node, struct pfn_address_space, node);
 
-               collect_procs_pfn(pfn_space->mapping, pfn, &tokill);
+               collect_procs_pfn(pfn_space, pfn, &tokill);
                mf_handled = true;
        }
 
-- 
2.34.1
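
As a usage illustration of the new interface, a driver that maps one
linear region might wire up the callback roughly as below. This is only
a sketch: struct my_drv_region, its fields, the use of vm_private_data
and the assumption that the whole region is mapped starting at
vma->vm_pgoff are hypothetical and not part of this series; the
nvgrace-gpu patch later in the series shows a real implementation.

#include <linux/memory-failure.h>

/* Hypothetical per-region bookkeeping kept by the driver */
struct my_drv_region {
        unsigned long start_pfn;
        unsigned long nr_pages;
        struct pfn_address_space pfn_space;
};

static int my_drv_pfn_to_vma_pgoff(struct vm_area_struct *vma,
                                   unsigned long pfn, pgoff_t *pgoff)
{
        struct my_drv_region *r = vma->vm_private_data; /* hypothetical */

        /* Reject PFNs outside the region backing this VMA */
        if (pfn < r->start_pfn || pfn >= r->start_pfn + r->nr_pages)
                return -EINVAL;

        /* Whole region assumed mapped linearly starting at vm_pgoff */
        *pgoff = vma->vm_pgoff + (pfn - r->start_pfn);
        return 0;
}

static int my_drv_register_region(struct my_drv_region *r,
                                  struct address_space *mapping)
{
        r->pfn_space.node.start = r->start_pfn;
        r->pfn_space.node.last = r->start_pfn + r->nr_pages - 1;
        r->pfn_space.mapping = mapping;
        r->pfn_space.pfn_to_vma_pgoff = my_drv_pfn_to_vma_pgoff;

        /* Rejected with -EINVAL if the callback is left NULL */
        return register_pfn_address_space(&r->pfn_space);
}
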
From: Ankit Agrawal

Add stubs to handle the case where CONFIG_MEMORY_FAILURE is disabled.

Suggested-by: Alex Williamson
Signed-off-by: Ankit Agrawal
---
 include/linux/memory-failure.h | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h
index 7b5e11cf905f..d333dcdbeae7 100644
--- a/include/linux/memory-failure.h
+++ b/include/linux/memory-failure.h
@@ -4,8 +4,6 @@
 
 #include
 
-struct pfn_address_space;
-
 struct pfn_address_space {
        struct interval_tree_node node;
        struct address_space *mapping;
@@ -13,7 +11,18 @@ struct pfn_address_space {
                                unsigned long pfn, pgoff_t *pgoff);
 };
 
+#ifdef CONFIG_MEMORY_FAILURE
 int register_pfn_address_space(struct pfn_address_space *pfn_space);
 void unregister_pfn_address_space(struct pfn_address_space *pfn_space);
+#else
+static inline int register_pfn_address_space(struct pfn_address_space *pfn_space)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void unregister_pfn_address_space(struct pfn_address_space *pfn_space)
+{
+}
+#endif /* CONFIG_MEMORY_FAILURE */
 
 #endif /* _LINUX_MEMORY_FAILURE_H */
-- 
2.34.1
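
With these stubs a caller does not need its own CONFIG_MEMORY_FAILURE
#ifdef; it can call the registration API unconditionally and treat
-EOPNOTSUPP as "poison handling unavailable on this kernel". A minimal
sketch of that pattern (the helper name is hypothetical; the next patch
uses the same ret != -EOPNOTSUPP check):

#include <linux/memory-failure.h>

/* Hypothetical helper: enable poison handling where the kernel supports it */
static int my_drv_enable_poison_handling(struct pfn_address_space *pfn_space)
{
        int ret = register_pfn_address_space(pfn_space);

        /* The !CONFIG_MEMORY_FAILURE stub returns -EOPNOTSUPP */
        if (ret == -EOPNOTSUPP)
                return 0;       /* not fatal, just no poison handling */

        return ret;
}
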
From: Ankit Agrawal

The nvgrace-gpu module [1] maps the device memory to the user VA (QEMU)
without adding the memory to the kernel. The device memory pages are
PFNMAP and not backed by struct page. The module can thus utilize the
MM's PFNMAP memory_failure mechanism that handles ECC/poison on regions
with no struct pages. The kernel MM code exposes register/unregister
APIs allowing modules to register the device memory for memory_failure
handling.

Make nvgrace-gpu register the GPU memory with the MM on open. The
module registers its memory region and address_space with the kernel MM
for ECC handling and implements a callback function to convert the PFN
to the file page offset. The callback function checks that the PFN
belongs to the device memory region and is contained in the VMA range;
an error is returned otherwise.

Link: https://lore.kernel.org/all/20240220115055.23546-1-ankita@nvidia.com/ [1]
Suggested-by: Alex Williamson
Suggested-by: Jason Gunthorpe
Signed-off-by: Ankit Agrawal
---
 drivers/vfio/pci/nvgrace-gpu/main.c | 116 +++++++++++++++++++++++++++-
 1 file changed, 112 insertions(+), 4 deletions(-)

diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
index 84d142a47ec6..fdfb961a6972 100644
--- a/drivers/vfio/pci/nvgrace-gpu/main.c
+++ b/drivers/vfio/pci/nvgrace-gpu/main.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * The device memory usable to the workloads running in the VM is cached
@@ -49,6 +50,7 @@ struct mem_region {
                void *memaddr;
                void __iomem *ioaddr;
        };                      /* Base virtual address of the region */
+       struct pfn_address_space pfn_address_space;
 };
 
 struct nvgrace_gpu_pci_core_device {
@@ -88,6 +90,83 @@ nvgrace_gpu_memregion(int index,
        return NULL;
 }
 
+static inline int pfn_memregion_offset(struct nvgrace_gpu_pci_core_device *nvdev,
+                                      unsigned int index,
+                                      unsigned long pfn,
+                                      u64 *pfn_offset_in_region)
+{
+       struct mem_region *region;
+       unsigned long start_pfn, num_pages;
+
+       region = nvgrace_gpu_memregion(index, nvdev);
+       if (!region)
+               return -ENOENT;
+
+       start_pfn = PHYS_PFN(region->memphys);
+       num_pages = region->memlength >> PAGE_SHIFT;
+
+       if (pfn < start_pfn || pfn >= start_pfn + num_pages)
+               return -EINVAL;
+
+       *pfn_offset_in_region = pfn - start_pfn;
+
+       return 0;
+}
+
+static inline struct nvgrace_gpu_pci_core_device *
+vma_to_nvdev(struct vm_area_struct *vma);
+
+static int nvgrace_gpu_pfn_to_vma_pgoff(struct vm_area_struct *vma,
+                                       unsigned long pfn,
+                                       pgoff_t *pgoff)
+{
+       struct nvgrace_gpu_pci_core_device *nvdev;
+       unsigned int index =
+               vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+       u64 vma_offset_in_region = vma->vm_pgoff &
+               ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+       u64 pfn_offset_in_region;
+       int ret;
+
+       nvdev = vma_to_nvdev(vma);
+       if (!nvdev)
+               return -EPERM;
+
+       ret = pfn_memregion_offset(nvdev, index, pfn, &pfn_offset_in_region);
+       if (ret)
+               return ret;
+
+       /* Ensure PFN is not before VMA's start within the region */
+       if (pfn_offset_in_region < vma_offset_in_region)
+               return -EINVAL;
+
+       /* Calculate offset from VMA start */
+       *pgoff = vma->vm_pgoff +
+                (pfn_offset_in_region - vma_offset_in_region);
+
+       return 0;
+}
+
+static int
+nvgrace_gpu_vfio_pci_register_pfn_range(struct vfio_device *core_vdev,
+                                       struct mem_region *region)
+{
+       int ret;
+       unsigned long pfn, nr_pages;
+
+       pfn = PHYS_PFN(region->memphys);
+       nr_pages = region->memlength >> PAGE_SHIFT;
+
+       region->pfn_address_space.node.start = pfn;
+       region->pfn_address_space.node.last = pfn + nr_pages - 1;
+       region->pfn_address_space.mapping = core_vdev->inode->i_mapping;
+       region->pfn_address_space.pfn_to_vma_pgoff = nvgrace_gpu_pfn_to_vma_pgoff;
+
+       ret = register_pfn_address_space(&region->pfn_address_space);
+
+       return ret;
+}
+
 static int nvgrace_gpu_open_device(struct vfio_device *core_vdev)
 {
        struct vfio_pci_core_device *vdev =
@@ -114,14 +193,28 @@ static int nvgrace_gpu_open_device(struct vfio_device *core_vdev)
         * memory mapping.
         */
        ret = vfio_pci_core_setup_barmap(vdev, 0);
-       if (ret) {
-               vfio_pci_core_disable(vdev);
-               return ret;
+       if (ret)
+               goto error_exit;
+
+       if (nvdev->resmem.memlength) {
+               ret = nvgrace_gpu_vfio_pci_register_pfn_range(core_vdev, &nvdev->resmem);
+               if (ret && ret != -EOPNOTSUPP)
+                       goto error_exit;
        }
 
-       vfio_pci_core_finish_enable(vdev);
+       ret = nvgrace_gpu_vfio_pci_register_pfn_range(core_vdev, &nvdev->usemem);
+       if (ret && ret != -EOPNOTSUPP)
+               goto register_mem_failed;
 
+       vfio_pci_core_finish_enable(vdev);
        return 0;
+
+register_mem_failed:
+       if (nvdev->resmem.memlength)
+               unregister_pfn_address_space(&nvdev->resmem.pfn_address_space);
+error_exit:
+       vfio_pci_core_disable(vdev);
+       return ret;
 }
 
 static void nvgrace_gpu_close_device(struct vfio_device *core_vdev)
@@ -130,6 +223,11 @@ static void nvgrace_gpu_close_device(struct vfio_device *core_vdev)
                container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
                             core_device.vdev);
 
+       if (nvdev->resmem.memlength)
+               unregister_pfn_address_space(&nvdev->resmem.pfn_address_space);
+
+       unregister_pfn_address_space(&nvdev->usemem.pfn_address_space);
+
        /* Unmap the mapping to the device memory cached region */
        if (nvdev->usemem.memaddr) {
                memunmap(nvdev->usemem.memaddr);
@@ -247,6 +345,16 @@ static const struct vm_operations_struct nvgrace_gpu_vfio_pci_mmap_ops = {
 #endif
 };
 
+static inline struct nvgrace_gpu_pci_core_device *
+vma_to_nvdev(struct vm_area_struct *vma)
+{
+       /* Check if this VMA belongs to us */
+       if (vma->vm_ops != &nvgrace_gpu_vfio_pci_mmap_ops)
+               return NULL;
+
+       return vma->vm_private_data;
+}
+
 static int nvgrace_gpu_mmap(struct vfio_device *core_vdev,
                            struct vm_area_struct *vma)
 {
-- 
2.34.1
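
A worked example of the translation done by nvgrace_gpu_pfn_to_vma_pgoff()
may help review. All numbers below are made up, assuming 4K pages and
VFIO_PCI_OFFSET_SHIFT of 40, so the region index sits in bits 28 and up
of vm_pgoff:

- The VMA maps region index 2 starting 0x100 pages into it, so
  vma->vm_pgoff = (2 << 28) + 0x100.
- The region starts at start_pfn = PHYS_PFN(region->memphys) = 0x1000000,
  so a poisoned pfn of 0x1000180 gives pfn_offset_in_region = 0x180.
- 0x180 is not below the VMA's 0x100 offset into the region, so the
  callback reports *pgoff = vma->vm_pgoff + (0x180 - 0x100) = (2 << 28) + 0x180.
- memory-failure then resolves the VA as
  vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT) =
  vma->vm_start + 0x80 * PAGE_SIZE, and SIGBUS is queued for the task
  mapping that address.
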