guest_memfd currently uses the folio uptodate flag to track:

  1) whether or not a page has been cleared before initial usage
  2) whether or not the architecture hooks have been issued to put the
     page in a private state as defined by the architecture

In practice, 2) is only actually being tracked for SEV-SNP VMs, and
there do not seem to be any plans/reasons that would suggest this will
change in the future, so this additional tracking/complexity is not
really providing any general benefit to guest_memfd users.

Future plans around in-place conversion and hugepage support will make
this tracking even more of a burden for guest_memfd. With those changes,
the per-folio uptodate flag is planned to be used purely to track the
initial clearing of folios, whereas conversion operations could trigger
multiple transitions between 'prepared' and 'unprepared' and thus need
separate tracking. Additionally, preparation generally happens at fault
time, on the "read-side" of any global locks that might protect state
tracked by guest_memfd, so updating "preparedness" state as part of
handling a fault may require more complex locking schemes to allow for
concurrent handling of page faults for multiple vCPUs.

Instead of keeping this current/future complexity within guest_memfd
for what is essentially just SEV-SNP, drop the tracking for 2) and
trigger the arch-specific preparation hooks unconditionally on every
fault, so that the hooks can check the preparation state directly and
decide whether or not a folio still needs additional preparation.

In the case of SEV-SNP, the preparation state is already checked by the
preparation hooks to avoid double-preparation, so nothing extra needs
to be done there.

Signed-off-by: Michael Roth
---
 virt/kvm/guest_memfd.c | 47 ++++++++++++++---------------------------
 1 file changed, 15 insertions(+), 32 deletions(-)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index fdaea3422c30..9160379df378 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -76,11 +76,6 @@ static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
         return 0;
 }
 
-static inline void kvm_gmem_mark_prepared(struct folio *folio)
-{
-        folio_mark_uptodate(folio);
-}
-
 /*
  * Process @folio, which contains @gfn, so that the guest can use it.
  * The folio must be locked and the gfn must be contained in @slot.
@@ -90,13 +85,7 @@ static inline void kvm_gmem_mark_prepared(struct folio *folio)
 static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
                                   gfn_t gfn, struct folio *folio)
 {
-        unsigned long nr_pages, i;
         pgoff_t index;
-        int r;
-
-        nr_pages = folio_nr_pages(folio);
-        for (i = 0; i < nr_pages; i++)
-                clear_highpage(folio_page(folio, i));
 
         /*
          * Preparing huge folios should always be safe, since it should
@@ -114,11 +103,8 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
         WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, folio_nr_pages(folio)));
         index = kvm_gmem_get_index(slot, gfn);
         index = ALIGN_DOWN(index, folio_nr_pages(folio));
-        r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
-        if (!r)
-                kvm_gmem_mark_prepared(folio);
 
-        return r;
+        return __kvm_gmem_prepare_folio(kvm, slot, index, folio);
 }
 
 /*
@@ -420,7 +406,7 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
 
         if (!folio_test_uptodate(folio)) {
                 clear_highpage(folio_page(folio, 0));
-                kvm_gmem_mark_prepared(folio);
+                folio_mark_uptodate(folio);
         }
 
         vmf->page = folio_file_page(folio, vmf->pgoff);
@@ -757,7 +743,7 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
 static struct folio *__kvm_gmem_get_pfn(struct file *file,
                                         struct kvm_memory_slot *slot,
                                         pgoff_t index, kvm_pfn_t *pfn,
-                                        bool *is_prepared, int *max_order)
+                                        int *max_order)
 {
         struct file *slot_file = READ_ONCE(slot->gmem.file);
         struct gmem_file *f = file->private_data;
@@ -787,7 +773,6 @@ static struct folio *__kvm_gmem_get_pfn(struct file *file,
         if (max_order)
                 *max_order = 0;
 
-        *is_prepared = folio_test_uptodate(folio);
         return folio;
 }
 
@@ -797,19 +782,25 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 {
         pgoff_t index = kvm_gmem_get_index(slot, gfn);
         struct folio *folio;
-        bool is_prepared = false;
         int r = 0;
 
         CLASS(gmem_get_file, file)(slot);
         if (!file)
                 return -EFAULT;
 
-        folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
+        folio = __kvm_gmem_get_pfn(file, slot, index, pfn, max_order);
         if (IS_ERR(folio))
                 return PTR_ERR(folio);
 
-        if (!is_prepared)
-                r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
+        if (!folio_test_uptodate(folio)) {
+                unsigned long i, nr_pages = folio_nr_pages(folio);
+
+                for (i = 0; i < nr_pages; i++)
+                        clear_highpage(folio_page(folio, i));
+                folio_mark_uptodate(folio);
+        }
+
+        r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
 
         folio_unlock(folio);
 
@@ -852,7 +843,6 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
                 struct folio *folio;
                 gfn_t gfn = start_gfn + i;
                 pgoff_t index = kvm_gmem_get_index(slot, gfn);
-                bool is_prepared = false;
                 kvm_pfn_t pfn;
 
                 if (signal_pending(current)) {
@@ -860,19 +850,12 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
                         break;
                 }
 
-                folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order);
+                folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &max_order);
                 if (IS_ERR(folio)) {
                         ret = PTR_ERR(folio);
                         break;
                 }
 
-                if (is_prepared) {
-                        folio_unlock(folio);
-                        folio_put(folio);
-                        ret = -EEXIST;
-                        break;
-                }
-
                 folio_unlock(folio);
 
                 WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) || (npages - i) < (1 << max_order));
@@ -889,7 +872,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
                 p = src ? src + i * PAGE_SIZE : NULL;
                 ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
                 if (!ret)
-                        kvm_gmem_mark_prepared(folio);
+                        folio_mark_uptodate(folio);
 
 put_folio_and_exit:
                 folio_put(folio);
-- 
2.25.1


Since it was never possible to use a non-PAGE_SIZE-aligned @source_addr,
go ahead and document this as a requirement, and add a KVM_BUG_ON() in
the post-populate callback handler to ensure future reworks of
guest_memfd do not violate this constraint.

Signed-off-by: Michael Roth
---
 Documentation/virt/kvm/x86/intel-tdx.rst | 2 +-
 arch/x86/kvm/vmx/tdx.c                   | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/Documentation/virt/kvm/x86/intel-tdx.rst b/Documentation/virt/kvm/x86/intel-tdx.rst
index 5efac62c92c7..6a222e9d0954 100644
--- a/Documentation/virt/kvm/x86/intel-tdx.rst
+++ b/Documentation/virt/kvm/x86/intel-tdx.rst
@@ -156,7 +156,7 @@ KVM_TDX_INIT_MEM_REGION
 :Returns: 0 on success, <0 on error
 
 Initialize @nr_pages TDX guest private memory starting from @gpa with userspace
-provided data from @source_addr.
+provided data from @source_addr. @source_addr must be PAGE_SIZE-aligned.
 
 Note, before calling this sub command, memory attribute of the range
 [gpa, gpa + nr_pages] needs to be private. Userspace can use
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 3cf80babc3c1..57ed101a1181 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -3127,6 +3127,9 @@ static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
         if (KVM_BUG_ON(kvm_tdx->page_add_src, kvm))
                 return -EIO;
 
+        if (KVM_BUG_ON(!PAGE_ALIGNED(src), kvm))
+                return -EINVAL;
+
         /*
          * Get the source page if it has been faulted in. Return failure if the
          * source page has been swapped out or unmapped in primary memory.
-- 
2.25.1


Currently the post-populate callbacks handle copying source pages into
private GPA ranges backed by guest_memfd: kvm_gmem_populate() acquires
the filemap invalidate lock, then calls a post-populate callback which
may issue get_user_pages() on the source pages prior to copying them
into the private GPA (e.g. TDX). This will not be compatible with
in-place conversion, where the userspace page fault path will attempt
to acquire the filemap invalidate lock while holding mm->mmap_lock,
leading to a potential ABBA deadlock[1].

Address this by hoisting the GUP of the source pages above the filemap
invalidate lock, so that any page faults on the source can be taken
early, before the lock is acquired.

It's not currently clear whether this issue is reachable with the
current implementation of guest_memfd, which doesn't support in-place
conversion. However, this rework also provides a consistent mechanism
for handing stable source/target PFNs to the callbacks rather than
punting to vendor-specific code, which allows for more commonality
across architectures and may be worthwhile even without in-place
conversion.
Suggested-by: Sean Christopherson
Signed-off-by: Michael Roth
---
 arch/x86/kvm/svm/sev.c   | 40 ++++++++++++++++++++++++++------------
 arch/x86/kvm/vmx/tdx.c   | 21 +++++---------------
 include/linux/kvm_host.h |  3 ++-
 virt/kvm/guest_memfd.c   | 42 ++++++++++++++++++++++++++++++++++------
 4 files changed, 71 insertions(+), 35 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0835c664fbfd..d0ac710697a2 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2260,7 +2260,8 @@ struct sev_gmem_populate_args {
 };
 
 static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,
-                                  void __user *src, int order, void *opaque)
+                                  struct page **src_pages, loff_t src_offset,
+                                  int order, void *opaque)
 {
         struct sev_gmem_populate_args *sev_populate_args = opaque;
         struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
@@ -2268,7 +2269,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
         int npages = (1 << order);
         gfn_t gfn;
 
-        if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src))
+        if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src_pages))
                 return -EINVAL;
 
         for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) {
@@ -2284,14 +2285,21 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
                         goto err;
                 }
 
-                if (src) {
-                        void *vaddr = kmap_local_pfn(pfn + i);
+                if (src_pages) {
+                        void *src_vaddr = kmap_local_pfn(page_to_pfn(src_pages[i]));
+                        void *dst_vaddr = kmap_local_pfn(pfn + i);
 
-                        if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) {
-                                ret = -EFAULT;
-                                goto err;
+                        memcpy(dst_vaddr, src_vaddr + src_offset, PAGE_SIZE - src_offset);
+                        kunmap_local(src_vaddr);
+
+                        if (src_offset) {
+                                src_vaddr = kmap_local_pfn(page_to_pfn(src_pages[i + 1]));
+
+                                memcpy(dst_vaddr + PAGE_SIZE - src_offset, src_vaddr, src_offset);
+                                kunmap_local(src_vaddr);
                         }
-                        kunmap_local(vaddr);
+
+                        kunmap_local(dst_vaddr);
                 }
 
                 ret = rmp_make_private(pfn + i, gfn << PAGE_SHIFT, PG_LEVEL_4K,
@@ -2331,12 +2339,20 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
                 if (!snp_page_reclaim(kvm, pfn + i) &&
                     sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
                     sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
-                        void *vaddr = kmap_local_pfn(pfn + i);
+                        void *src_vaddr = kmap_local_pfn(page_to_pfn(src_pages[i]));
+                        void *dst_vaddr = kmap_local_pfn(pfn + i);
 
-                        if (copy_to_user(src + i * PAGE_SIZE, vaddr, PAGE_SIZE))
-                                pr_debug("Failed to write CPUID page back to userspace\n");
+                        memcpy(src_vaddr + src_offset, dst_vaddr, PAGE_SIZE - src_offset);
+                        kunmap_local(src_vaddr);
+
+                        if (src_offset) {
+                                src_vaddr = kmap_local_pfn(page_to_pfn(src_pages[i + 1]));
+
+                                memcpy(src_vaddr, dst_vaddr + PAGE_SIZE - src_offset, src_offset);
+                                kunmap_local(src_vaddr);
+                        }
 
-                        kunmap_local(vaddr);
+                        kunmap_local(dst_vaddr);
                 }
 
                 /* pfn + i is hypervisor-owned now, so skip below cleanup for it. */
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 57ed101a1181..dd5439ec1473 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -3115,37 +3115,26 @@ struct tdx_gmem_post_populate_arg {
 };
 
 static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
-                                  void __user *src, int order, void *_arg)
+                                  struct page **src_pages, loff_t src_offset,
+                                  int order, void *_arg)
 {
         struct tdx_gmem_post_populate_arg *arg = _arg;
         struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
         u64 err, entry, level_state;
         gpa_t gpa = gfn_to_gpa(gfn);
-        struct page *src_page;
         int ret, i;
 
         if (KVM_BUG_ON(kvm_tdx->page_add_src, kvm))
                 return -EIO;
 
-        if (KVM_BUG_ON(!PAGE_ALIGNED(src), kvm))
+        /* Source should be page-aligned, in which case src_offset will be 0. */
+        if (KVM_BUG_ON(src_offset, kvm))
                 return -EINVAL;
 
-        /*
-         * Get the source page if it has been faulted in. Return failure if the
-         * source page has been swapped out or unmapped in primary memory.
-         */
-        ret = get_user_pages_fast((unsigned long)src, 1, 0, &src_page);
-        if (ret < 0)
-                return ret;
-        if (ret != 1)
-                return -ENOMEM;
-
-        kvm_tdx->page_add_src = src_page;
+        kvm_tdx->page_add_src = src_pages[0];
 
         ret = kvm_tdp_mmu_map_private_pfn(arg->vcpu, gfn, pfn);
         kvm_tdx->page_add_src = NULL;
 
-        put_page(src_page);
-
         if (ret || !(arg->flags & KVM_TDX_MEASURE_MEMORY_REGION))
                 return ret;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d93f75b05ae2..7e9d2403c61f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2581,7 +2581,8 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
  * Returns the number of pages that were populated.
  */
 typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
-                                    void __user *src, int order, void *opaque);
+                                    struct page **src_pages, loff_t src_offset,
+                                    int order, void *opaque);
 
 long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
                        kvm_gmem_populate_cb post_populate, void *opaque);
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 9160379df378..e9ac3fd4fd8f 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -814,14 +814,17 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_get_pfn);
 
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
+
+#define GMEM_GUP_NPAGES (1UL << PMD_ORDER)
+
 long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
                        kvm_gmem_populate_cb post_populate, void *opaque)
 {
         struct kvm_memory_slot *slot;
-        void __user *p;
-
+        struct page **src_pages;
         int ret = 0, max_order;
-        long i;
+        loff_t src_offset = 0;
+        long i, src_npages;
 
         lockdep_assert_held(&kvm->slots_lock);
 
@@ -836,9 +839,28 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
         if (!file)
                 return -EFAULT;
 
+        npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
+        npages = min_t(ulong, npages, GMEM_GUP_NPAGES);
+
+        if (src) {
+                src_npages = IS_ALIGNED((unsigned long)src, PAGE_SIZE) ? npages : npages + 1;
+
+                src_pages = kmalloc_array(src_npages, sizeof(struct page *), GFP_KERNEL);
+                if (!src_pages)
+                        return -ENOMEM;
+
+                ret = get_user_pages_fast((unsigned long)src, src_npages, 0, src_pages);
+                if (ret < 0)
+                        return ret;
+
+                if (ret != src_npages)
+                        return -ENOMEM;
+
+                src_offset = (loff_t)(src - PTR_ALIGN_DOWN(src, PAGE_SIZE));
+        }
+
         filemap_invalidate_lock(file->f_mapping);
 
-        npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
         for (i = 0; i < npages; i += (1 << max_order)) {
                 struct folio *folio;
                 gfn_t gfn = start_gfn + i;
@@ -869,8 +891,8 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
                         max_order--;
                 }
 
-                p = src ? src + i * PAGE_SIZE : NULL;
-                ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
+                ret = post_populate(kvm, gfn, pfn, src ? &src_pages[i] : NULL,
+                                    src_offset, max_order, opaque);
                 if (!ret)
                         folio_mark_uptodate(folio);
 
@@ -882,6 +904,14 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 
         filemap_invalidate_unlock(file->f_mapping);
 
+        if (src) {
+                long j;
+
+                for (j = 0; j < src_npages; j++)
+                        put_page(src_pages[j]);
+                kfree(src_pages);
+        }
+
         return ret && !i ? ret : i;
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_populate);
-- 
2.25.1