After removing a TD's private page, the TDX module does not write back
and invalidate the cache lines associated with the page and its keyID
(i.e., the TD's guest keyID). The SEAMCALL wrapper
tdh_phymem_page_wbinvd_hkid() lets the caller pass the TD's guest keyID
and a physical memory address, and invokes the SEAMCALL
TDH_PHYMEM_PAGE_WBINVD to perform the cache line invalidation.

Enhance the SEAMCALL wrapper tdh_phymem_page_wbinvd_hkid() to support
cache line invalidation for huge pages by introducing the parameters
"folio", "start_idx", and "npages". These parameters specify the
physical memory starting from the page at "start_idx" within "folio"
and spanning "npages" contiguous PFNs. Return TDX_OPERAND_INVALID if
the specified memory is not entirely contained within a single folio.

Signed-off-by: Xiaoyao Li
Signed-off-by: Isaku Yamahata
Suggested-by: Rick Edgecombe
Signed-off-by: Yan Zhao
---
RFC v2:
- Enhance tdh_phymem_page_wbinvd_hkid() to invalidate multiple pages
  directly, rather than looping within KVM, following Dave's
  suggestion: "Don't wrap the wrappers." (Rick)

RFC v1:
- Split patch.
- Added a helper tdx_wbinvd_page() in TDX, which accepts a
  "struct page *" param.
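
For reference, a minimal sketch of how a caller might use the updated
wrapper for a 2MB private page. tdx_wbinvd_2m_page() is a hypothetical
helper shown only for illustration and is not part of this patch; the
real KVM caller below uses KVM_PAGES_PER_HPAGE(level) instead of the
PMD_SIZE / PAGE_SIZE constant used here:

/*
 * Illustration only (not part of this patch): write back and invalidate
 * the cache lines of a removed 2MB private page with the new
 * folio-based wrapper.
 */
static u64 tdx_wbinvd_2m_page(u16 hkid, struct page *page)
{
        struct folio *folio = page_folio(page);

        /*
         * Pass the page's index within its folio and the number of 4KB
         * PFNs covered by a 2MB mapping; the wrapper returns
         * TDX_OPERAND_INVALID if the range spills past the folio.
         */
        return tdh_phymem_page_wbinvd_hkid(hkid, folio,
                                           folio_page_idx(folio, page),
                                           PMD_SIZE / PAGE_SIZE);
}

Keeping the per-4KB loop inside the wrapper (per the RFC v2 note above)
means callers only describe the range; they do not iterate over
SEAMCALLs themselves.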
---
 arch/x86/include/asm/tdx.h  |  4 ++--
 arch/x86/kvm/vmx/tdx.c      |  6 ++++--
 arch/x86/virt/vmx/tdx/tdx.c | 17 ++++++++++++++---
 3 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index d2cf48e273d5..a125bb20a28a 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -194,8 +194,8 @@ u64 tdh_mem_track(struct tdx_td *tdr);
 u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2);
 u64 tdh_phymem_cache_wb(bool resume);
 u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td);
-u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page);
-
+u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct folio *folio,
+				unsigned long start_idx, unsigned long npages);
 void tdx_meminfo(struct seq_file *m);
 #else
 static inline void tdx_init(void) { }
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 0a2b183899d8..8eaf8431c5f1 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1694,6 +1694,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
 {
 	int tdx_level = pg_level_to_tdx_sept_level(level);
 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+	struct folio *folio = page_folio(page);
 	gpa_t gpa = gfn_to_gpa(gfn);
 	u64 err, entry, level_state;
 
@@ -1728,8 +1729,9 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
 		return -EIO;
 	}
 
-	err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
-
+	err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, folio,
+					  folio_page_idx(folio, page),
+					  KVM_PAGES_PER_HPAGE(level));
 	if (KVM_BUG_ON(err, kvm)) {
 		pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
 		return -EIO;
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index d941f083f741..64219c659844 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -2030,13 +2030,24 @@ u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td)
 }
 EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_tdr);
 
-u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page)
+u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct folio *folio,
+				unsigned long start_idx, unsigned long npages)
 {
+	struct page *start = folio_page(folio, start_idx);
 	struct tdx_module_args args = {};
+	u64 err;
+
+	if (start_idx + npages > folio_nr_pages(folio))
+		return TDX_OPERAND_INVALID;
 
-	args.rcx = mk_keyed_paddr(hkid, page);
+	for (unsigned long i = 0; i < npages; i++) {
+		args.rcx = mk_keyed_paddr(hkid, nth_page(start, i));
 
-	return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
+		err = seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
+		if (err)
+			break;
+	}
+	return err;
 }
 EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_hkid);
 
-- 
2.43.2