Plumb the SPTE pointer into handle_changed_spte() so that removal of leaf
mirror entries can be forwarded to TDX in handle_changed_spte() itself,
instead of effectively requiring callers to manually do so.  Relying on
each caller to invoke .remove_external_spte() is confusing and brittle,
e.g. it subtly relies on tdp_mmu_set_spte_atomic() never removing SPTEs.

This will also allow consolidating all S-EPT updates into a single
kvm_x86_ops hook.

Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/mmu/tdp_mmu.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 6fb48b217f5b..8743cd020d12 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -320,9 +320,9 @@ void kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu, bool mirror)
 	}
 }
 
-static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
-				u64 old_spte, u64 new_spte, int level,
-				bool shared);
+static void handle_changed_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
+				gfn_t gfn, u64 old_spte, u64 new_spte,
+				int level, bool shared);
 
 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
@@ -471,7 +471,7 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
 							  FROZEN_SPTE, level);
 		}
-		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
+		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), sptep, gfn,
 				    old_spte, FROZEN_SPTE, level, shared);
 
 		if (is_mirror_sp(sp)) {
@@ -499,6 +499,7 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
  * handle_changed_spte - handle bookkeeping associated with an SPTE change
  * @kvm: kvm instance
  * @as_id: the address space of the paging structure the SPTE was a part of
+ * @sptep: pointer to the SPTE
  * @gfn: the base GFN that was mapped by the SPTE
  * @old_spte: The value of the SPTE before the change
  * @new_spte: The value of the SPTE after the change
@@ -511,9 +512,9 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
  * dirty logging updates are handled in common code, not here (see make_spte()
  * and fast_pf_fix_direct_spte()).
  */
-static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
-				u64 old_spte, u64 new_spte, int level,
-				bool shared)
+static void handle_changed_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
+				gfn_t gfn, u64 old_spte, u64 new_spte,
+				int level, bool shared)
 {
 	bool was_present = is_shadow_present_pte(old_spte);
 	bool is_present = is_shadow_present_pte(new_spte);
@@ -685,8 +686,8 @@ static inline int __must_check tdp_mmu_set_spte_atomic(struct kvm *kvm,
 	if (ret)
 		return ret;
 
-	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
-			    new_spte, iter->level, true);
+	handle_changed_spte(kvm, iter->as_id, iter->sptep, iter->gfn,
+			    iter->old_spte, new_spte, iter->level, true);
 
 	return 0;
 }
@@ -720,7 +721,7 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 
 	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
 
-	handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
+	handle_changed_spte(kvm, as_id, sptep, gfn, old_spte, new_spte, level, false);
 
 	/*
 	 * Users that do non-atomic setting of PTEs don't operate on mirror
-- 
2.53.0.rc1.217.geba53bf80e-goog
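
Not part of the patch itself: below is a minimal, standalone C sketch of the
design point the changelog makes, i.e. that once the change handler has the
SPTE context, it can forward removals of leaf mirror entries to the external
MMU on its own, instead of relying on every caller to remember to do so.
Every name and signature here (set_spte(), remove_external_spte(), the toy
SPTE encoding) is a stand-in for illustration, not the kernel's actual code.

	/* toy_model.c: standalone sketch, NOT kernel code. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;
	typedef u64 gfn_t;

	/* Toy SPTE encoding: bit 0 = present, bits 12+ = PFN. */
	#define SPTE_PRESENT	(1ull << 0)

	static bool spte_present(u64 spte)
	{
		return spte & SPTE_PRESENT;
	}

	/* Stand-in for the external (TDX/S-EPT-like) side of the removal. */
	static void remove_external_spte(gfn_t gfn, int level, u64 old_spte)
	{
		printf("forward removal: gfn=%llu level=%d old_spte=%#llx\n",
		       (unsigned long long)gfn, level,
		       (unsigned long long)old_spte);
	}

	/*
	 * Centralized change handler.  Because it now receives the SPTE
	 * pointer/context, it can forward mirror leaf removals itself, so no
	 * individual caller has to invoke the external-removal hook.
	 */
	static void handle_changed_spte(u64 *sptep, gfn_t gfn, u64 old_spte,
					u64 new_spte, int level, bool mirror)
	{
		bool was_present = spte_present(old_spte);
		bool is_present = spte_present(new_spte);

		/* Unused in this toy; the real handler uses the pointer. */
		(void)sptep;

		if (mirror && was_present && !is_present)
			remove_external_spte(gfn, level, old_spte);
	}

	/* One of several call sites; all of them funnel through the handler. */
	static void set_spte(u64 *sptep, gfn_t gfn, u64 new_spte, int level,
			     bool mirror)
	{
		u64 old_spte = *sptep;

		*sptep = new_spte;
		handle_changed_spte(sptep, gfn, old_spte, new_spte, level,
				    mirror);
	}

	int main(void)
	{
		u64 mirror_spte = SPTE_PRESENT | (0x1234ull << 12);

		/* Zapping a present mirror leaf triggers the forwarding. */
		set_spte(&mirror_spte, 42, 0, 1, true);
		return 0;
	}

The point of funneling through a single handler is that the "removed a
present leaf in a mirror table" condition is checked in exactly one place,
rather than being re-implemented (or forgotten) at each SPTE-writing call
site.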