Add and use a helper, kvm_load_xfeatures(), to dedup the code that loads
guest/host xfeatures, by passing in the desired XCR0 and XSS values.

No functional change intended.

Signed-off-by: Binbin Wu
---
The patch is based on the patch series "KVM: x86: Cleanup #MC and
XCR0/XSS/PKRU handling" [1], which is applied on top of the kvm-x86 next
branch (commit a996dd2a5e1ec54dcf7d7b93915ea3f97e14e68a).

[1] https://lore.kernel.org/all/20251030224246.3456492-1-seanjc@google.com
---
 arch/x86/kvm/x86.c | 25 +++++--------------------
 1 file changed, 5 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b8df02a083b2..35fc1333e198 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1203,33 +1203,18 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lmsw);
 
-static void kvm_load_guest_xfeatures(struct kvm_vcpu *vcpu)
+static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, u64 xcr0, u64 xss)
 {
 	if (vcpu->arch.guest_state_protected)
 		return;
 
 	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 		if (vcpu->arch.xcr0 != kvm_host.xcr0)
-			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+			xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0);
 
 		if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
 		    vcpu->arch.ia32_xss != kvm_host.xss)
-			wrmsrq(MSR_IA32_XSS, vcpu->arch.ia32_xss);
-	}
-}
-
-static void kvm_load_host_xfeatures(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.guest_state_protected)
-		return;
-
-	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
-		if (vcpu->arch.xcr0 != kvm_host.xcr0)
-			xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
-
-		if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
-		    vcpu->arch.ia32_xss != kvm_host.xss)
-			wrmsrq(MSR_IA32_XSS, kvm_host.xss);
+			wrmsrq(MSR_IA32_XSS, xss);
 	}
 }
 
@@ -11310,7 +11295,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.guest_fpu.xfd_err)
 		wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
 
-	kvm_load_guest_xfeatures(vcpu);
+	kvm_load_xfeatures(vcpu, vcpu->arch.xcr0, vcpu->arch.ia32_xss);
 
 	if (unlikely(vcpu->arch.switch_db_regs &&
 		     !(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
@@ -11406,7 +11391,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
-	kvm_load_host_xfeatures(vcpu);
+	kvm_load_xfeatures(vcpu, kvm_host.xcr0, kvm_host.xss);
 
 	/*
 	 * Sync xfd before calling handle_exit_irqoff() which may

base-commit: a996dd2a5e1ec54dcf7d7b93915ea3f97e14e68a
prerequisite-patch-id: 9aafd634f0ab2033d7b032e227d356777469e046
prerequisite-patch-id: 656ce1f5aa97c77a9cf6125713707a5007b2c7ba
prerequisite-patch-id: d6328b8c0fdb8593bb534ab7378821edcf9f639d
prerequisite-patch-id: c7f36d1cedc4ae6416223d2225460944629b3d4f
-- 
2.46.0
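
For quick reference while reviewing, this is roughly how the deduped helper
and its two call sites read with the patch applied; it is reconstructed from
the hunks above (whitespace and surrounding context may differ from the
actual tree) and is not part of the patch itself:

static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, u64 xcr0, u64 xss)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
		if (vcpu->arch.xcr0 != kvm_host.xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0);

		if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
		    vcpu->arch.ia32_xss != kvm_host.xss)
			wrmsrq(MSR_IA32_XSS, xss);
	}
}

	/* In vcpu_enter_guest(), before entry: load the guest's values. */
	kvm_load_xfeatures(vcpu, vcpu->arch.xcr0, vcpu->arch.ia32_xss);

	/* In vcpu_enter_guest(), after exit: restore the host's values. */
	kvm_load_xfeatures(vcpu, kvm_host.xcr0, kvm_host.xss);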