From: Uros Bizjak

SPEC_CTRL is an MSR, i.e. a 64-bit value, but the VMRUN assembly code
assumes bits 63:32 are always zero. The bug is _currently_ benign because
neither KVM nor the kernel supports setting any of bits 63:32, but it's
still a bug that needs to be fixed.

Signed-off-by: Uros Bizjak
Suggested-by: Sean Christopherson
Co-developed-by: Sean Christopherson
Signed-off-by: Sean Christopherson
---
v2: Load the host's value into RDX and CMP against the guest's value
    from memory to avoid loading the host's value twice. Swap the order
    of loads for 32-bit to match. [Uros]

v1: https://lore.kernel.org/all/20251106011330.75571-1-seanjc@google.com

 arch/x86/kvm/svm/vmenter.S | 43 +++++++++++++++++++++++++++++++++++--------
 1 file changed, 35 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 235c4af6b692..22be0acf2a41 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -52,11 +52,23 @@
 	 * there must not be any returns or indirect branches between this code
 	 * and vmentry.
 	 */
-	movl SVM_spec_ctrl(%_ASM_DI), %eax
-	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
+#ifdef CONFIG_X86_64
+	mov SVM_spec_ctrl(%rdi), %rdx
+	cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
 	je 801b
+	movl %edx, %eax
+	shr $32, %rdx
+#else
+	mov SVM_spec_ctrl(%edi), %eax
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
+	xor %eax, %ecx
+	mov SVM_spec_ctrl + 4(%edi), %edx
+	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi
+	xor %edx, %esi
+	or %esi, %ecx
+	je 801b
+#endif
 	mov $MSR_IA32_SPEC_CTRL, %ecx
-	xor %edx, %edx
 	wrmsr
 	jmp 801b
 .endm
@@ -81,13 +93,25 @@
 	jnz 998f
 	rdmsr
 	movl %eax, SVM_spec_ctrl(%_ASM_DI)
+	movl %edx, SVM_spec_ctrl + 4(%_ASM_DI)
 998:
 
-	/* Now restore the host value of the MSR if different from the guest's. */
-	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
-	cmp SVM_spec_ctrl(%_ASM_DI), %eax
+#ifdef CONFIG_X86_64
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	cmp SVM_spec_ctrl(%rdi), %rdx
 	je 901b
-	xor %edx, %edx
+	movl %edx, %eax
+	shr $32, %rdx
+#else
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %eax
+	mov SVM_spec_ctrl(%edi), %esi
+	xor %eax, %esi
+	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx
+	mov SVM_spec_ctrl + 4(%edi), %edi
+	xor %edx, %edi
+	or %edi, %esi
+	je 901b
+#endif
 	wrmsr
 	jmp 901b
 .endm
@@ -211,7 +235,10 @@ SYM_FUNC_START(__svm_vcpu_run)
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
 
-	/* Clobbers RAX, RCX, RDX. */
+	/*
+	 * Clobbers RAX, RCX, RDX (and EDI on 32-bit), consumes RDI (@svm) and
+	 * RSP (pointer to @spec_ctrl_intercepted).
+	 */
 	RESTORE_HOST_SPEC_CTRL
 
 	/*

base-commit: a996dd2a5e1ec54dcf7d7b93915ea3f97e14e68a
-- 
2.51.2.1041.gc1ab5b90ca-goog
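
P.S. For readers unfamiliar with the MSR interface: WRMSR takes the MSR
index in ECX and the 64-bit payload split across EDX:EAX (high and low
halves respectively), which is why the old "xor %edx, %edx" silently
forced bits 63:32 of SPEC_CTRL to zero. A minimal C sketch of the
EDX:EAX split the fixed assembly now performs; illustrative only, not
part of the patch. wrmsr_lo_hi() is a hypothetical stand-in for the
kernel's wrmsr(msr, low, high) helper, and of course WRMSR only
executes at CPL0:

	#include <linux/types.h>

	#define MSR_IA32_SPEC_CTRL	0x00000048

	/* Hypothetical stand-in for the kernel's wrmsr(msr, low, high). */
	static inline void wrmsr_lo_hi(u32 msr, u32 lo, u32 hi)
	{
		asm volatile("wrmsr" :: "c" (msr), "a" (lo), "d" (hi));
	}

	/* Write all 64 bits of SPEC_CTRL; the old asm hardcoded hi = 0. */
	static inline void write_spec_ctrl(u64 val)
	{
		u32 lo = (u32)val;		/* bits 31:0  -> EAX */
		u32 hi = (u32)(val >> 32);	/* bits 63:32 -> EDX */

		wrmsr_lo_hi(MSR_IA32_SPEC_CTRL, lo, hi);
	}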